blob_id stringlengths 40..40 | directory_id stringlengths 40..40 | path stringlengths 3..616 | content_id stringlengths 40..40 | detected_licenses sequencelengths 0..112 | license_type stringclasses 2 values | repo_name stringlengths 5..115 | snapshot_id stringlengths 40..40 | revision_id stringlengths 40..40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46..2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32..2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32..2023-09-06 01:08:06 | github_id int64 4.92k..681M ⌀ | star_events_count int64 0..209k | fork_events_count int64 0..110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49..2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19..2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3..10.2M | extension stringclasses 188 values | content stringlengths 3..10.2M | authors sequencelengths 1..1 | author_id stringlengths 1..132
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1dc90019573b41fd04ccda4e3a6b90bc90a27b7a | 48b6546e0cf0aeba23f802c005dbcb863f8ceecb | /searching-algorithms/linear_search.py | 1da3586fe8a61e11198ed6a4335d78e697ab93b3 | [] | no_license | panu2306/Data-Structure-Programs | 42bc1b592fc070eed9c16a192d27103593723061 | a4cb5fb496b672492e19468868a2da266d7d76aa | refs/heads/master | 2022-11-14T23:04:20.609617 | 2022-11-14T18:01:21 | 2022-11-14T18:01:21 | 148,877,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | def linearSearch(a, searchElement):
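    # Scan the list left to right; return 1 on a match, -1 otherwise.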
arraySize = len(a)
for i in range(0, arraySize):
if(a[i] == searchElement):
return 1
return -1
a = [1, 4, 2, 5, 3]
searchElement = 5
result = linearSearch(a, searchElement)
print("Element is not present in array") if(result == -1) else print("Element is present in array")
| [
"[email protected]"
] | |
4206b96806030b27e6a032f97cb22dfdb1822a45 | d42f7d4f2377f67797d41b2f75347c5330d34953 | /fabfile/france.local.py | 319441ef820b3e39735a14277588f4449a76011b | [
"WTFPL"
] | permissive | phreephree/addok | 8d92893b791416e0169e6c74f5842868833478e9 | 320d145e72964d54eb33742f0329e9f46f5c5ab5 | refs/heads/master | 2021-08-24T06:51:04.971611 | 2017-12-08T13:44:40 | 2017-12-08T13:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | QUERY_PROCESSORS_PYPATHS = [
'addok.helpers.text.check_query_length',
"addok_france.extract_address",
"addok_france.clean_query",
"addok_france.remove_leading_zeros",
]
SEARCH_RESULT_PROCESSORS_PYPATHS = [
"addok.helpers.results.match_housenumber",
"addok_france.make_labels",
"addok.helpers.results.score_by_importance",
"addok.helpers.results.score_by_autocomplete_distance",
"addok.helpers.results.score_by_ngram_distance",
"addok.helpers.results.score_by_geo_distance",
]
PROCESSORS_PYPATHS = [
"addok.helpers.text.tokenize",
"addok.helpers.text.normalize",
"addok_france.glue_ordinal",
"addok_france.fold_ordinal",
"addok_france.flag_housenumber",
"addok.helpers.text.synonymize",
"addok_fr.phonemicize",
]
SQLITE_DB_PATH = '/srv/addok/addok.db'
| [
"[email protected]"
] | |
d2619d839aa6f5611e6030cf62cb0f38db8c7b50 | c3cff86728d436e4e7b522b1382d96f8e32611ff | /minidjango/utils/types.py | 6df56e1950ad1a53a82390965a13bada3545b9c2 | [] | no_license | pahaz/lesson2 | 6189ce3d3c06c040b27d283ae0754eed6c496b43 | aea2e09e98e5562476a5d15447e15e127f900d43 | refs/heads/master | 2023-08-26T20:27:08.697314 | 2016-04-15T12:26:09 | 2016-04-15T12:26:09 | 55,223,164 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,387 | py | import collections
from io import BytesIO
import io
__author__ = 'pahaz'
class MultiValueDict(collections.UserDict):
"""
>>> d = MultiValueDict()
>>> d['foo'] = ['bar']
>>> d['foo']
'bar'
>>> d = MultiValueDict({'foo': ['v1', 'v2']})
>>> d['foo']
'v1'
>>> d.getlist('foo')
['v1', 'v2']
>>> list(d.items())
[('foo', 'v1')]
>>> dict(MultiValueDict({'foo': ['v1', 'v2']}))
{'foo': 'v1'}
>>> dict(MultiValueDict({'foo': ['v1']}))
{'foo': 'v1'}
"""
    def __iter__(self):
        yield from super().__iter__()
def __getitem__(self, key):
val = super().__getitem__(key)
if isinstance(val, (list, tuple)):
val = val[0]
else:
raise RuntimeError('Invalid MultiValueDict inner state')
return val
def __setitem__(self, key, item):
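        # Values are always stored as non-empty lists; __getitem__ returns
        # the first element.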
if not isinstance(item, (list, tuple)):
raise TypeError("Can't set not a multi value")
if not item:
raise ValueError("Can't set empty multi value")
self.data[key] = item
def getlist(self, key, default=None):
val = self.data.get(key, default)
if not isinstance(val, (list, tuple)):
raise RuntimeError('Invalid MultiValueDict inner state')
return val
class LimitedStream(io.IOBase):
"""
LimitedStream wraps another stream in order to not allow
reading from it past specified amount of bytes.
>>> import io
>>> bio = io.BytesIO(b"some -- long -- byte string")
>>> lbio = LimitedStream(bio, 4)
>>> lbio.read()
b'some'
>>> lbio.read()
b''
>>> bio = io.BytesIO(b"s\\nome -- long -- byte string")
>>> lbio = LimitedStream(bio, 4)
>>> lbio.readline()
b's\\n'
>>> lbio.read()
b'om'
>>> lbio.read()
b''
"""
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
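        # Pull limited chunks into the buffer until it holds a newline (or
        # `size` bytes), then split one line off and keep the rest buffered.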
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
| [
"[email protected]"
] | |
5b5cfdbc293e4fea4032ad37a2ddd1f57d91ab27 | 4999d470db3128d6b2d904babf1446d62a9a6cc2 | /flask_project/app.py | ae20cc5f3b6b108c0d979f90fd5d9a9704534c78 | [] | no_license | love-adela/jungle_admission | 2ade66d7a828965f250f5eac9b971a9a84eddb88 | bf3c3f52c61c6dded256245e28aaf30ab2ec5ffa | refs/heads/main | 2023-01-01T17:16:16.756328 | 2020-10-29T09:27:55 | 2020-10-29T09:27:55 | 306,814,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from flask import Flask, render_template
from flask import request, jsonify
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/test', methods=['GET'])
def test_get():
received_title = request.args.get('given_title')
print(received_title)
return jsonify({'result': 'success', 'msg': '이 요청은 GET!'})
@app.route('/test', methods=['POST'])
def test_post():
    received_title = request.form.get('given_title')
print(received_title)
return jsonify({'result':'success', 'msg': '이 요청은 POST!'})
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True) | [
"[email protected]"
] | |
7ff6e8fd5a803d6d9e9157afe6eacd17efe5e4a0 | 0c3f4769e91bf7bea8f9ce74a6dd11e092638bc4 | /tests/test_ingest_title.py | 613caef04600cc33de74fa6ad3a98fe50d745030 | [
"MIT"
] | permissive | ourresearch/journalsdb | 392ea36282b17154289f1845628cc4706d3c59e9 | 718b118b8e97da9a07f89c2cd2bae207a9217b66 | refs/heads/main | 2022-10-29T10:05:59.410041 | 2022-10-26T18:31:21 | 2022-10-26T18:31:21 | 331,048,946 | 9 | 0 | null | 2021-04-03T16:34:12 | 2021-01-19T16:56:22 | Python | UTF-8 | Python | false | false | 728 | py | from ingest.journals.journals_new_journal import NewJournal
from models.journal import ISSNMetaData
def test_clean_title_print():
issn_md = ISSNMetaData()
nj = NewJournal(issn_md)
title_with_print = nj.clean_title("Cooking Today (Print)")
assert title_with_print == "Cooking Today"
def test_clean_title_electronic():
issn_md = ISSNMetaData()
nj = NewJournal(issn_md)
title_with_electronic = nj.clean_title("Cooking Today (electronic)")
assert title_with_electronic == "Cooking Today"
def test_clean_title_trailing_period():
issn_md = ISSNMetaData()
nj = NewJournal(issn_md)
title_with_period = nj.clean_title("Cooking today. ")
assert title_with_period == "Cooking today"
| [
"[email protected]"
] | |
ae4a4cd38051c792e2d024de49626d30f9f91601 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/common_shamil_v3/hr_custom/report/promotion_report.py | 28508ee80d1f425d9b4ed82fa841a66a554347f0 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | import time
import pooler
#import rml_parse
import copy
from report import report_sxw
import pdb
import re
class promotion_report(report_sxw.rml_parse):
_name = 'report.promotion.report'
def __init__(self, cr, uid, name, context):
super(promotion_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'_get_emp':self._get_emp,
'line5':self.get_promotion_total,
})
self.context = context
def get_promotion_total(self,data,choice,date1_v,date2_v):
process_archive=self.pool.get('hr.process.archive')
# res = process_archive.self(self,choice,'promotion_date','promotion_date',date1_v,date2_v)
res = process_archive._archive_count(self,choice,'promotion_date','promotion_date',date1_v,date2_v)
return res
def _get_emp(self,data):
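        # Joins promotion-archive rows to employee and salary-degree data
        # for records approved between the two report dates.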
prom_obj=self.pool.get('hr.process.archive')
date1 = data['form']['fromm']
date2 = data['form']['to']
ids_list=prom_obj.search(self.cr,self.uid, [('approve_date', '>=', date1),('approve_date', '<=', date2)],context=self.context)
#for l in ids_list:
# degree_id=prom_obj.browse(self.cr,self.uid,l,context=self.context).reference.id
self.cr.execute('''
SELECT ROW_NUMBER ( )
OVER (order by p.id) as no,e.emp_code as code,r.name as emp,p.approve_date as date,
d.name AS degree FROM hr_process_archive AS p
left join hr_employee AS e on (p.employee_id=e.id)
left join resource_resource AS r on (e.resource_id=r.id)
left join hr_salary_degree AS d on (e.degree_id=d.id)
where
e.employment_date < p.approve_date and
p.approve_date between %s and %s
''',(date1,date2))
res = self.cr.dictfetchall()
return res
report_sxw.report_sxw('report.promotion.report', 'hr.process.archive','addons/hr_custom/report/promotion_report.rml', parser=promotion_report, header=True)
| [
"[email protected]"
] | |
dd31b52d075a85e5b53ed2def7f9ba0e2cdb578c | 6813299c28aab1e49f724c0b62c147b201cfaad4 | /keystone/manage/__init__.py | 899bf4a64f2e122f26e7b33a0a2b07789f3e92d7 | [] | no_license | emonty/test-deb-keystone | dbe460bc29f667d625c9b13d2006ebdfc2a96cdb | 1d374a52fc2c91c604abfc448d12324922a585a6 | refs/heads/master | 2021-01-22T06:54:26.712546 | 2011-08-19T20:59:54 | 2011-08-19T20:59:54 | 2,236,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,989 | py | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Keystone Identity Server - CLI Management Interface
"""
import sys
import logging
import optparse
import keystone
from keystone.common import config
from keystone.manage.api import *
class RaisingOptionParser(optparse.OptionParser):
def error(self, msg):
self.print_usage(sys.stderr)
raise optparse.OptParseError(msg)
def parse_args(args=None):
usage = "usage: %prog [options] type command [id [attributes]]"
# Initialize a parser for our configuration paramaters
parser = RaisingOptionParser(usage, version='%%prog %s'
% keystone.version())
_common_group = config.add_common_options(parser)
config.add_log_options(parser)
# Parse command-line and load config
(options, args) = config.parse_options(parser, args)
_config_file, conf = config.load_paste_config('admin', options, args)
config.setup_logging(options, conf)
db.configure_backends(conf.global_conf)
return args
def process(*args):
"""
Usage: keystone-manage [options] type command [id [attributes]]
type : role, tenant, user, token, endpoint, endpointTemplates
command : add, list, disable, delete, grant, revoke
id : name or id
attributes : depending on type...
users : password, tenant
tokens : user, tenant, expiration
role list [tenant] will list roles granted on that tenant
options
-c | --config-file : config file to use
-d | --debug : debug mode
Example: keystone-manage user add Admin P@ssw0rd
"""
# Check arguments
if len(args) == 0:
raise optparse.OptParseError(
'No obj type specified for first argument')
object_type = args[0]
if object_type not in ['user', 'tenant', 'role', 'service',
'endpointTemplates', 'token', 'endpoint', 'credentials']:
raise optparse.OptParseError(
'%s is not a supported obj type' % object_type)
if len(args) == 1:
raise optparse.OptParseError(
'No command specified for second argument')
command = args[1]
if command not in ['add', 'list', 'disable', 'delete', 'grant', 'revoke']:
raise optparse.OptParseError('add, disable, delete, and list are the '
'only supported commands (right now)')
if len(args) == 2:
if command != 'list':
raise optparse.OptParseError('No id specified for third argument')
if len(args) > 2:
object_id = args[2]
# Helper functions
def require_args(args, min, msg):
"""Ensure there are at least `min` arguments"""
if len(args) < min:
raise optparse.OptParseError(msg)
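    # optional_arg(x) returns the positional argument at index x if supplied, else None.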
optional_arg = (lambda x: len(args) > x and args[x] or None)
def print_table(header_row, rows):
"""Prints a lists of lists as table in a human readable format"""
print "\t".join(header_row)
print '-' * 79
rows = [[str(col) for col in row] for row in rows]
print "\n".join(["\t".join(row) for row in rows])
# Execute command
if (object_type, command) == ('user', 'add'):
require_args(args, 4, 'No password specified for fourth argument')
if add_user(id=object_id, password=args[3], tenant=optional_arg(4)):
print "SUCCESS: User %s created." % object_id
elif (object_type, command) == ('user', 'disable'):
if disable_user(id=object_id):
print "SUCCESS: User %s disabled." % object_id
elif (object_type, command) == ('user', 'list'):
print_table(('id', 'enabled', 'tenant'), list_users())
elif (object_type, command) == ('tenant', 'add'):
if add_tenant(id=object_id):
print "SUCCESS: Tenant %s created." % object_id
elif (object_type, command) == ('tenant', 'list'):
print_table(('tenant', 'enabled'), list_tenants())
elif (object_type, command) == ('tenant', 'disable'):
if disable_tenant(id=object_id):
print "SUCCESS: Tenant %s disabled." % object_id
elif (object_type, command) == ('role', 'add'):
if add_role(id=object_id):
print "SUCCESS: Role %s created successfully." % object_id
elif (object_type, command) == ('role', 'list'):
tenant = optional_arg(2)
if tenant:
# print with users
print 'Role assignments for tenant %s' % tenant
print_table(('User', 'Role'), list_roles(tenant=tenant))
else:
# print without tenants
print_table(('id'), list_roles())
elif (object_type, command) == ('role', 'grant'):
require_args(args, 4, "Missing arguments: role grant 'role' 'user' "
"'tenant (optional)'")
tenant = len(args) > 4 and args[4] or None
if grant_role(object_id, args[3], tenant):
print("SUCCESS: Granted %s the %s role on %s." %
(object_id, args[3], tenant))
elif (object_type, command) == ('endpointTemplates', 'add'):
require_args(args, 9, "Missing arguments: endpointTemplates add "
"'region' 'service' 'publicURL' 'adminURL' 'internalURL' "
"'enabled' 'global'")
if add_endpoint_template(region=args[2], service=args[3],
public_url=args[4], admin_url=args[5], internal_url=args[6],
enabled=args[7], is_global=args[8]):
print("SUCCESS: Created EndpointTemplates for %s pointing to %s." %
(args[3], args[4]))
elif (object_type, command) == ('endpointTemplates', 'list'):
tenant = optional_arg(2)
if tenant:
print 'Endpoints for tenant %s' % tenant
print_table(('service', 'region', 'Public URL'),
list_tenant_endpoints())
else:
print 'All EndpointTemplates'
print_table(('service', 'region', 'Public URL'),
list_endpoint_templates())
elif (object_type, command) == ('endpoint', 'add'):
require_args(args, 4, "Missing arguments: endPoint add tenant "
"endPointTemplate")
if add_endpoint(tenant=args[2], endpoint_template=args[3]):
print("SUCCESS: Endpoint %s added to tenant %s." %
(args[3], args[2]))
elif (object_type, command) == ('token', 'add'):
require_args(args, 6, 'Creating a token requires a token id, user, '
'tenant, and expiration')
if add_token(token=object_id, user=args[3], tenant=args[4],
expires=args[5]):
print "SUCCESS: Token %s created." % (object_id,)
elif (object_type, command) == ('token', 'list'):
print_table(('token', 'user', 'expiration', 'tenant'), list_tokens())
elif (object_type, command) == ('token', 'delete'):
if delete_token(token=object_id):
print 'SUCCESS: Token %s deleted.' % (object_id,)
elif (object_type, command) == ('service', 'add'):
if add_service(service=object_id):
print "SUCCESS: Service %s created successfully." % (object_id,)
elif (object_type, command) == ('service', 'list'):
print_table(('service'), list_services())
elif (object_type, command) == ('credentials', 'add'):
require_args(args, 6, 'Creating a credentials requires a type, key, '
'secret, and tenant_id (id is user_id)')
if add_credentials(user=object_id, type=args[3], key=args[4],
secrete=args[5], tenant=optional_arg(6)):
print "SUCCESS: Credentials %s created." % result.id
else:
# Command not handled
print ("ERROR: unrecognized command %s %s" % (object_type, command))
def main():
try:
process(*parse_args())
except optparse.OptParseError as exc:
print >> sys.stderr, exc
sys.exit(2)
except Exception as exc:
try:
info = exc.args[1]
except IndexError:
print "ERROR: %s" % (exc,)
logging.error(str(exc))
else:
print "ERROR: %s: %s" % (exc.args[0], info)
logging.error(exc.args[0], exc_info=info)
sys.exit(1)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ce3e467bee2432e67dcc978a34ac48c49a0424b6 | 466912406272829982f75854cf0104c6ce8c9814 | /data/nlp/fund/gongshang.py | 818b29f17f8deb811cc1922bc2142c0840229a65 | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | # -*- coding: utf-8 -*-
__author__ = 'victor'
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '..'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import db as dbcon
import loghelper
from common import dbutil
from datetime import datetime, timedelta
from bson.objectid import ObjectId
loghelper.init_logger("gsf", stream=True)
logger_gsf = loghelper.get_logger('gsf')
class GongshangFundEvent(object):
def __init__(self, check_period=1):
self.db = dbcon.connect_torndb()
self.mongo = dbcon.connect_mongo()
self.check_period = check_period
def generate_gs_fund_event(self):
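        # Scan topic messages from the last `check_period` days, refresh the
        # gongshang funding records and queue a task.news entry per company.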
global logger_gsf
yesterday = datetime.now() - timedelta(days=self.check_period)
logger_gsf.info('Gongshang Fund starts')
for tpm in dbutil.get_topic_messages(self.db, 44, yesterday):
logger_gsf.info('Processing %s' % tpm.id)
change_date = tpm.get('comments')
# update funding
cids = self.mongo.article.news.find_one({'_id': ObjectId(tpm.relateId)}).get('companyIds', [])
for cid in cids:
cprtid = dbutil.get_company_corporate_id(self.db, cid)
dbutil.update_gongshang_funding(self.db, cid, cprtid, change_date)
# generate task news
self.mongo.task.news.update({'news_id': tpm.relateId},
{'news_id': tpm.relateId, 'news_date': datetime.now(), 'type': 'fund',
'createTime': datetime.utcnow(), 'processStatus': int(0),
'source': 'gongshang', 'companyIds': cids}, True)
if __name__ == '__main__':
gsfe = GongshangFundEvent()
gsfe.generate_gs_fund_event()
| [
"[email protected]"
] | |
ed8d5a9e266a052caacfc08d036eb4aa9618228c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02886/s131934757.py | 9c2208d916bb0fd3e3065d0aabe691293dfc81f2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | #!/usr/bin/env python3
N = int(input())
D = [int(s) for s in input().split()]
life = 0
for i in range(N):
for j in range(i+1, N):
life += D[i] * D[j]
print(life)
| [
"[email protected]"
] | |
36a6f635c959bf4b4b5b30b9756599e5d0831ffd | 108db0d378354947b94b0d649fdb2779c8b7957f | /jwtauth/jwtauth/settings.py | 51713e79093067e6196f4aa3a8c2f50e6eee1390 | [] | no_license | P-iyushRaj/DRF-WORK | 25f2676f62694ea5619397a2e10aca0947dbe902 | 21ca80a6027f110e7213fe7ee3e783bcfe357089 | refs/heads/master | 2023-03-27T13:25:03.928779 | 2021-03-27T04:14:20 | 2021-03-27T04:14:20 | 346,010,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | """
Django settings for jwtauth project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z5ubsq2gb+^a4@^!tucy@(bv7u#1z6ksja(wq724&i5__6+3im'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'knox',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jwtauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jwtauth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'knox.auth.TokenAuthentication',
]
} | [
"[email protected]"
] | |
00d156d0f2a3e03443c4a9aa53137d649e2d9735 | 18a846d1d598d193976437fbefdf144a13e0404b | /mezzanine/utils/models.py | ff1d2ea8f18a782a0a0e607e836138d0e185fa25 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kowalej/mezzanine | 8282195c99717625856510474f4f9583a36c7cf6 | 96915c33325fd74277a630c27069e4c92482e951 | refs/heads/master | 2021-01-17T22:25:25.495684 | 2012-04-16T11:59:21 | 2012-04-16T11:59:21 | 3,392,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py |
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is ued for injecting model
fields and methods into models defined outside of a project.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires "
"an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(object):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
__metaclass__ = ModelMixinBase
| [
"[email protected]"
] | |
b1cbfa625f10b9e309d35cfdf8103961d6a183cb | 2dd4b89f60bd22d96ca6043666816069ba060875 | /TPplots/circos_convert_contigs_coords.py | 4458d301054967ca4be4740ced7df70f80b3440b | [] | no_license | liaochenlanruo/TPplots | 4d65b970f3a105c48a1a66aeb176e299f4bb3cea | 4d0ed24f9b5b7fcd80942abb5f22167d1aba38c6 | refs/heads/master | 2023-07-19T13:22:25.278304 | 2021-09-07T14:13:01 | 2021-09-07T14:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,444 | py | #!/usr/bin/env python
# python 2.7.5 requires biopython
########### promer2circos ############
def get_contig(location, contig_coordlist, contig_pos):
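    # Find the contig containing `location`; when it falls in a gap between
    # contigs, snap a start forward to the next contig and an end back to the
    # previous one.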
# print 'location', location
# print contig_coordlist
for i, one_contig in enumerate(contig_coordlist):
if location >= one_contig[1] and location <= one_contig[2]:
# print 'match!'
return one_contig, contig_coordlist[i + 1:len(contig_coordlist)], location
elif location > contig_coordlist[i - 1][2] and location <= one_contig[1]:
# print 'between contigs!'
if contig_pos == 'start':
# print "start located between contigs,------", location, '-->new start:', one_contig[1]
# print one_contig
# print contig_coordlist[i+1:len(contig_coordlist)]
# return contig start as location
return one_contig, contig_coordlist[i + 1:len(contig_coordlist)], one_contig[1]
else:
# print "end located between contigs-------", location, '-->new end:', contig_coordlist[i-1][2]
# end of contig located between contigs, return previous contig data
# print 'contig', contig_coordlist[i-1]
# print 'following contigs', contig_coordlist[i:len(contig_coordlist)]
return contig_coordlist[i - 1], contig_coordlist[i:len(contig_coordlist)], contig_coordlist[i - 1][2]
else:
# print "partial match, probably overlap between end contig and gap region", one_contig
continue
return False, False, False
def rename_karyotype(contig_coords, data_list):
'''
:param contig_coords: chr - NZ_JSAM00000000_1 NZ_JSAM00000000_1 0 104228 spectral-5-div-4 ==> keep 3, 4 et 5
:param data_list: list of contig coords: [[contig_X, start, end],[...]]
:return:
'''
import copy
renamed_data = []
for i, data in enumerate(data_list):
start = int(data[1])
end = int(data[2])
# print i, data
contig_start, following_contigs1, position1 = get_contig(start, contig_coords, contig_pos="start")
contig_end, following_contigs2, position2 = get_contig(end, contig_coords, contig_pos="end")
if (contig_start is False) or (contig_end is False):
# print 'one falese!'
# print start, end, contig_start, contig_end
if start > contig_coords[-1][1]:
# print 'last contig'
contig_start, following_contigs1, position1 = contig_coords[-1], [], start
contig_end, following_contigs2, position2 = contig_coords[-1], [], contig_coords[-1][2]
if contig_end is False:
continue
# print 'contig_start', contig_start
# print 'contig_end', contig_end
data[1] = position1
data[2] = position2
# if position2-position1>1000:
# print 'current range:', position1, position2
if contig_start[0] == contig_end[0]:
data[0] = contig_start[0]
renamed_data.append(data)
else:
# print 'new start end', position1, position2
# print 'contig start', contig_start
# print 'contig end', contig_end
# print 'spanning several contigs!'
# span across 2 contigs: make 2 coordinates (until the end of the first contig and from the begining of the second)
data_1 = copy.copy(data)
data_1[0] = contig_start[0]
data_1[2] = contig_start[2]
renamed_data.append(data_1)
# enumerate following contigs until we match the final one
for contig2 in following_contigs1:
# final contig of the range, add it and break the inner loop
if contig2[0] == contig_end[0]:
data_2 = copy.copy(data)
data_2[0] = contig_end[0]
# start from the first position of the second contig
data_2[1] = contig_end[1]
renamed_data.append(data_2)
break
else:
#print contig_end
#print 'entire contig within the range! %s bp long' % (int(contig2[2]) - int(contig2[1])), contig2
# entire contig comprised within the range
# add it entiely to the new list
renamed_data.append(contig2)
'''
for one_contig in contig_coords:
# within contig
if start >= one_contig[1] and end <=one_contig[2]:
data[0] = one_contig[0]
renamed_data.append(data)
# overlap between two contigs
elif start >= one_contig[1] and start <=one_contig[2] and end >one_contig[2]:
data_1 = data
data_2 = data
'''
return renamed_data
def read_circos_file(circos_file):
data_list = []
with open(circos_file) as f:
for row in f:
data = row.rstrip().split(' ')
if len(data) < 3:
data = row.rstrip().split('\t')
data_list.append(data)
return data_list
if __name__ == '__main__':
###Argument handling.
import argparse
    arg_parser = argparse.ArgumentParser(description='')
# arg_parser.add_argument("coords_input", help="Directory to show-coords tab-delimited input file.");
arg_parser.add_argument("-i", "--reference_karyotype", help="ref karyotype")
arg_parser.add_argument("-t", "--target_karyotype", help="target karyotype")
arg_parser.add_argument("-o", "--out_name", help="output name")
args = arg_parser.parse_args()
if not args.out_name:
        out_name = args.target_karyotype.split('.')[0] + '_renamed.' + args.target_karyotype.split('.')[1]
    else:
        out_name = args.out_name
with open(args.reference_karyotype, 'r') as f:
contig_coords = []
for row in f:
data = row.rstrip().split(' ')
if len(data) < 3:
data = row.rstrip().split('\t')
contig_coords.append([data[3], int(data[4]), int(data[5])])
data_list = read_circos_file(args.target_karyotype)
renamed_data = rename_karyotype(contig_coords, data_list)
with open(out_name, 'w') as new_circos_file:
for row in renamed_data:
row = [str(i) for i in row]
new_circos_file.write('\t'.join(row) + '\n')
| [
"[email protected]"
] | |
d08097d57e86dac2468c68b27434003012380081 | 17cad1d357380875243b804ffd13882f1a7d61a8 | /0x01-python-if_else_loops_functions/5-print_comb2.py | 261080a0c9b07790c69df208dff6f370a248541f | [] | no_license | Abdou-Hidoussi/holbertonschool-higher_level_programming | 9a0c0714b63ccd9823798adb51eb4f395ab375dc | 1dd37cc5f848d1f37884e6ffbe9598eae8c4f30e | refs/heads/master | 2023-03-05T00:02:25.283646 | 2021-02-18T20:42:26 | 2021-02-18T20:42:26 | 291,713,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | #!/usr/bin/python3
for x in range(0, 99):
print("{:02d}, ".format(x), end='')
print(x + 1)
| [
"[email protected]"
] | |
8020130f940f8f456909b2b56cec135d85f0a20b | 8bf56892667d732c67ed0ae43fe7b08923893c71 | /version4.py | 2d633cee8aa4adf9a88167e0e1eb32f2e21b3da3 | [] | no_license | erhan-orun/BuildTable | 8a3ff82a979a19f7f2b975b88c690a5f769510a0 | 546e14a548d22d0ac237c2c8e544e152e33002bd | refs/heads/master | 2023-09-01T16:27:32.695756 | 2021-11-02T06:22:15 | 2021-11-02T06:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,908 | py | import datetime as dt
import tkinter as tk
from tkinter import *
from tkinter import ttk
class App(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
        self.root = self  # reuse this Tk instance as the root instead of opening a second window
self.tree = ttk.Treeview(self.root, selectmode='extended')
self.tree["columns"] = ("1", "2", "3", "4")
self.tree.column("1", width=125, minwidth=30, anchor='c')
self.tree.column("2", width=125, minwidth=30, anchor='c')
self.tree.column("3", width=125, minwidth=30, anchor='c')
self.tree.column("4", width=125, minwidth=30, anchor='c')
self.tree.bind('<ButtonRelease-1>', self.selectItem)
self.tree.heading("1", text="ID")
self.tree.heading("2", text="Sensor No")
self.tree.heading("3", text="IP")
self.tree.heading("4", text="Time")
self.tree['show'] = 'headings'
self.root.geometry('540x400')
self.root.title("Sensor Insert")
self.tree.grid(row=1, column=1, columnspan=4, padx=20, pady=20)
verscrlbar = ttk.Scrollbar(self.root, orient="vertical", command=self.tree.yview)
self.tree.configure(xscrollcommand=verscrlbar.set)
self.time_data = dt.datetime.now().strftime('%Y-%m-%d %X')
self.add_label = tk.Label(self.root, text='Add Sensor',
font=('Helvetica', 16), width=30, anchor="c")
self.add_label.grid(row=2, column=1, columnspan=4)
self.name_label = tk.Label(self.root, text='Sensor No: ', width=10, anchor="c")
self.name_label.grid(row=3, column=1)
self.t1 = tk.Text(self.root, height=1, width=16, bg='white')
self.t1.grid(row=3, column=2)
self.l3 = tk.Label(self.root, text='Sensor IP: ', width=10)
self.l3.grid(row=5, column=1)
self.t3 = tk.Text(self.root, height=1, width=16, bg='white')
self.t3.grid(row=5, column=2)
self.b1 = tk.Button(self.root, text='Save', width=10,
command=lambda: self.add_data())
self.b1.grid(row=6, column=2)
self.my_str = tk.StringVar()
self.l5 = tk.Label(self.root, textvariable=self.my_str, width=10)
self.l5.grid(row=8, column=1)
self.i = 0
# self.root.mainloop()
def selectItem(self, event):
global cell_value
curItem = self.tree.item(self.tree.focus())
col = self.tree.identify_column(event.x)
print('curItem = ', curItem)
print('col = ', col)
if col == '#0':
cell_value = curItem['text']
elif col == '#1':
cell_value = curItem['values'][0]
elif col == '#2':
cell_value = curItem['values'][1]
elif col == '#3':
cell_value = curItem['values'][2]
elif col == '#4':
cell_value = curItem['values'][3]
print('cell_value = ', cell_value)
def add_data(self):
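        # Read both entry boxes, append a numbered row to the tree, then
        # clear the inputs and flash a short status message.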
sensor_name = self.t1.get("1.0", END)
sensor_ip = self.t3.get("1.0", END)
        self.i = self.i + 1
self.tree.insert("", 'end',
values=(int(self.i), sensor_name, sensor_ip, str(self.time_data)))
self.t1.delete('1.0', END) # reset the text entry box
self.t3.delete('1.0', END) # reset the text entry box
self.my_str.set("Sensor Added !")
self.t1.focus()
self.l5.after(3000, lambda: self.my_str.set('')) # remove the message
'''def record_data():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["Sensor_Record"]
mycol = mydb["data1"]
mycol.insert_one()
'''
'''if __name__ == "__main__":
app = App()
app.mainloop()'''
'''def main(): #run mianloop
root = tk.Tk()
app = Window1(root)
root.mainloop()
if __name__ == '__main__':
main()'''
app = App()
app.mainloop()
| [
"[email protected]"
] | |
1750d9bd2d2d74e5249e0afd3dbfb651013e01bd | cf7d6b1f45efe4d97389da2918b4f1b04673e66f | /utils/utils.py | 310d39b1f1996389d1f52e5ffaffd0b292de8091 | [] | no_license | metehancekic/deep_noise_rejection | 4d1379c16fe57ed95aa152d39f33bf36d1c501a9 | fd8e260e489f421fe7bd30c7ab8e9d397305247a | refs/heads/master | 2022-06-18T09:07:52.907752 | 2020-05-12T18:46:07 | 2020-05-12T18:46:07 | 262,714,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,431 | py | """
Utilities
PyTorch
Example Run
python -m deep_adv.utils.utils
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import os
from tqdm import tqdm
from deepillusion.torchattacks import PGD
def save_perturbed_images(args, model, device, data_loader, data_params, attack_params):
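    # Craft PGD adversarial examples batch by batch, track accuracy on them,
    # then dump the images/labels/predictions to a compressed .npz file.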
# Set phase to testing
model.eval()
test_loss = 0
correct = 0
all_images = []
all_labels = []
all_preds = []
for data, target in tqdm(data_loader):
data, target = data.to(device), target.to(device)
# Attacks
pgd_args = dict(net=model,
x=data,
y_true=target,
data_params=data_params,
attack_params=attack_params)
perturbs = PGD(**pgd_args)
data += perturbs
output = model(data)
test_loss += F.cross_entropy(output, target, reduction="sum").item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
all_images.append(data.detach().cpu().numpy())
all_labels.append(target.detach().cpu().numpy())
all_preds.append(pred.detach().cpu().numpy())
# Divide summed-up loss by the number of datapoints in dataset
test_loss /= len(data_loader.dataset)
# Print out Loss and Accuracy for test set
print(
f"\nAdversarial test set (l_{attack_params['norm']}): Average loss: {test_loss:.2f}, Accuracy: {correct}/{len(data_loader.dataset)} ({100. * correct / len(data_loader.dataset):.2f}%)\n"
)
all_images = np.array(all_images)
all_labels = np.array(all_labels)
all_preds = np.array(all_preds)
if not os.path.exists(args.directory + "data/attacked_images/"):
os.makedirs(args.directory + "data/attacked_images/")
np.savez_compressed(
args.directory + "data/attacked_images/" + args.model,
images=all_images,
labels=all_labels,
preds=all_preds,
)
def main():
from ..CIFAR10.read_datasets import cifar10
from ..CIFAR10.parameters import get_arguments
from ..CIFAR10.models.resnet import ResNet34
args = get_arguments()
# Get same results for each training with same parameters !!
torch.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
train_loader, test_loader = cifar10(args)
x_min = 0.0
x_max = 1.0
# Decide on which model to use
if args.model == "ResNet":
model = ResNet34().to(device)
else:
raise NotImplementedError
if device == "cuda":
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
model.load_state_dict(
torch.load(args.directory + "checkpoints/" + args.model + ".pt")
)
data_params = {"x_min": x_min, "x_max": x_max}
attack_params = {
"norm": "inf",
"eps": args.epsilon,
"step_size": args.step_size,
"num_steps": args.num_iterations,
"random_start": args.rand,
"num_restarts": args.num_restarts,
}
save_perturbed_images(
args,
model,
device,
test_loader,
data_params=data_params,
attack_params=attack_params,
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
21d2f7e2323d617b60c05ef764ccd5f70ec6a1c2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/34/usersdata/82/13368/submittedfiles/moedas.py | 7c8cb901e522ee6f3565298cf8f75f60a478e842 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # -*- coding: utf-8 -*-
from __future__ import division
a= int(input('Digite o valor de a:'))
b= int(input('Digite o valor de b:'))
c= int(input('Digite o valor de c:'))
qa= 0
qb= 0
contador=0
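# Try each count qa of coin 'a'; the remainder c - qa*a must be payable with coin 'b'.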
while qa<=(c//a):
qb=(c-qa*a)//b
if qa*a+qb*b==c:
contador=contador+1
break
else:
qa=qa+1
if contador>0:
    print(qa)
    print(qb)
else:
print ('N')
| [
"[email protected]"
] | |
afaefeacf6a86365775689c2eb6cfda2d6b6b824 | 7f031e500bb73f084f932a166c3398672a3b8027 | /config.py | 0dbbc38f5b3b39dd4a87cfdac60b95503a2eff92 | [] | no_license | Guangzhan/nlp_demo | 109fb0ed7f6bfc3469ac71cc59106449c1927ec5 | 4e88515968156461326dff3046c8bba14a12e32f | refs/heads/master | 2020-09-13T16:38:58.744704 | 2019-11-20T03:37:56 | 2019-11-20T03:37:56 | 222,843,808 | 0 | 0 | null | 2019-11-20T03:35:08 | 2019-11-20T03:35:07 | null | UTF-8 | Python | false | false | 1,025 | py | __author__ = 'yangbin1729'
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = '123@456#789$012%'
class DevelopmentConfig(Config):
DEBUG = True
MODEL_DIR = os.path.join(basedir, 'models')
    Word2Vec_DIR = os.path.join(MODEL_DIR,
                                'word2vec', 'wiki_corpus_above200.model')
LTP_DATA_DIR = os.path.join(MODEL_DIR, 'ltp')
CLASSIFIER_DIR = os.path.join(MODEL_DIR, 'classifier')
TOKENIZER = os.path.join(CLASSIFIER_DIR, 'tokenizer.pickle')
class ProductionConfig(Config):
DEBUG = False
MODEL_DIR = r'/home/student/project/project-01/noam/project01/models'
Word2Vec_DIR = os.path.join(MODEL_DIR,
'word2vec/wiki_corpus_above200.model')
LTP_DATA_DIR = os.path.join(MODEL_DIR, 'ltp')
CLASSIFIER_DIR = os.path.join(MODEL_DIR, 'classifier')
TOKENIZER = os.path.join(CLASSIFIER_DIR, 'tokenizer.pickle')
config = {'development': DevelopmentConfig, 'production': ProductionConfig, } | [
"[email protected]"
] | |
15106812cf7653e88c461073845014f9006b8bb3 | 8b4bb6cc0478b0bb535cc1bcf619b67bddf6c155 | /sorting/Frequency_Queries .py | a2fa8ac8735c99dcab94e98f8a7927fb8a7dc2e4 | [] | no_license | PiyushChandra17/HackerRank_DSA | 2da943fcbc09918ba09757b6b0849c42f49bbd22 | 609b8272bf56006833aa8d5385ef331605bcc0e1 | refs/heads/master | 2022-12-02T17:56:26.648609 | 2020-08-08T18:37:11 | 2020-08-08T18:37:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the freqQuery function below.
def freqQuery(queries):
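    # freq maps each value to its current frequency; cnt maps each frequency
    # to how many distinct values have it, so type-3 queries answer in O(1).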
freq = Counter()
cnt = Counter()
arr = []
for q in queries:
if q[0] == 1:
cnt[freq[q[1]]] -= 1
freq[q[1]] += 1
cnt[freq[q[1]]] += 1
elif q[0] == 2:
if freq[q[1]] > 0:
cnt[freq[q[1]]] -= 1
freq[q[1]] -= 1
cnt[freq[q[1]]] += 1
else:
if cnt[q[1]] > 0:
arr.append(1)
else:
arr.append(0)
return arr
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
ans = freqQuery(queries)
fptr.write('\n'.join(map(str, ans)))
fptr.write('\n')
fptr.close()
| [
"[email protected]"
] | |
bd5bfdad4fb1f2096c2f1618a38e3041863b7c38 | 958fc2764dedf880b0027bcc00d4f042e0fb61b0 | /natas5.py | 2e2861fe33ce2f0ffd5ed00472e3b163cebfe8c4 | [] | no_license | marciopocebon/overthewire_natas_solutions | ee950249341abd639042efea8fd817c0951c68b9 | 67c726c74f9e7c0840a4b1c3b633a4bbc185f4a3 | refs/heads/master | 2021-01-04T01:06:18.654269 | 2018-10-15T22:33:38 | 2018-10-15T22:33:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #!/usr/bin/env python
import requests
import re
username = 'natas5'
password = 'iX6IOfmpN7AYOQGPwtn3fXpbaJVJcHfq'
# headers = { "Referer" : "http://natas5.natas.labs.overthewire.org/" }
cookies = { "loggedin" : "1" }
url = 'http://%s.natas.labs.overthewire.org/' % username
# response = requests.get(url, auth = (username, password), headers = headers )
session = requests.Session()
response = session.get(url, auth = (username, password), cookies = cookies )
content = response.text
# print content
print re.findall(' natas6 is (.*)</div>', content)[0]
| [
"[email protected]"
] | |
2cbf15f90fb9026a383dca7a34fb3f4ca4d06a7d | fa89ef4a8eb06dc2015d7116637f230b6891eb8d | /refinery/units/formats/pe/dotnet/__init__.py | 187abebc117fd0d022f710618937f9f2e1c730b3 | [
"BSD-3-Clause"
] | permissive | binref/refinery | f61878d9fddf616fee8edf226df22f6a35238940 | 4c7c3717ae45543b9d7bae60a4af4c00993cf719 | refs/heads/master | 2023-08-17T17:02:34.357138 | 2023-08-14T08:43:05 | 2023-08-14T08:43:05 | 228,019,736 | 439 | 48 | NOASSERTION | 2023-09-11T10:26:02 | 2019-12-14T12:32:06 | Python | UTF-8 | Python | false | false | 2,459 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from typing import Any
from enum import Enum
from hashlib import md5, sha1, sha256, sha512
from zlib import crc32
from refinery.units import Arg, Unit
from refinery.units.encoding.hex import hex
from refinery.units.encoding.esc import esc
from refinery.units.encoding.url import url
from refinery.units.encoding.b64 import b64
from refinery.lib.json import BytesAsArrayEncoder
from refinery.lib.dotnet.types import Blob
class UNIT(Enum):
HEX = hex
ESC = esc
URL = url
B64 = b64
class HASH(Enum):
MD5 = md5
CRC32 = crc32
SHA1 = sha1
SHA256 = sha256
SHA512 = sha512
class DotNetEncoder(BytesAsArrayEncoder):
def default(self, obj):
if isinstance(obj, Blob):
obj = bytes(obj)
try:
return super().default(obj)
except TypeError:
return str(obj)
class JSONEncoderUnit(Unit, abstract=True):
"""
An abstract unit that provides the interface for displaying parsed data
as JSON. By default, binary data is converted into integer arrays.
"""
def __init__(
self,
encode: Arg.Option('-e', group='BIN', choices=UNIT, help=(
'Select an encoder unit used to represent binary data in the JSON output. Available are: {choices}.')) = None,
digest: Arg.Option('-d', group='BIN', choices=HASH, help=(
'Select a hashing algorithm to digest binary data; instead of the data, only the hash will be displayed. The '
'available algorithms are: {choices}.')) = None,
**keywords
):
encode = Arg.AsOption(encode, UNIT)
digest = Arg.AsOption(digest, HASH)
super().__init__(**keywords)
if encode is not None and digest is not None:
raise ValueError('Only one binary data conversion can be specified.')
elif encode is not None:
unit = encode.value()
class CustomEncoder(DotNetEncoder): # noqa
def encode_bytes(self, obj): return unit.reverse(obj).decode('utf8')
elif digest is not None:
class CustomEncoder(DotNetEncoder):
def encode_bytes(self, obj): return digest(obj).hexdigest()
else:
CustomEncoder = DotNetEncoder
self.encoder = CustomEncoder
def to_json(self, obj: Any) -> bytes:
return json.dumps(obj, cls=self.encoder, indent=4).encode(self.codec)
| [
"[email protected]"
] | |
c90c46d63978b7b8df5aa9b6761c81c9b33f0160 | ce9d475cebeaec9cf10c467c577cb05c3b431fad | /code/chapter_22_example_07.py | 14c8271ee19d7a4d7aae1a0b9ae7a72b488b3952 | [] | no_license | Sundarmax/two-scoops-of-django-2.0-code-examples | 9c8f98d145aaa5498bb558fc5125379cd39003e5 | a15b2d4c240e879c03d2facf8592a644e27eb348 | refs/heads/master | 2022-04-19T10:14:53.795688 | 2020-03-04T15:16:25 | 2020-03-04T15:16:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | """
Using This Code Example
=========================
The code examples are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.11 while working on Django 2.0 projects. Code samples follow
PEP-0008, with exceptions made for the purposes of improving book
formatting. Example code is provided "as is", and is not intended to be,
and should not be considered or labeled as, "tutorial code".
Permissions
============
In general, you may use the code we've provided with this book in your
programs and documentation. You do not need to contact us for permission
unless you're reproducing a significant portion of the code or using it in
commercial distributions. Examples:
* Writing a program that uses several chunks of code from this course does
not require permission.
* Selling or distributing a digital package from material taken from this
book does require permission.
* Answering a question by citing this book and quoting example code does not
require permission.
* Incorporating a significant amount of example code from this book into your
product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.11, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2017 Two Scoops Press
(978-0-692-91572-1)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at [email protected].
"""
@mock.patch.object(requests, 'get')
def test_request_failure(self, get):
    """Test if the target site is inaccessible."""
    get.side_effect = requests.exceptions.ConnectionError()
    with self.assertRaises(CantListFlavors):
        list_flavors_sorted()

@mock.patch.object(requests, 'get')
def test_ssl_failure(self, get):
    """Test if we can handle SSL problems elegantly."""
    get.side_effect = requests.exceptions.SSLError()
    with self.assertRaises(CantListFlavors):
        list_flavors_sorted()
| [
"[email protected]"
] | |
d6ab0ddce5ea53451beeea5d56de10025e55a93e | f805cf2eef884fa0670332208b1b70ed8549751d | /parametres/urls.py | 3cc11de2d7d0e22a6ae2ac1a737dbbceacd4524a | [] | no_license | parheto10/agro_tracability | 8314ad73f2504dd90af4600938065ca18d93ab07 | 42520a3a2753707ed1100b5cdfb680bf7e00c80f | refs/heads/master | 2023-08-05T16:17:02.481499 | 2021-10-08T16:54:10 | 2021-10-08T16:54:10 | 414,891,821 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,456 | py | from django.urls import path
# from cooperatives.views import parcelle_list
from .views import (
connexion,
loggout,
index,
detail_coop,
map_parcelles,
coop_parcelles,
catre_parcelles,
catre_parcelles_coop
# projet,
# # pepiniere,
# # detail_pepiniere,
# # formation,
# detail_proj,
# localisation,
# # chart,
# prod_coop,
# parcelle_coop,
# localisation_coop,
# section_coop,
# # PlantingApiView,
# sous_section_coop,
# Stats_coop, Production_plan,
# Stats_semences, stat_prod_coop, plants_coop, semences_coop, formations, detail_formation, site_pepinieres,
# coop_pepiniere, pepiniere, pepiniere_coop, export_prods_to_pdf, export_parcelles_to_pdf, export_prod_xls,
# export_parcelle_xls, export_plant_xls, export_formation_xls, ParcellesMapView, ProducteurApiView, ParcelleApiView,
# ParcelleJson, PepiniereJson, PepiniereApiView, FormationApiView, folium_map,
# planting_coop, planting_list, produteur_list, parcelles_list, details_planting_list,
# DetailPlantingJson, DetailPlantingApiView, detail_planting, folium_palntings_map,
# Plantings, detailPlantings, plantings_coop, plantingcoop, update_projet, delete_projet # , ParcelleCooperativeApi
# detail_formation,
)
urlpatterns = [
path('', connexion, name='connexion'),
path('logout', loggout, name='logout'),
path('index/', index, name='accueil'),
path('detail_coop/<int:id>', detail_coop, name='detail_coop'),
# path('projets/', projet, name='projets'),
# path('projets/<int:id>/update/', update_projet, name='update_projet'),
# path('projets/<int:id>/delete/', delete_projet, name='delete_projet'),
# path('pepinieres/', pepiniere, name='pepinieres'),
# path('pepiniere_coop/<int:id>', pepiniere_coop, name='pepiniere_coop'),
# path('formation/<int:id>', formations, name='formations'),
# path('formation/<int:id>/<int:_id>', detail_formation, name='formation'),
# path('Stats_coop/', Stats_coop, name='stats_coop'),
# path('Stats_semences/', Stats_semences, name='stats_semences'),
# path('Plantings/', Plantings, name='Plantings'),
# path('detailPlantings/', detailPlantings, name='detailPlantings'),
# path('plantings_coop/<int:id>', plantings_coop, name='plantings_coop'),
# path('Production_plan/', Production_plan, name='production_plan'),
# path('stat_prod_coop/<int:id>', stat_prod_coop, name='stat_prod_coop'),
# path('plants_coop/<int:id>', plants_coop, name='plants_coop'),
# path('semences_coop/<int:id>', semences_coop, name='semences_coop'),
# # path('pepiniere/', pepiniere, name='pepiniere'),
# # path('formations/', formation, name='formations'),
# path('producteurs/<int:id>', prod_coop, name='prod_coop'),
# path('parcelles/<int:id>', parcelle_coop, name='parcelle_coop'),
# path('sections/<int:id>', section_coop, name='section_coop'),
# path('sous_sections/<int:id>', sous_section_coop, name='sous_section_coop'),
# path('plantings/<int:id>', planting_coop, name='planting_coop'),
# path('planting/<int:id>/<int:_id>', detail_planting, name='planting'),
# path('coordonnes/<int:id>', localisation_coop, name='localisation_coop'),
# path('planting_coop/<int:id>', plantingcoop, name='plantingcoop'),
# path('localisation/', localisation, name='localisation'),
# path('detail_proj/<int:id>', detail_proj, name='detail_proj'),
# path('site_pepinieres/', site_pepinieres, name='site_pepinieres'),
# path('coop_pepiniere/<int:id>', coop_pepiniere, name='coop_pepiniere'),
# # path('detail_pepiniere/<int:id>', detail_pepiniere, name='detail_pepiniere'),
# # path('formation/<int:id>', detail_formation, name='formation'),
# path('chart/<int:id>', chart, name='chart'),
#Export to Excel
# path('cooperative/<int:id>/producteurs/xls/', export_prod_xls, name='export_prod_xls'),
# # path('sections/xls/', export_section_xls, name='export_section_xls'),
# # path('sous_sections/xls/', export_sous_section_xls, name='export_sous_section_xls'),
# path('cooperative/<int:id>/parcelles/xls/', export_parcelle_xls, name='export_parcelle_xls'),
# path('cooperative/<int:id>/plants/xls/', export_plant_xls, name='export_plant_xls'),
# path('cooperative/<int:id>/formations/xls/', export_formation_xls, name='export_formation_xls'),
# Export Données EN PDF
# path('producteurs/pdf/<int:id>', export_prods_to_pdf, name='export_prods_to_pdf'),
# path('parcelles/pdf/<int:id>', export_parcelles_to_pdf, name='export_parcelles_to_pdf'),
#Api Urls
# path('api/producteurs', ProducteurApiView.as_view(), name="producteurs_api"),
# path('api/parcelles', ParcelleApiView.as_view(), name="parcelles_api"),
# path('api/details_plantings', DetailPlantingApiView.as_view(), name="detail_plantings_api"),
# # path('api/parcelles_coop', ParcelleCooperativeApi.as_view(), name="coop_parcelles_api"),
# path('api/pepinieres', PepiniereApiView.as_view(), name="pepinieres_api"),
# path('api/formations', FormationApiView.as_view(), name="formations_api"),
#map leaflet
# path('pepinieres_json/', PepiniereJson.as_view(), name="pepinieres_json"),
# # path('geolocalisation/', ParcelleJson.as_view(), name='geolocalisation'),
# path('geolocalisation/', ParcelleJson, name='geolocalisation'),
# path('details_planting/', DetailPlantingJson.as_view(), name='details_planting'),
# # path('parcelles/data', ParcellesView.as_view(), name="data"),
# path('parcelles/data', parcelle_list, name="data"),
#Folium Map
# path('folium_map/', folium_map, name="folium_map"),
# path('folium_palntings_map/', folium_palntings_map, name="folium_palntings_map"),
# Api
path('api/v1/map_parcelles/', map_parcelles, name='map_parcelles'),
path('map_parcelles/', catre_parcelles, name='catre_parcelles'),
path('api/v1/coop_parcelles/<int:id>', coop_parcelles, name='coop_parcelles'),
path('coop_parcelles/', catre_parcelles_coop, name='carte_parcelles_coop'),
# path('plantings/api/v1/', planting_list, name="plantings"),
# path('producteurs/api/v1/', produteur_list, name="producteurs"),
# path('parcelles/api/v1/', parcelles_list, name="parcelles"),
# path('plantings/api/v1/', planting_list, name="plantings"),
# path('details_plantings/api/v1/', details_planting_list, name="details_plantings"),
]
| [
"[email protected]"
] | |
6578c2d9e7c4ebe710d7ec7ba3661cb86e8b6c35 | 411a600c355e34f8a3d158a1de6b22f0f509aa18 | /hw2/hw2-copy-figures.py | 0440c833791a9c594c3ed8532298d41ec10fc7cf | [] | no_license | eggachecat/pynn | e216e6cd5f0c9671ef5009e1422bdaa801f7b0f4 | 74a75ee56483be22b520b418b5a52ae176f4a8e1 | refs/heads/master | 2021-01-11T00:37:58.010712 | 2016-12-31T17:25:38 | 2016-12-31T17:25:38 | 70,506,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | import shutil, errno
import os
def copyanything(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
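# e.g. copyanything('run1', 'backup/run1') copies a whole directory tree; a
# plain-file source raises ENOTDIR inside copytree and falls back to shutil.copy.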
results = [988970911,
941530379,
349685028,
783952587,
156884050,
449777493,
983956549,
106841919,
994865007,
87401771,
782990781,
666671943,
944565074,
195339946,
312443606,
721505406,
41157021,
790121321,
805213998,
963255433]
root_src = os.path.join(os.path.dirname(__file__), "exp_figures")
root_dst = "d:\\good_exp_figures"
if not os.path.exists(root_dst):
os.makedirs(root_dst)
for dirname in results:
    src = os.path.join(root_src, str(dirname))
    dst = os.path.join(root_dst, str(dirname))
copyanything(src, dst) | [
"[email protected]"
] | |
76cc67eda36092c76628d9d8c651dd0f974afeda | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/cmd/data/CreateDataInStructureBackgroundCmd.pyi | 5722a33e1967b7c8c164d39a1314696abfc50abb | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | pyi | import ghidra.framework.cmd
import ghidra.framework.model
import ghidra.util.task
import java.lang
class CreateDataInStructureBackgroundCmd(ghidra.framework.cmd.BackgroundCommand):
"""
Background command to create data across a selection inside of a structure.
"""
@overload
def __init__(self, addr: ghidra.program.model.address.Address, startPath: List[int], length: int, dt: ghidra.program.model.data.DataType):
"""
Constructs a command for applying dataTypes within an existing structure
across a range of components.
Simple pointer conversion will NOT be performed.
@param addr The address of the existing structure.
@param startPath the componentPath where to begin applying the datatype.
@param length the number of bytes to apply the data type to.
@param dt the datatype to be applied to the range of components.
"""
...
@overload
def __init__(self, addr: ghidra.program.model.address.Address, startPath: List[int], length: int, dt: ghidra.program.model.data.DataType, stackPointers: bool):
"""
This is the same as {@link #CreateDataInStructureBackgroundCmd(Address, int[], int, DataType )} except that
it allows the caller to control whether or not a pointer data type is created when a
non-pointer data type is applied at a location that previously contained a pointer data
type.
@param addr The address of the existing structure.
@param startPath the componentPath where to begin applying the datatype.
@param length the number of bytes to apply the data type to.
@param dt the datatype to be applied to the range of components.
@param stackPointers True will convert the given data type to a pointer if it is not one
and the previous type was a pointer; false will not make this conversion
"""
...
@overload
def applyTo(self, obj: ghidra.framework.model.DomainObject) -> bool: ...
@overload
def applyTo(self, obj: ghidra.framework.model.DomainObject, monitor: ghidra.util.task.TaskMonitor) -> bool:
"""
@see ghidra.framework.cmd.BackgroundCommand#applyTo(ghidra.framework.model.DomainObject, ghidra.util.task.TaskMonitor)
"""
...
def canCancel(self) -> bool:
"""
Check if the command can be canceled.
@return true if this command can be canceled
"""
...
def dispose(self) -> None:
"""
Called when this command is going to be removed/canceled without
running it. This gives the command the opportunity to free any
temporary resources it has hold of.
"""
...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getName(self) -> unicode: ...
def getStatusMsg(self) -> unicode: ...
def hasProgress(self) -> bool:
"""
Check if the command provides progress information.
@return true if the command shows progress information
"""
...
def hashCode(self) -> int: ...
def isModal(self) -> bool:
"""
Check if the command requires the monitor to be modal. No other
command should be allowed, and the GUI will be locked.
@return true if no other operation should be going on while this
command is in progress.
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def taskCompleted(self) -> None:
"""
Called when the task monitor is completely done with indicating progress.
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
554ddb63741d0c1664dbeb9b2eae63e3ea3ff840 | d60ee49abaee6c74c5b777f8f112a7f75f71f029 | /genome/variants2/filter/VCF/somatic/genes/indels/common_rsid.py | e24057ae9cd0d708b82f81fa0fea1cf630326aea | [] | no_license | ak352/melanomics | 41530f623b4bfdbd5c7b952debcb47622d1a8e88 | fc5e6fdb1499616fb25a8dc05259add8a65aeca0 | refs/heads/master | 2020-12-24T16:14:42.271416 | 2015-08-06T12:48:52 | 2015-08-06T12:48:52 | 18,439,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import sys
def ParseFields(line):
fields = {}
var = line[:-1].split("\t")
for x in range(0, len(var)):
fields[var[x]] = x
return fields
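# Example of the index map ParseFields builds (illustrative header line):
#   ParseFields("CHROM\tPOS\tID\n") -> {"CHROM": 0, "POS": 1, "ID": 2}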
def read_tsv(infile):
with open(infile) as f:
var = ParseFields(next(f))
for line in f:
record = {}
line = line[:-1].split("\t")
for x in var:
record[x] = line[var[x]]
record["all"] = "\t".join(line)
yield record
def read_vcf(infile, count=False):
with open(infile) as f:
line = next(f)
while line.startswith("##"):
line = next(f)
assert line.startswith("#CHROM")
        var = ParseFields(line[1:])  # drop the leading '#' so keys are CHROM, POS, ID, ...
num_lines = 0
for line in f:
record = {}
            line = line[:-1].split("\t")  # strip only the newline; [1:-1] also cut the first character of CHROM
for x in var:
record[x] = line[var[x]]
record["all"] = "\t".join(line)
num_lines += 1
if count:
                if num_lines % 1000000 == 0:  # report every millionth line, not all the lines in between
sys.stderr.write("%d lines processed...\n" % num_lines)
yield record
def read_dbsnp_vcf():
# common_variants = set()
# for record in read_vcf(vcf):
# for info in record["INFO"].split(";"):
# info = info.split("=")
# if info[0] == "COMMON":
# if info[1] == "1":
# iden = record["ID"]
# assert
# common_variants.add()
# values.add(info[1])
# print values
return
def report(line, log):
for s in sys.stderr, log:
s.write(line)
def get_non_flagged():
vcf="/work/projects/isbsequencing/data/dbsnp/hg19/dbSNP138/00-All.vcf"
infile = "/work/projects/melanomics/tools/annovar/2015Mar22/annovar/humandb/hg19_snp138NonFlagged.wheader.txt.rsids"
stderr = sys.stderr
stderr.write("dbSNP Non-flagged file: %s\n" % infile)
ids = set([line[:-1] for line in open(infile)])
return ids
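# Minimal usage sketch (hypothetical path; each yielded record maps a column
# name to its value, with the full row joined under "all"):
#   for record in read_vcf("calls.vcf", count=True):
#       print(record["CHROM"], record["POS"], record["ID"])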
| [
"[email protected]"
] | |
4e44339e8aa9e97762dd77cc73821e37bf44948a | 8613ec7f381a6683ae24b54fb2fb2ac24556ad0b | /boot/hard/knight.py | 906fb70920ec67cbdcdac0eaf27502fa1a44eb0f | [] | no_license | Forest-Y/AtCoder | 787aa3c7dc4d999a71661465349428ba60eb2f16 | f97209da3743026920fb4a89fc0e4d42b3d5e277 | refs/heads/master | 2023-08-25T13:31:46.062197 | 2021-10-29T12:54:24 | 2021-10-29T12:54:24 | 301,642,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | x, y = map(int, input().split())
mod = 10 ** 9 + 7
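# Counting argument (added note): with n knight moves of (+2,+1) and m moves
# of (+1,+2), x = 2n + m and y = n + 2m, hence n = (2x - y)/3, m = (2y - x)/3.
# The answer is C(n + m, n) mod p, assembled below from modular inverses via
# Fermat's little theorem: pow(k, p - 2, p).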
if (x + y) % 3 != 0:
ans = 0
else:
    # (x + y) % 3 == 0 guarantees both quotients are integers, so use // and
    # avoid float precision loss on large inputs
    n, m = (2 * x - y) // 3, (2 * y - x) // 3
    ans = 0
    if n >= 0 and m >= 0:
        ans = 1
for i in range(min(m, n)):
ans = ans * (n + m - i) % mod
ans *= pow(i + 1, mod - 2, mod)
print(ans % mod) | [
"[email protected]"
] | |
dbf7273504313e2c22795c47ad2dc7bcb84860ae | 2f86dda1ede21eb5fd0ad9bd32efb7de4c268efd | /citizen/spiders/spider.py | 71e50c1ffc85a415075fb41f56e5e3ced9e7f37f | [] | no_license | SimeonYS/citizen | 5a08c0108f2d1509ee34c4c40234a4bd406ca026 | 69dd47e459e251e18d6ecd18a8b6b86df64ceb59 | refs/heads/main | 2023-04-01T08:50:36.399086 | 2021-03-30T13:18:29 | 2021-03-30T13:18:29 | 353,007,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import CitizenItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class CitizenSpider(scrapy.Spider):
name = 'citizen'
start_urls = ['https://www.citizens-bank.com/news/']
def parse(self, response):
post_links = response.xpath('//h2/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
date = response.xpath('//span[@class="fl-post-info-date"]/text()').get()
title = response.xpath('//h1/span/text()').get()
content = response.xpath('//div[@class="fl-module fl-module-fl-post-content fl-node-599c6b46b54ad"]//text()').getall()
content = [p.strip() for p in content if p.strip()]
content = re.sub(pattern, "",' '.join(content))
item = ItemLoader(item=CitizenItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('link', response.url)
item.add_value('content', content)
item.add_value('date', date)
yield item.load_item()
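# Usage sketch (standard Scrapy CLI; the output filename is illustrative):
#   scrapy crawl citizen -o citizen_posts.json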
| [
"[email protected]"
] | |
0dc7a3414e2450000801ac10defe6303bb854684 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2023_02_01_preview/operations/_code_versions_operations.py | 75708494a685cf14484d28bba1f2f28657048b8f | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 22,838 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-02-01-preview") # type: str
order_by = kwargs.pop('order_by', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
skip = kwargs.pop('skip', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if order_by is not None:
_query_parameters['$orderBy'] = _SERIALIZER.query("order_by", order_by, 'str')
if top is not None:
_query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if skip is not None:
_query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_delete_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-02-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-02-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-02-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"version": _SERIALIZER.url("version", version, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
# fmt: on
class CodeVersionsOperations(object):
"""CodeVersionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
order_by=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.CodeVersionResourceArmPaginatedResult"]
"""List versions.
List versions.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name. This is case-sensitive.
:type name: str
:param order_by: Ordering of list.
:type order_by: str
:param top: Maximum number of records to return.
:type top: int
:param skip: Continuation token for pagination.
:type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeVersionResourceArmPaginatedResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CodeVersionResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop("api_version", "2023-02-01-preview") # type: str
cls = kwargs.pop("cls", None) # type: ClsType["_models.CodeVersionResourceArmPaginatedResult"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
order_by=order_by,
top=top,
skip=skip,
template_url=self.list.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
api_version=api_version,
order_by=order_by,
top=top,
skip=skip,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CodeVersionResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete version.
Delete version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name. This is case-sensitive.
:type name: str
:param version: Version identifier. This is case-sensitive.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2023-02-01-preview") # type: str
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
api_version=api_version,
template_url=self.delete.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CodeVersion"
"""Get version.
Get version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name. This is case-sensitive.
:type name: str
:param version: Version identifier. This is case-sensitive.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.CodeVersion"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2023-02-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
api_version=api_version,
template_url=self.get.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CodeVersion", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
version, # type: str
body, # type: "_models.CodeVersion"
**kwargs # type: Any
):
# type: (...) -> "_models.CodeVersion"
"""Create or update version.
Create or update version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name. This is case-sensitive.
:type name: str
:param version: Version identifier. This is case-sensitive.
:type version: str
:param body: Version entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.CodeVersion
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.CodeVersion"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2023-02-01-preview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = self._serialize.body(body, "CodeVersion")
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("CodeVersion", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("CodeVersion", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"} # type: ignore
| [
"[email protected]"
] | |
0dbf512149e37a7f23588db495fee8f788b3ee6c | edf510cc5bbbe24469d8ff262c022b33b4d80a75 | /tacotron2/data/text/symbols.py | 3608a7a394aeac2cffae9ed87a7c4183ba9886b1 | [
"Apache-2.0",
"MIT"
] | permissive | rheehot/Tacotron2 | e8b8a4be614708800b10b9fa7829264407510fa8 | ddbe55b426397d40cadd14f5040c55ba7c25615d | refs/heads/master | 2022-12-26T14:13:39.966498 | 2020-10-06T18:34:57 | 2020-10-06T18:34:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | """ from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _letters. See TRAINING_DATA.md for details. '''
from tacotron2.data.text import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
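# Typical lookup tables derived from this list (sketch; the names are
# illustrative, not defined in this module):
#   _symbol_to_id = {s: i for i, s in enumerate(symbols)}
#   _id_to_symbol = {i: s for i, s in enumerate(symbols)}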
| [
"[email protected]"
] | |
8d2bea60b5b2b31185d035449eb07b04603efb6d | f76ba1e72ed81d85450f2584ddbb9b033396a3db | /alembic/versions/20210306_212825_.py | 303ec6cd6ce7b1e84e2bc2a92f015e4e69b8404b | [
"MIT"
] | permissive | webclinic017/magnet-migrade | 0e4823c32a6734628b0d3fc119f9c20ea1f9a167 | b5669b34a6a3b845df8df96dfedaf967df6b88e2 | refs/heads/main | 2023-05-07T02:45:20.594756 | 2021-06-08T02:14:24 | 2021-06-08T02:14:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | """empty message
Revision ID: 20210306_212825
Revises: 20210306_151508
Create Date: 2021-03-06 21:28:25.744854
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "20210306_212825"
down_revision = "20210306_151508"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"trade_account",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("version", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.String(length=1024), nullable=False),
sa.Column("provider", sa.String(length=255), nullable=False),
sa.Column("market", sa.String(length=255), nullable=False),
sa.Column("margin", sa.DECIMAL(), nullable=False),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], onupdate="RESTRICT", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"trade_virtual_account",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("trade_account_id", sa.Integer(), nullable=True),
sa.Column("version", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.String(length=1024), nullable=False),
sa.Column("product", sa.String(length=255), nullable=False),
sa.Column("periods", sa.Integer(), nullable=False),
sa.Column("allocated_margin", sa.DECIMAL(), nullable=False),
sa.Column("allocation_rate", sa.DECIMAL(), nullable=False),
sa.Column("ask_limit_rate", sa.DECIMAL(), nullable=True),
sa.Column("ask_loss_rate", sa.DECIMAL(), nullable=True),
sa.Column("bid_limit_rate", sa.DECIMAL(), nullable=True),
sa.Column("bid_loss_rate", sa.DECIMAL(), nullable=True),
sa.Column("position", sa.JSON(), nullable=True),
sa.ForeignKeyConstraint(
["trade_account_id"],
["trade_account.id"],
onupdate="RESTRICT",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], onupdate="RESTRICT", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("trade_account_id", "name"),
)
op.drop_table("trade_acount")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"trade_acount",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("version", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("name", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column(
"description", sa.VARCHAR(length=1024), autoincrement=False, nullable=False
),
sa.Column(
"provider", sa.VARCHAR(length=255), autoincrement=False, nullable=False
),
sa.Column(
"market", sa.VARCHAR(length=255), autoincrement=False, nullable=False
),
sa.Column(
"accounts",
postgresql.JSON(astext_type=sa.Text()),
autoincrement=False,
nullable=False,
),
sa.Column("margin", sa.NUMERIC(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("id", name="trade_acount_pkey"),
)
op.drop_table("trade_virtual_account")
op.drop_table("trade_account")
# ### end Alembic commands ###
| [
"[email protected]"
] | |
31bb29c24f4df0a7d8e86690e72edab6a5cdcf44 | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /easy/Linked List Cycle.py | ef76a4e5ca516e9b8d0abae3a86665e3f9b96c72 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | # -*- coding: utf-8 -*-
# @Time : 2019/9/25 0025 15:17
# @Author : 没有蜡笔的小新
# @E-mail : [email protected]
# @FileName: Linked List Cycle.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given a linked list, determine if it has a cycle in it.
To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to. If pos is -1, then there is no cycle in the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
Follow up:
Can you solve it using O(1) (i.e. constant) memory?
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def listToListNode(data: list) -> ListNode:
listRoot = ListNode(0)
ptr = listRoot
for d in data:
ptr.next = ListNode(d)
ptr = ptr.next
ptr = listRoot.next
return ptr
def printListNode(l: ListNode) -> None:
p = l
val_list = []
while p:
val_list.append(str(p.val))
p = p.next
print(' -> '.join(val_list))
def hasCycle(head):
if not head:
return False
f, s = head, head
while s.next and s.next.next:
f = f.next
s = s.next.next
if f == s:
return True
return False
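# hasCycle is Floyd's tortoise-and-hare: f advances one node per step, s two,
# so they can only meet if a cycle exists. That gives O(n) time and O(1)
# extra memory, answering the "Follow up" above.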
if __name__ == '__main__':
    head = listToListNode([3, 2, 0, -4])
    tail = head
    while tail.next:
        tail = tail.next
    tail.next = head.next  # tail connects to node index 1, matching Example 1 (pos = 1)
    print(hasCycle(head))  # True
| [
"[email protected]"
] | |
8b90d101850fc61cbc1c2b0fdcc37cce6600ae8c | 058f1b9c83aa55c4803851f7880de00767491c00 | /test/test_search.py | 28b1174531fa143844710aced51a3f97565242a1 | [
"MIT"
] | permissive | samsammurphy/sat-search | f9a017ded12486e95826f62cc67fc2533010cbd5 | d81e4774a41990b73b55db4b1e05b21062dd957c | refs/heads/master | 2020-05-22T15:17:31.071946 | 2019-02-14T21:37:34 | 2019-02-14T21:37:34 | 186,404,233 | 0 | 0 | MIT | 2019-05-13T11:12:42 | 2019-05-13T11:12:42 | null | UTF-8 | Python | false | false | 3,123 | py | import os
import glob
import json
import unittest
import satsearch.config as config
from satstac import Item
from satsearch.search import SatSearchError, Search
class Test(unittest.TestCase):
path = os.path.dirname(__file__)
results = []
@classmethod
def setUpClass(cls):
fnames = glob.glob(os.path.join(cls.path, '*-item*.json'))
for fname in fnames:
with open(fname) as f:
cls.results.append(json.load(f))
def get_searches(self):
""" Initialize and return search object """
return [Search(datetime=r['properties']['datetime']) for r in self.results]
def test_search_init(self):
""" Initialize a search object """
search = self.get_searches()[0]
dts = [r['properties']['datetime'] for r in self.results]
assert(len(search.kwargs) == 1)
assert('time' in search.kwargs)
for kw in search.kwargs:
self.assertTrue(search.kwargs[kw] in dts)
def test_search_for_items_by_date(self):
""" Search for specific item """
search = self.get_searches()[0]
sids = [r['id'] for r in self.results]
items = search.items()
assert(len(items) == 1)
for s in items:
self.assertTrue(s.id in sids)
def test_empty_search(self):
""" Perform search for 0 results """
search = Search(datetime='2001-01-01')
self.assertEqual(search.found(), 0)
def test_geo_search(self):
""" Perform simple query """
with open(os.path.join(self.path, 'aoi1.geojson')) as f:
aoi = json.dumps(json.load(f))
search = Search(datetime='2018-09-25', intersects=aoi)
assert(search.found() == 2)
items = search.items()
assert(len(items) == 2)
assert(isinstance(items[0], Item))
def test_search_sort(self):
""" Perform search with sort """
with open(os.path.join(self.path, 'aoi1.geojson')) as f:
aoi = json.dumps(json.load(f))
search = Search.search(datetime='2018-01-01/2018-01-15', intersects=aoi, sort=['<datetime'])
items = search.items()
assert(len(items) == 33)
def test_get_items_by_id(self):
""" Get Items by ID """
ids = ['LC80340332018034LGN00', 'LC80340322018034LGN00']
items = Search.items_by_id(ids, collection='landsat-8-l1')
assert(len(items) == 2)
def test_get_ids_search(self):
""" Get Items by ID through normal search """
ids = ['LC80340332018034LGN00', 'LC80340322018034LGN00']
search = Search.search(ids=ids, collection='landsat-8-l1')
items = search.items()
assert(search.found() == 2)
assert(len(items) == 2)
def test_get_ids_without_collection(self):
with self.assertRaises(SatSearchError):
search = Search.search(ids=['LC80340332018034LGN00'])
items = search.items()
def test_query_bad_url(self):
with self.assertRaises(SatSearchError):
Search.query(url=os.path.join(config.API_URL, 'collections/nosuchcollection')) | [
"[email protected]"
] | |
dd6003a357da5d293ef2ebc35647330ad910df9f | 17e9441138f8ad09eab3d017c0fa13fa27951589 | /blog19-Iris/test07-show.py | 6f86adb80886cf72e2411f2a744018f4f6a03e69 | [] | no_license | My-lsh/Python-for-Data-Mining | 159a09e76b35efd46ca3e32ad6dd2174847d5ec4 | f2dd0b8f3c4f5f51a10613dff99041bca4fd64c5 | refs/heads/master | 2023-03-26T08:48:32.088713 | 2021-03-25T14:57:07 | 2021-03-25T14:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import pandas
import matplotlib.pyplot as plt
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
from pandas.plotting import scatter_matrix
scatter_matrix(dataset, alpha=0.2, figsize=(6, 6), diagonal='kde')
plt.show()
| [
"[email protected]"
] | |
bceaaba35117b38f3ff8200721312099d8f48e8f | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/SBSteat_20190605153204.py | aa2a19ba3ef7566408312123afa1ed7895fdf977 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | def SBS(A,B):
    if not A or not B:  # empty input has zero similarity
        return 0
elif set(A)<=set(B) or set(B)<=set(A):
return 1
else:
return len(set(A)&set(B)) /len(set(A)|set(B))
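# Example (Jaccard similarity on character sets):
#   SBS("abc", "abd") -> 2/4 = 0.5; SBS("ab", "abc") -> 1 via the subset rule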
def StrToList(A):
C=[]
for i in A:
C.append(i)
return C
import re
f = open(r'D:\DeepLearning ER\Z1006014.txt','r',errors='ignore')
g = open(r'C:\Users\Administrator\Desktop\ICD-10.txt','r',errors='ignore')
line_re=[]
lines = f.readlines()
dics=g.readlines()
out = []
for line in lines:
line=re.sub('\n','',line)
line=re.sub(' ','',line)
    line = re.sub(r'\?|?', '', line)
line = re.sub(r'\,|\.|;','',line)
line_re.append(line)
while '' in line_re:
line_re.remove('')
for line in line_re:
for dic in dics:
dic=re.sub('\n','',dic)
if set(line) == set(dic):
out.append(dic)
elif SBS(line,dic)>0.8 and SBS(line,dic) <1:
out.append(dic)
import EMRdef
out=EMRdef.delre(out)
emr | [
"[email protected]"
] | |
566ff4df4c434e2553096332053b6321ca1e06a3 | 0e8e9bef32f40f5d0fd8c302293ae66732770b66 | /2015/pythonlearn/fromLiaoxuefeng/0078aiohttp.py | f04a0ede82976e6b39b25ff3a8cc8145103a4a43 | [] | no_license | tuouo/selfstudy | 4a33ec06d252388816ad38aa44838e2b728178d4 | 139a0d63477298addfff5b9ea8d39fab96734f25 | refs/heads/master | 2021-01-24T18:37:31.023908 | 2018-03-01T02:55:16 | 2018-03-01T02:55:16 | 84,453,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from aiohttp import web
@asyncio.coroutine
def index(request):
    return web.Response(body = b'<h1>Index</h1>')
@asyncio.coroutine
def hi(request):
yield from asyncio.sleep(0.5)
text = '<h1>hello, %s!</h1>' % request.match_info['name']
return web.Response(body = text.encode('utf-8'))
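# Quick check once the server is running (illustrative request):
#   curl http://127.0.0.1:8000/hello/world  ->  <h1>hello, world!</h1>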
@asyncio.coroutine
def init(loop):
app = web.Application(loop = loop)
app.router.add_route('GET', '/', index)
app.router.add_route('GET', '/hello/{name}', hi)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8000)
    print('Server started at http://127.0.0.1:8000 ...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop)) # init() is coroutine for aiohttp
loop.run_forever() | [
"[email protected]"
] | |
0df5d9ca5055a7189593c2e924ba6b44c8cfa9cc | 172d4078d886b05cea2abdcf7aa458308c48458b | /apache/staging.wsgi | 16c1eba3c84abcd084102c377378178d0c68aecb | [] | no_license | elhoyos/forest | 9066569e42266bcd529c1d6d510119a23a819a65 | 55fca9770a4e24e7c35b8a47d3b27225a760625e | refs/heads/master | 2020-05-16T13:50:33.403078 | 2019-03-26T19:58:26 | 2019-03-26T19:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | wsgi | import os, sys, site
# enable the virtualenv
site.addsitedir('/var/www/forest/forest/ve/lib/python2.6/site-packages')
# paths we might need to pick up the project's settings
sys.path.append('/var/www/')
sys.path.append('/var/www/forest/')
sys.path.append('/var/www/forest/forest/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'forest.settings_staging'
import django.core.handlers.wsgi
import django
django.setup()
application = django.core.handlers.wsgi.WSGIHandler()
| [
"[email protected]"
] | |
d097f3000c69a5e673b71706ec7a9dd3bdfa960b | 3b3eac834008a2f4df4506d8dc2ba4364a7b67e2 | /nail_model_test.py | c0f962cae3a7e7d12e3c498b6bca757bb120dbf5 | [] | no_license | pokepetter/ggj2020_orion_oregano | 37811f1a8b65b95bada0c1e5f6cd35d57e160e8f | 439e4e64018e51e52a7cfb3c6c0b1617aba6056f | refs/heads/master | 2020-12-26T11:01:28.343068 | 2020-02-03T18:34:34 | 2020-02-03T18:34:34 | 237,488,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from ursina import *
app = Ursina()
nail_model = Entity(
model=Prismatoid(
base_shape=Circle(10),
path=(Vec3(0,0,0), Vec3(0,.9+.5,0), Vec3(0,.91+.5,0), Vec3(0,1+.5,0)),
thicknesses=(.2,.2,1,1),
))
EditorCamera()
app.run()
| [
"[email protected]"
] | |
16ec1caa2a401310021e53bf465c49945a16b916 | 11a809229e50f113eaa4528bd7dc05c30183ad4a | /SWS4D/_version.py | 16771c1acf41206d071238bc85bb1c58ad7ae281 | [] | no_license | davidmashburn/SWS4D | 24468fe809ad2ddbd6a8b77fe8f738509c9c7878 | aea8ee3d8c46aa7273c9522bdcbf63609431c126 | refs/heads/master | 2021-01-19T23:32:35.938005 | 2016-12-06T20:39:45 | 2016-12-06T20:39:45 | 88,991,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | __version_info__ = (0,2,7,0)
__version__ = '.'.join(map(str,__version_info__))
| [
"[email protected]"
] | |
29bb0154e69c61470509628247e814b0b21d2bdd | 7af3888ea81123df246a73aa12c3b88c3b1e8440 | /darwinist/sdef.py | dd58f41cda23cd9c54436cbec32e6dbae69547c3 | [] | no_license | gregorynicholas/darwinist | b29a08b2fe966e662cd94cc25d659406b3dce263 | 2a5e3d027a569b61ad54096463e2d97c95f9c029 | refs/heads/master | 2020-05-29T09:53:16.669213 | 2015-03-21T16:37:36 | 2015-03-21T16:37:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/env python
"""
Module to create application dictionaries for appscript using command line sdef tool
"""
import os
from subprocess import check_output
from lxml import etree as ET
class SDEFError(Exception):
def __str__(self):
return self.args[0]
class SDEF(object):
def __init__(self,path):
self.path = path
if not os.path.isdir(path):
raise SDEFError('Not a directory: %s' % path)
self.tree = ET.fromstring(check_output(['sdef',path]))
    def __getattr__(self,attr):
        if attr == 'terms':
            return self.__generate_terms()
        raise AttributeError(attr)
def __generate_terms(self):
output = 'version = 1.1\npath = %s' % self.path
classes = []
enums = []
properties = []
elements = []
commands = []
for suite in self.tree.xpath('suite'):
for node in suite.xpath('class'):
name = node.get('name').replace(' ','_')
code = node.get('code')
classes.append((name,code))
if node.get('inherits') is None:
continue
element = node.get('plural').replace(' ','_')
elements.append((element,code))
for node in suite.xpath('enumeration/enumerator'):
name = node.get('name').replace(' ','_')
code = node.get('code')
enums.append((name,code))
for node in suite.xpath('class/property'):
name = node.get('name').replace(' ','_')
code = node.get('code')
properties.append((name,code))
for node in suite.xpath('command'):
name = node.get('name').replace(' ','_')
code = node.get('code')
cparams = []
for p in node.xpath('parameter'):
pname = p.get('name').replace(' ','_')
pcode = p.get('code')
cparams.append((pname,pcode))
commands.append((name,code,cparams))
output += '\nclasses = %s' % classes
output += '\nenums = %s' % enums
output += '\nproperties = %s' % properties
output += '\nelements = %s' % elements
output += '\ncommands = %s' % commands
return output
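# Usage sketch (any scriptable .app bundle works; the path is illustrative):
#   print(SDEF('/Applications/Safari.app').terms)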
| [
"[email protected]"
] | |
9646828aff8f61b37679c1833438c421c508b961 | 46234633cc7b66684af52f5b131834955115c80e | /train/gen/adv/models/particles/v4_Adam_trunc4_limit100/lib.py | 12d4000398f71ed515e5ef88ffa57b4285c6e13c | [
"MIT"
] | permissive | sammysiegel/SubtLeNet | 80d2ee5d3beb1699702ddb78162d10eee95eb051 | 94d1507a8a7c60548b59400109b6c4086ad83141 | refs/heads/master | 2022-09-05T05:25:53.701377 | 2020-06-01T15:39:36 | 2020-06-01T15:39:36 | 268,620,433 | 0 | 0 | MIT | 2020-06-01T20:04:08 | 2020-06-01T20:04:08 | null | UTF-8 | Python | false | false | 6,556 | py | #!/usr/bin/env python2.7
from _common import *
from ..generators.gen import make_coll, generate, get_dims
from ..generators import gen as generator
'''
some global definitions
'''
NEPOCH = 50
VERSION = 4
MODELDIR = environ.get('MODELDIR', 'models/') + '/particles/'
BASEDIR = environ['BASEDIR']
OPTIMIZER = 'Adam'
_APOSTLE = None
train_opts = {
'learn_mass' : True,
'learn_pt' : True,
}
# must be called!
def instantiate(trunc=4, limit=50):
global _APOSTLE
generator.truncate = trunc
config.limit = limit
_APOSTLE = 'v%s_trunc%i_limit%i'%(str(VERSION), generator.truncate, config.limit)
system('mkdir -p %s/%s/'%(MODELDIR,_APOSTLE))
system('cp -v %s %s/%s/trainer.py'%(sys.argv[0], MODELDIR, _APOSTLE))
system('cp -v %s %s/%s/lib.py'%(__file__.replace('.pyc','.py'), MODELDIR, _APOSTLE))
# instantiate data loaders
top = make_coll(BASEDIR + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(BASEDIR + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
dims = get_dims(top)
with open('%s/%s/setup.py'%(MODELDIR, _APOSTLE),'w') as fsetup:
fsetup.write('''
from subtlenet import config
from subtlenet.generators import gen as generator
config.limit = %i
generator.truncate = %i
'''%(config.limit, generator.truncate))
return data, dims
'''
first build the classifier!
'''
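# Typical driver sequence (sketch, mirroring the functions below; the limit
# matches this model directory's name):
#   data, dims = instantiate(trunc=4, limit=100)
#   gen = setup_data(data)
#   clf = build_classifier(dims)
#   train(clf, 'classifier', gen['train'], gen['validation'])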
# set up data
def setup_data(data):
opts = {}; opts.update(train_opts)
gen = {
'train' : generate(data, partition='train', batch=500, **opts),
'validation' : generate(data, partition='validate', batch=2000, **opts),
'test' : generate(data, partition='test', batch=10, **opts),
}
return gen
def setup_adv_data(data):
opts = {'decorr_mass':True}; opts.update(train_opts)
gen = {
'train' : generate(data, partition='train', batch=1000, **opts),
'validation' : generate(data, partition='validate', batch=2000, **opts),
'test' : generate(data, partition='test', batch=10, **opts),
}
return gen
# this is purely a discriminatory classifier
def build_classifier(dims):
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]
# now build the particle network
h = BatchNormalization(momentum=0.6)(input_particles)
h = Conv1D(32, 2, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = Conv1D(16, 4, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = CuDNNLSTM(100)(h)
h = BatchNormalization(momentum=0.6)(h)
h = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(h)
particles_final = BatchNormalization(momentum=0.6)(h)
# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge)
for i in xrange(1,5):
h = Dense(50, activation='tanh')(h)
# if i%2:
# h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6)(h)
y_hat = Dense(config.n_truth, activation='softmax', name='y_hat')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
#classifier.compile(optimizer=Adam(lr=0.0002),
classifier.compile(optimizer=getattr(keras_objects, OPTIMIZER)(lr=0.0005),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
return classifier
def build_adversary(clf, loss, scale, w_clf, w_adv):
y_hat = clf.outputs[0]
inputs= clf.inputs
kin_hats = Adversary(config.n_decorr_bins, n_outputs=1, scale=scale)(y_hat)
adversary = Model(inputs=inputs,
outputs=[y_hat]+kin_hats)
adversary.compile(optimizer=getattr(keras_objects, OPTIMIZER)(lr=0.00025),
loss=['categorical_crossentropy']+[loss for _ in kin_hats],
loss_weights=[w_clf]+[w_adv for _ in kin_hats])
print '########### ADVERSARY ############'
adversary.summary()
print '###################################'
return adversary
# train any model
def train(model, name, train_gen, validation_gen, save_clf_params=None):
if save_clf_params is not None:
callbacks = [PartialModelCheckpoint(filepath='%s/%s/%s_clf_best.h5'%(MODELDIR,_APOSTLE,name),
save_best_only=True, verbose=True,
**save_clf_params)]
save_clf = save_clf_params['partial_model']
else:
save_clf = model
callbacks = []
callbacks += [ModelCheckpoint('%s/%s/%s_best.h5'%(MODELDIR,_APOSTLE,name),
save_best_only=True, verbose=True)]
def save_classifier(name_=name, model_=save_clf):
model_.save('%s/%s/%s.h5'%(MODELDIR,_APOSTLE,name_))
def save_and_exit(signal=None, frame=None):
save_classifier()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
model.fit_generator(train_gen,
steps_per_epoch=3000,
epochs=NEPOCH,
validation_data=validation_gen,
validation_steps=2000,
callbacks = callbacks,
)
save_classifier()
def infer(modelh5, name):
model = load_model(modelh5,
custom_objects={'DenseBroadcast':DenseBroadcast})
model.summary()
coll = generator.make_coll(BASEDIR + '/PARTITION/*_CATEGORY.npy')
msd_norm_factor = 1. / config.max_mass
pt_norm_factor = 1. / (config.max_pt - config.min_pt)
msd_index = config.gen_singletons['msd']
pt_index = config.gen_singletons['pt']
def predict_t(data):
msd = data['singletons'][:,msd_index] * msd_norm_factor
pt = (data['singletons'][:,pt_index] - config.min_pt) * pt_norm_factor
if msd.shape[0] > 0:
particles = data['particles'][:,:config.limit,:generator.truncate]
r_t = model.predict([particles,msd,pt])[:,config.n_truth-1]
else:
r_t = np.empty((0,))
return r_t
print 'loaded from',modelh5,
print 'saving to',name
coll.infer(['singletons','particles'], f=predict_t, name=name, partition='test')
| [
"[email protected]"
] | |
3f51f53d28ce16889a1cd818a02b4b9acc096912 | 7bf1dc58ba0884ed957efdb5459ae44851b2b36e | /practice_450/strings/15_paranthesis_checker.py | 907deb421b02d77c80a2ca9f344a5fbccafb12d0 | [] | no_license | ksaubhri12/ds_algo | 672260f07f41bcfc33f8ac23a64085a1f27ab4a5 | 46505b89371cae3321f48609dd755c7e5cfed302 | refs/heads/master | 2023-05-12T08:37:06.789111 | 2023-05-03T03:06:49 | 2023-05-03T03:06:49 | 211,793,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | def parenthesis_checker(string_value: str):
opening_brackets = {'[': ']', '(': ')', '{': '}'}
opening_brackets_stack = []
answers = True
n = len(string_value)
for i in range(0, n):
element = string_value[i]
if element in opening_brackets:
opening_brackets_stack.append(element)
else:
if len(opening_brackets_stack) > 0:
element_in_stack = opening_brackets_stack.pop()
if not check_pair(element_in_stack, element):
answers = False
break
else:
answers = False
break
if answers and len(opening_brackets_stack) == 0:
return 'balanced'
else:
return 'not balanced'
def check_pair(opening_element, closing_element):
opening_brackets = {'[': ']', '(': ')', '{': '}'}
return closing_element == opening_brackets[opening_element]
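# Walk-through sketch: for '[(])' the stack holds '[', '(' when ']' arrives;
# ']' pops '(' and check_pair fails, so the result is 'not balanced'.
# The checker runs in O(n) time with O(n) stack space.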
if __name__ == '__main__':
print(parenthesis_checker('[()]{}{[()()]()}'))
    print(parenthesis_checker('[(])'))  # stray leading space removed: any non-opening char is treated as a closer
print(parenthesis_checker('('))
| [
"[email protected]"
] | |
6e8a3770b5d99511676953e6783f1fc9a9f5ef80 | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-imageprocess/aliyunsdkimageprocess/request/v20200320/ScreenChestCTRequest.py | 9210dc80c110a33d88b645318846fed2be3f7c99 | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 2,190 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimageprocess.endpoint import endpoint_data
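# Hedged usage sketch (not part of the generated file): sending this request
# through the core SDK client. The AccessKey values, region, and image URL
# below are placeholders, not values taken from this repository.
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#   request = ScreenChestCTRequest()
#   request.set_DataFormat('DICOM')  # assumed format value for illustration
#   request.set_URLLists([{'URL': 'https://example.com/chest-ct-slice.dcm'}])
#   response = client.do_action_with_exception(request)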
class ScreenChestCTRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imageprocess', '2020-03-20', 'ScreenChestCT','imageprocess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OrgName(self):
return self.get_body_params().get('OrgName')
def set_OrgName(self,OrgName):
self.add_body_params('OrgName', OrgName)
def get_Mask(self):
return self.get_body_params().get('Mask')
def set_Mask(self,Mask):
self.add_body_params('Mask', Mask)
def get_DataFormat(self):
return self.get_body_params().get('DataFormat')
def set_DataFormat(self,DataFormat):
self.add_body_params('DataFormat', DataFormat)
def get_URLLists(self):
return self.get_body_params().get('URLList')
def set_URLLists(self, URLLists):
for depth1 in range(len(URLLists)):
if URLLists[depth1].get('URL') is not None:
self.add_body_params('URLList.' + str(depth1 + 1) + '.URL', URLLists[depth1].get('URL'))
def get_OrgId(self):
return self.get_body_params().get('OrgId')
def set_OrgId(self,OrgId):
self.add_body_params('OrgId', OrgId) | [
"[email protected]"
] | |
98ea4fafb6fc58089c1c4fd892af8ba9a7e65f51 | bdaed512916fcf96e5dc915538fe8598aeb2d3cf | /mcex/distributions/special.py | a97815d40b32299438cac098d8f416c9cb2e2a25 | [] | no_license | jsalvatier/mcex | 9657cc2e8083f4e4dd013baaaceba08f9a48754e | 040f49bfd6eb467ef4d50d15de25033b1ba52c55 | refs/heads/master | 2021-06-18T19:02:07.055877 | 2017-01-22T01:10:01 | 2017-01-22T01:10:01 | 1,455,409 | 9 | 3 | null | 2012-06-21T18:07:36 | 2011-03-08T17:02:42 | Python | UTF-8 | Python | false | false | 3,820 | py | '''
Created on Mar 17, 2011
@author: jsalvatier
'''
from theano import scalar,tensor
import numpy
from scipy import special, misc
class GammaLn(scalar.UnaryScalarOp):
"""
Compute gammaln(x)
"""
@staticmethod
def st_impl(x):
return special.gammaln(x)
def impl(self, x):
return GammaLn.st_impl(x)
def grad(self, inp, grads):
x, = inp
gz, = grads
return [gz * scalar_psi(x)]
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in [scalar.float32, scalar.float64]:
return """%(z)s =
lgamma(%(x)s);""" % locals()
raise NotImplementedError('only floatingpoint is implemented')
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tensor.Elemwise(scalar_gammaln, name='gammaln')
class Psi(scalar.UnaryScalarOp):
"""
Compute derivative of gammaln(x)
"""
@staticmethod
def st_impl(x):
return special.psi(x)
def impl(self, x):
return Psi.st_impl(x)
#def grad() no gradient now
def c_support_code(self):
return (
"""
#ifndef _PSIFUNCDEFINED
#define _PSIFUNCDEFINED
double _psi(double x){
/*taken from
Bernardo, J. M. (1976). Algorithm AS 103: Psi (Digamma) Function. Applied Statistics. 25 (3), 315-317.
http://www.uv.es/~bernardo/1976AppStatist.pdf */
double y, R, psi_ = 0;
double S = 1.0e-5;
double C = 8.5;
double S3 = 8.333333333e-2;
double S4 = 8.333333333e-3;
double S5 = 3.968253968e-3;
double D1 = -0.5772156649 ;
y = x;
if (y <= 0.0)
return psi_;
if (y <= S )
return D1 - 1.0/y;
while (y < C){
psi_ = psi_ - 1.0 / y;
y = y + 1;}
R = 1.0 / y;
psi_ = psi_ + log(y) - .5 * R ;
R= R*R;
psi_ = psi_ - R * (S3 - R * (S4 - R * S5));
return psi_;}
#endif
""" )
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in [scalar.float32, scalar.float64]:
return """%(z)s =
_psi(%(x)s);""" % locals()
raise NotImplementedError('only floatingpoint is implemented')
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
scalar_psi = Psi(scalar.upgrade_to_float, name='scalar_psi')
psi = tensor.Elemwise(scalar_psi, name='psi')
class FactLn(scalar.UnaryScalarOp):
"""
Compute factln(x)
"""
@staticmethod
def st_impl(x):
return numpy.log(misc.factorial(x))
def impl(self, x):
return FactLn.st_impl(x)
#def grad() no gradient now
def c_support_code(self):
return (
"""
double factln(int n){
static double cachedfl[100];
if (n < 0) return -1.0; // need to return -inf here at some point
if (n <= 1) return 0.0;
            if (n < 100) return cachedfl[n] ? cachedfl[n] : (cachedfl[n]=lgamma(n + 1.0));
            else return lgamma(n+1.0);}
""" )
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in [scalar.float32, scalar.float64]:
return """%(z)s =
factln(%(x)s);""" % locals()
raise NotImplementedError('only floatingpoint is implemented')
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
scalar_factln = FactLn(scalar.upgrade_to_float, name='scalar_factln')
factln = tensor.Elemwise(scalar_factln, name='factln')
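# Hedged usage sketch (appended; not in the original module): compile the
# elementwise ops with theano and evaluate them on a small vector.
if __name__ == '__main__':
    import theano
    x = tensor.dvector('x')
    f = theano.function([x], [gammaln(x), psi(x)])
    print(f(numpy.array([1.0, 2.0, 3.5])))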
| [
"[email protected]"
] | |
52a83fa3c2634369a6ddf6a2f9101569c9c107c6 | 32a6db4d595ef4d308ac0e2ef37c57f65a777bfc | /ZYCami_00_彭小钗/PO/Wx_Element.py | 5f9021ea65cb3db03881e667b4966af054d6d0fb | [] | no_license | wangdan377/Python_UI | 1c8f0b3d46272d72f849f242c39e035c6b20720b | 6c3e23b301ffe14cbd27a5211e48c8f79169dcf9 | refs/heads/master | 2023-02-17T02:37:34.353523 | 2021-01-19T11:58:22 | 2021-01-19T11:58:22 | 311,855,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,981 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
from PO.Base_page import Base_Page
class Wx_Page(Base_Page):
# 微信支付
File_wx = (By.ID, 'com.zhiyun.cama:id/btn_wx')
# 微信支付方式的关闭按钮
File_wx_closed = (By.XPATH,'/hierarchy/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout')
# 立即支付
File_pay_wx = (By.ID, '立即支付')
# 选择零钱按钮
File_Change_button = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[4]/android.view.ViewGroup[2]/com.tencent.mm.ui.MMImageView')
# 选择零钱支付
File_Change_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[1]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.TextView')
# 选择建设银行支付
File_Construction_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[2]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup')
# 选择江苏银行支付
File_jsu_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[3]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup')
# 请输入支付密码框
File_pay_password = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[5]/android.widget.RelativeLayout/android.widget.RelativeLayout')
# 关闭支付密码页面弹框
File_closed_password = (By.XPATH,'//android.view.ViewGroup[@content-desc=\"关闭\"]/android.view.ViewGroup/com.tencent.mm.ui.MMImageView')
# 支付页面左上角的X
File_pay_x = (By.ID, '返回')
# 继续支付
File_connectinue_pay = (By.ID, 'com.tencent.mm:id/dom')
# 放弃
File_give_up_pay = (By.ID, 'com.tencent.mm:id/doz')
#支付成功,返回商家
File_return_app = (By.ID, '返回商家')
#密码输入错误,重试
File_doz = (By.ID, 'com.tencent.mm:id/doz')
# 支付页面左上角的X
def click_File_pay_x(self):
self.find_element(*self.File_pay_x).click()
# 继续支付
def click_File_connectinue_pay(self):
self.find_element(*self.File_connectinue_pay).click()
# 放弃
def click_File_give_up_pay(self):
self.find_element(*self.File_give_up_pay).click()
# 选择微信支付
def click_File_wx(self):
self.find_element(*self.File_wx).click()
# 微信支付方式的关闭按钮
def click_File_wx_closed(self):
self.find_element(*self.File_wx_closed).click()
# 立即支付
def click_File_pay_wx(self):
self.find_element(*self.File_pay_wx).click()
# 选择零钱按钮
def click_File_Change_button(self):
self.find_element(*self.File_Change_button).click()
# 选择零钱支付
def click_File_Change_pay(self):
self.find_element(*self.File_Change_pay).click()
# 选择建设银行支付
def click_File_Construction_pay(self):
self.find_element(*self.File_Construction_pay).click()
# 选择江苏银行支付
def click_File_jsu_pay(self):
self.find_element(*self.File_jsu_pay).click()
# 请输入支付密码框
def click_File_pay_password(self):
self.find_element(*self.File_pay_password).click()
# 关闭支付密码页面弹框
def click_File_closed_password(self):
self.find_element(*self.File_closed_password).click()
#返回商家
def click_File_return_app(self):
self.find_element(*self.File_return_app).click()
# 密码输入错误,重试
def click_File_doz(self):
self.find_element(*self.File_doz).click()
| [
"[email protected]"
] | |
edcb570bd7fd632da38d368049889cb40b88c7f0 | 2d0da5d8f45e1906bb2a2eee0901e7fddd5dc7ad | /scripts/run_scripts/pha/run_pha_full1.py | d4a5fdb586a3f61ebb2138440d1c83f19148e4f7 | [
"MIT"
] | permissive | akazachk/pha | 09afd2fa6764ef9133a8ae91bb189e2896e076c6 | 4120f70554cb0a149d5ab52e04409302e78059fa | refs/heads/master | 2021-09-25T01:02:42.488470 | 2021-09-15T17:51:34 | 2021-09-15T17:51:34 | 194,751,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,410 | py | ####
## The inputted file is either [filename].instances or [file].batch.
## (The extension is not important.)
## The first line of this file will be the directory ``stub'',
## and output will be sent to ${PROJ_DIR}/results/instances/stub if batch mode is off,
## and to ${PROJ_DIR}/results/instances/batches/stub/batchname if it is on.
## Each line contains either a relative input path to an instance (with or without the extension) or a batch name.
## The path is relative to ${PROJ_DIR}/data/instances, e.g., the line will be miplib2/bm23.
## A batch name is distinguished by having the batch end with a '/', e.g., '2/' or 'batch2/'.
## Set up proper path variables
import os
PROJ_DIR = os.path.abspath(os.environ['PHA_DIR'])
EXECUTABLE = PROJ_DIR + "/Release/PHA"
## Solver options
phaActOptions = [1]
numAlg2Rounds = [0,1,2,3,4,-1,-2,-3,-4]
#numRaysToBeCut = [0]
cutLimit = [1000] # negative means divide the limit across splits
useSplitShare = [0,1000]
numCutsIterBilinear = [0]
useUnitVectorsHeur = [0,1]
useCutVertHeur = [0,1]
useTightPointsHeur = [0,1000]
usePresolve = [0,1]
## Set up output and input folders
results_path = PROJ_DIR + '/results'
paramfile = PROJ_DIR + '/data/params/pha_params.txt'
#instances_path = os.getcwd()
instances_path = PROJ_DIR + "/data/instances"
instances_file = instances_path + '/' + "test.instances"
outinfo_stub = 'pha-full' + str(phaActOptions[0])
outinfo_dir = results_path
## Get arguments
from sys import argv
use_batches = False # set to true/false depending on if mps files are all in one folder or divided up into subfolders
if (len(argv) > 1):
use_batches = True if argv[1] in ['true', 'True', '1', 't'] else False
if (use_batches and len(argv) < 2):
raise ValueError('When using batches, specifying the folder is required')
if (len(argv) > 2):
instances_file = os.path.abspath(argv[2])
## Where are the instances?
with open(instances_file) as f_in:
list_to_use = list(filter(None, (line.rstrip() for line in f_in)))
## The first line will be the name of the directory we should use
dir_stub = list_to_use[0]
list_to_use = list_to_use[1:]
#instances_file_name = instances_file.split('/')[-1]
#instances_file_name_split_by_dot = instances_file_name.split('.')
#dir_stub = '.'.join(instances_file_name_split_by_dot[0:len(instances_file_name_split_by_dot)-1])
if use_batches:
dir_stub = "batches/" + dir_stub
## Finalize outinfo
outinfo_dir = outinfo_dir + '/' + dir_stub
os.system("mkdir -p " + outinfo_dir) # make the dir if it does not exist
## Choose order so that deepest for loop are the results you want to see first, fixing all others
batch_name = ''
for usepresolve in usePresolve:
# for numrays in numRaysToBeCut:
for cutlimit in cutLimit:
for numiterblp in numCutsIterBilinear:
for splitshareopt in useSplitShare:
for usecutvert in useCutVertHeur:
for useunitvec in useUnitVectorsHeur:
for usetight in useTightPointsHeur:
for numalg2 in numAlg2Rounds:
for actoption in phaActOptions:
## Skip if all zeroes for cut generation
if (splitshareopt == 0) and (numiterblp == 0) and (useunitvec == 0) and (usecutvert == 0) and (usetight == 0):
continue
for inst in list_to_use:
## Check if batch name
if (inst[-1] == '/'):
batch_name = inst
continue
## Check if need to add "mps"
inst_name = inst
if (inst[-4:] != '.mps') and (inst[-3:] != '.lp') and (inst[-7:] != '.mps.gz') and (inst[-6:] != '.lp.gz'):
inst_name = inst_name + '.mps'
## Run on instances_path/inst.mps
infile = instances_path + '/' + inst_name
curr_out_dir = outinfo_dir + '/' + batch_name
outinfo = curr_out_dir + outinfo_stub
## In case the out directory does not exist
os.system("mkdir -p " + curr_out_dir)
## Arguments
extraparams = \
' --opt_file=' + PROJ_DIR + '/data/ip_opt.csv' + \
" --hplane_scoring_fn=" + str(actoption) + \
" --num_alg2_rounds=" + str(numalg2) + \
" --cut_limit=" + str(cutlimit) + \
" --use_split_share=" + str(splitshareopt) + \
" --num_cuts_iter_bilinear=" + str(numiterblp) + \
" --use_unit_vectors_heur=" + str(useunitvec) + \
" --use_cut_vert_heur=" + str(usecutvert) + \
" --use_tight_points_heur=" + str(usetight) + \
" --cut_presolve=" + str(usepresolve) + \
" --rounds=" + str(1)
print(EXECUTABLE + " -i " + infile + " -o " + curr_out_dir + " --log_file=" + outinfo + " -p " + paramfile + extraparams)
os.system(EXECUTABLE + " -i " + infile + " -o " + curr_out_dir + " --log_file=" + outinfo + " -p " + paramfile + extraparams + " > /dev/null 2>&1")
| [
"None"
] | None |
bc842ccaa11ef55ed6395e29a362c7dbae9cc52a | da49837553eab1d9e89b7cf1106776959b8da3b7 | /STPT.py | 8c7707bdf2641c2b0a62295dadb00be4a8238d98 | [] | no_license | sunomon/100at10low10 | ee9717450eb081c5a3105411d84b71079414c456 | 0ea7bd0458a42e0773285d12812168c2c0a3d3a2 | refs/heads/main | 2023-06-19T09:05:14.678018 | 2021-07-06T22:50:51 | 2021-07-06T22:50:51 | 383,526,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,117 | py | import time
import pyupbit
import datetime
import schedule
from fbprophet import Prophet
access = "PgXnWWPxxv88s7z2PSnz4aoqaYL0gxkRxReK0WDK"
secret = "wgCfiEmQVH76s9sblwFKQsOKOp91t2ic3XAHuNsK"
def get_target1_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target1_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target1_price
def get_target2_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target2_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target2_price
def get_target3_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target3_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target3_price
def get_target4_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target4_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target4_price
def get_target5_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target5_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target5_price
def get_target6_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target6_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target6_price
def get_target7_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target7_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target7_price
def get_target8_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target8_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target8_price
def get_target9_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target9_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target9_price
def get_target10_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target10_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target10_price
def get_start_time(ticker):
df = pyupbit.get_ohlcv(ticker, interval="day", count=1)
start_time = df.index[0]
return start_time
def get_balance(ticker):
balances = upbit.get_balances()
for b in balances:
if b['currency'] == ticker:
if b['balance'] is not None:
return float(b['balance'])
else:
return 0
return 0
def get_current_price(ticker):
return pyupbit.get_orderbook(tickers=ticker)[0]["orderbook_units"][0]["ask_price"]
predicted_close_price = 0
def predict_price(ticker):
global predicted_close_price
df = pyupbit.get_ohlcv(ticker, interval="minute60")
df = df.reset_index()
df['ds'] = df['index']
df['y'] = df['close']
data = df[['ds','y']]
model = Prophet()
model.fit(data)
future = model.make_future_dataframe(periods=24, freq='H')
forecast = model.predict(future)
closeDf = forecast[forecast['ds'] == forecast.iloc[-1]['ds'].replace(hour=9)]
if len(closeDf) == 0:
closeDf = forecast[forecast['ds'] == data.iloc[-1]['ds'].replace(hour=9)]
closeValue = closeDf['yhat'].values[0]
predicted_close_price = closeValue
predict_price("KRW-STPT")
schedule.every().hour.do(lambda: predict_price("KRW-STPT"))
upbit = pyupbit.Upbit(access, secret)
print("autotrade start")
while True:
try:
now = datetime.datetime.now()
start_time = get_start_time("KRW-STPT")
middle1_time = start_time + datetime.timedelta(hours=3)
middle2_time = start_time + datetime.timedelta(hours=9)
middle3_time = start_time + datetime.timedelta(hours=15)
end_time = start_time + datetime.timedelta(days=1)
schedule.run_pending()
if start_time < now < end_time - datetime.timedelta(hours=1):
target1_price = get_target1_price("KRW-STPT", 0.1)
target2_price = get_target2_price("KRW-STPT", 0.2)
target3_price = get_target3_price("KRW-STPT", 0.3)
target4_price = get_target4_price("KRW-STPT", 0.4)
target5_price = get_target5_price("KRW-STPT", 0.5)
target6_price = get_target6_price("KRW-STPT", 0.6)
target7_price = get_target7_price("KRW-STPT", 1)
target8_price = get_target8_price("KRW-STPT", 1.5)
target9_price = get_target9_price("KRW-STPT", 2)
target10_price = get_target10_price("KRW-STPT", 3)
current_price = get_current_price("KRW-STPT")
krw = get_balance("KRW")
stpt = get_balance("STPT")
if target1_price <= current_price < target1_price*1.02 and target1_price*1.1 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target1_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target1_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target2_price <= current_price < target2_price*1.02 and target2_price*1.125 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target2_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target2_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target3_price <= current_price < target3_price*1.02 and target3_price*1.15 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target3_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target3_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target4_price <= current_price < target4_price*1.02 and target4_price*1.175 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target4_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target4_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target5_price <= current_price < target5_price*1.02 and target5_price*1.2 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target5_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target5_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target6_price <= current_price < target6_price*1.02 and target6_price*1.25 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target6_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target6_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target7_price <= current_price < target7_price*1.02 and target7_price*1.2 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target7_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target7_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target8_price <= current_price < target8_price*1.02 and target8_price*1.3 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target8_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target8_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target9_price <= current_price < target9_price*1.02 and target9_price*1.4 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target9_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target9_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if target10_price <= current_price < target10_price*1.02 and target10_price*2 <= predicted_close_price:
if krw >= 1000000 and stpt < 10000/(target10_price*1.02):
upbit.buy_market_order("KRW-STPT", 1000000)
if 5000 < krw < 1000000 and stpt < 10000/(target10_price*1.02):
upbit.buy_market_order("KRW-STPT", krw*0.9995)
if stpt > 1000000*1.001*1.2/current_price:
upbit.sell_market_order("KRW-STPT", stpt*0.9995)
elif middle1_time < now < middle2_time:
stpt = get_balance("STPT")
current_price = get_current_price("KRW-STPT")
if stpt > 1000000*1.001*1.1/current_price:
upbit.sell_market_order("KRW-STPT", stpt*0.9995)
elif middle2_time < now < middle3_time:
stpt = get_balance("STPT")
current_price = get_current_price("KRW-STPT")
if stpt > 1000000*1.001*1.05/current_price:
upbit.sell_market_order("KRW-STPT", stpt*0.9995)
elif middle3_time < now < end_time - datetime.timedelta(hours=1):
stpt = get_balance("STPT")
current_price = get_current_price("KRW-STPT")
if stpt > 1000000*1.001*1.03/current_price or current_price > predicted_close_price:
upbit.sell_market_order("KRW-STPT", stpt*0.9995)
else:
stpt = get_balance("STPT")
current_price = get_current_price("KRW-STPT")
if stpt > 1000000*1.001/current_price:
upbit.sell_market_order("KRW-STPT", stpt*0.9995)
time.sleep(1)
except Exception as e:
print(e)
time.sleep(1)
| [
"[email protected]"
] | |
8a46b42a2d6965726648fa8823414ef23617c636 | 8c382ed6073bfc2dc3fda97d8344628ac669d548 | /api/views.py | 9cd911c86b7585e256c99cc284ffdbbc84072291 | [] | no_license | dmaras1808/ghiro | 7a428d69944a2e4173b6603240a2c195c21ed7f4 | 439d395a1311ac6f802d0ee1402d37e99aeb5f95 | refs/heads/master | 2021-01-24T01:59:59.225272 | 2015-08-03T21:40:28 | 2015-08-03T21:40:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py | # Ghiro - Copyright (C) 2013-2015 Ghiro Developers.
# This file is part of Ghiro.
# See the file 'docs/LICENSE.txt' for license terms.
import json
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from ghiro.common import log_activity
from ghiro.authorization import api_authenticate
from analyses.models import Case, Analysis
from lib.db import save_file
from lib.utils import create_thumb
@require_POST
@csrf_exempt
def new_case(request):
"""Creates a new case."""
user = api_authenticate(request.POST.get("api_key"))
if request.POST.get("name"):
case = Case(name=request.POST.get("name"),
description=request.POST.get("description"),
owner=user)
case.save()
# Auditing.
log_activity("C",
"Created new case via API %s" % case.name,
request,
user)
response_data = {"id": case.id}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
return HttpResponse("Request not valid", status=400)
@require_POST
@csrf_exempt
def new_image(request):
"""Upload a new image."""
user = api_authenticate(request.POST.get("api_key"))
if request.POST.get("case_id"):
case = get_object_or_404(Case, pk=request.POST.get("case_id"))
# Security check.
if not case.can_write(user):
return HttpResponse("You are not authorized to add image to this", status=400)
if case.state == "C":
return HttpResponse("You cannot add an image to a closed case", status=400)
else:
case = None
task = Analysis.add_task(request.FILES["image"].temporary_file_path(),
file_name=request.FILES["image"].name, case=case, user=user,
content_type=request.FILES["image"].content_type,
image_id=save_file(file_path=request.FILES["image"].temporary_file_path(),
content_type=request.FILES["image"].content_type),
thumb_id=create_thumb(request.FILES["image"].temporary_file_path()))
# Auditing.
log_activity("I",
"Created new analysis via API %s" % task.file_name,
request,
user=user)
response_data = {"id": task.id}
return HttpResponse(json.dumps(response_data), content_type="application/json")
| [
"[email protected]"
] | |
5ef49b95a868b1d76d979ef9518a54c565787183 | 481517a085014aefba963d29ff52b56bef6a393e | /abstractdemo.py | 77dfa21d3b7352b51566e8f50caccc1c0957b0c9 | [] | no_license | 27Saidou/cours_python | 6d916fe63652e0463bd995dbb9a3ec72c74f4c3d | 91820b826ced24bed98525429096e32ff4c036db | refs/heads/main | 2022-01-09T09:58:32.514032 | 2022-01-04T18:37:56 | 2022-01-04T18:37:56 | 214,328,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | from abc import ABC, abstractmethod
class A(ABC):
def __init__(self,value):
self.value = value
@abstractmethod
def add(self):
pass
@abstractmethod
def sub(self):
pass
class Y(A):
def add(self):
return self.value +100
def sub(self):
return self.value -10
obj=Y(100)
print(obj.add())
print(obj.sub()) | [
"[email protected]"
] | |
438c15de02438222ef6d1d703b882cb426c13720 | 2a8a6327fb9a7ce8696aa15b197d5170661fb94f | /test/test_invoice_processing_options.py | c8b97d17768201adb01b23cc30c300ec51eeb16a | [] | no_license | moderndatainc/zuora-client | 8b88e05132ddf7e8c411a6d7dad8c0baabaa6dad | d50da49ce1b8465c76723496c2561a3b8ebdf07d | refs/heads/master | 2021-09-21T19:17:34.752404 | 2018-08-29T23:24:07 | 2018-08-29T23:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,764 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. 
This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. 
To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/G_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. 
### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/3_Responses_and_errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. 
Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. | | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. 
| | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. **Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation and Export ZOQL queries only. 
Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | 
`RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2018-08-23
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.invoice_processing_options import InvoiceProcessingOptions # noqa: E501
from zuora_client.rest import ApiException
class TestInvoiceProcessingOptions(unittest.TestCase):
"""InvoiceProcessingOptions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInvoiceProcessingOptions(self):
"""Test InvoiceProcessingOptions"""
# FIXME: construct object with mandatory attributes with example values
# model = zuora_client.models.invoice_processing_options.InvoiceProcessingOptions() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
591439930127cc0ca00096c759275babb9b9f783 | a943cb6da95ec1e06cb480887ba1062a5783527f | /2012-oss-hrs/plot-mg-k.py | fcb7775822791a3f8c6194f2e6dd460fa6fb664e | [] | no_license | andycasey/papers | 1b2c882c20b0c65b5899d70dc95825ec53cc9fe2 | 3d585ad4b6b1c3b40227185fd7b22ea9bdeb8e02 | refs/heads/master | 2021-01-19T17:24:48.788580 | 2013-08-13T08:51:02 | 2013-08-13T08:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import numpy as np
mucciarelli_headers = 'ID Teff logg vt [Fe/H] [Fe/H]_err [Mg/Fe] [Mg/Fe]_err [K/Fe] [K/Fe]_err [Ti/Fe] [Ti/Fe]_err [Ca/Fe] [Ca/Fe]_err'
mg_fe, mg_fe_err, k_fe, k_fe_err = range(4)
mucciarelli_columns = ['[Mg/Fe]', '[Mg/Fe]_err', '[K/Fe]', '[K/Fe]_err']
mucciarelli_column_indices = [mucciarelli_headers.split().index(item) for item in mucciarelli_columns]
mucciarelli = np.loadtxt('ngc2419-mucciarelli.txt', usecols=mucciarelli_column_indices)
fig = plt.figure()
fig.subplots_adjust(left=0.10, right=0.95, bottom=0.07, wspace=0, hspace=0.14, top=0.95)
ax = fig.add_subplot(111)
ax.errorbar(mucciarelli[:, mg_fe], mucciarelli[:, k_fe], xerr=mucciarelli[:, mg_fe_err], yerr=mucciarelli[:, k_fe_err], fmt=None, ecolor='k')
ax.scatter(mucciarelli[:, mg_fe], mucciarelli[:, k_fe], facecolor='k', edgecolor='k', label='Mucciarelli et al. (DEIMOS; 2012)')
# Cohen data
cohen = np.loadtxt('ngc2419-cohen.txt', usecols=(1,2,3,4, ))
ax.errorbar(cohen[:, mg_fe], cohen[:, k_fe], xerr=cohen[:, mg_fe_err], yerr=cohen[:, k_fe_err], fmt=None, ecolor='g')
ax.scatter(cohen[:, mg_fe], cohen[:, k_fe], marker='s', edgecolor='g', facecolor='g', label='Cohen et al. (HIRES; 2011, 2012)')
ax.set_ylabel('[K/Fe]')
ax.set_xlabel('[Mg/Fe]')
ax.legend(loc=3)
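# The script builds the figure but never renders or saves it; when run
# non-interactively you would typically finish with one of these
# (assumption, not in the original):
# plt.show()
# fig.savefig('ngc2419-mg-k.pdf')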
| [
"[email protected]"
] | |
f4f7422e1d5486fb475b159c62317c606ffa5580 | 421d58c6b93b81e0724f8f4576119300eb344252 | /influencers/core/migrations/0012_auto_20190130_1417.py | ebc63a175e53a43e7c2a89b47f2eb951d1aca9ef | [] | no_license | momen/influencers | 7728228c92a552bdff9ae62f85986ad03bce186e | f9c76cfc2970440112967f9579dc31f77063cb25 | refs/heads/master | 2020-06-03T22:20:03.881411 | 2019-06-15T07:48:43 | 2019-06-15T07:48:43 | 191,754,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Generated by Django 2.1.4 on 2019-01-30 14:17
from django.db import migrations
import partial_index
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20190130_1305'),
]
operations = [
migrations.AddIndex(
model_name='bank',
index=partial_index.PartialIndex(fields=['swift', 'deleted'], name='core_bank_swift_db8a53_partial', unique=True, where=partial_index.PQ(deleted__isnull=True)),
),
]
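    # On PostgreSQL the partial unique index above corresponds roughly to
    # this SQL (illustrative, not emitted verbatim by the migration):
    #   CREATE UNIQUE INDEX core_bank_swift_db8a53_partial
    #       ON core_bank (swift, deleted) WHERE deleted IS NULL;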
| [
"[email protected]"
] | |
e7f6bcc4646a64093c53c52df316962e702eb6fe | 2ea8f0e3d7c2eb709e126b069a551e24bb9d40aa | /bin/basenji_test_folds.py | ef85e1bc357503cee43d76187ccb16d7c6ebc442 | [
"Apache-2.0"
] | permissive | eniktab/basenji | b593136330a9a8292d665a14ab147ae0b08bfce9 | 90ce1c19a17f7a9edd4d95039f11755f6865fd8a | refs/heads/master | 2022-12-19T05:16:19.952806 | 2020-09-26T17:32:26 | 2020-09-26T17:32:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,161 | py | #!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import glob
import json
import os
import pdb
import shutil
import sys
from natsort import natsorted
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon, ttest_rel
import matplotlib.pyplot as plt
import seaborn as sns
import slurm
"""
basenji_test_folds.py
Train Basenji model replicates using given parameters and data.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <exp_dir> <params_file> <data_dir>'
parser = OptionParser(usage)
parser.add_option('-a', '--alt', dest='alternative',
default='two-sided', help='Statistical test alternative [Default: %default]')
parser.add_option('-c', dest='crosses',
default=1, type='int',
help='Number of cross-fold rounds [Default:%default]')
parser.add_option('-e', dest='conda_env',
default='tf2-gpu',
help='Anaconda environment [Default: %default]')
parser.add_option('--l1', dest='label1',
default='Reference', help='Reference label [Default: %default]')
parser.add_option('--l2', dest='label2',
default='Experiment', help='Experiment label [Default: %default]')
parser.add_option('--name', dest='name',
default='test', help='SLURM name prefix [Default: %default]')
parser.add_option('-o', dest='out_stem',
default=None, help='Outplut plot stem [Default: %default]')
parser.add_option('-q', dest='queue',
default='gtx1080ti')
parser.add_option('-r', dest='ref_dir',
default=None, help='Reference directory for statistical tests')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--spec', dest='specificity',
default=False, action='store_true',
help='Test specificity [Default: %default]')
parser.add_option('--train', dest='train',
default=False, action='store_true',
help='Test on the training set, too [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide parameters file and data directory')
else:
exp_dir = args[0]
params_file = args[1]
data_dir = args[2]
# read data parameters
data_stats_file = '%s/statistics.json' % data_dir
with open(data_stats_file) as data_stats_open:
data_stats = json.load(data_stats_open)
# count folds
num_folds = len([dkey for dkey in data_stats if dkey.startswith('fold')])
################################################################
# test check
################################################################
jobs = []
if options.train:
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%d_c%d' % (exp_dir, fi, ci)
# check if done
acc_file = '%s/test_train/acc.txt' % it_dir
if os.path.isfile(acc_file):
print('%s already generated.' % acc_file)
else:
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' basenji_test.py'
basenji_cmd += ' -o %s/test_train' % it_dir
if options.rc:
basenji_cmd += ' --rc'
if options.shifts:
basenji_cmd += ' --shifts %s' % options.shifts
basenji_cmd += ' --split train'
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s/train/model_check.h5' % it_dir
basenji_cmd += ' %s/data' % it_dir
name = '%s-testtr-f%dc%d' % (options.name, fi, ci)
basenji_job = slurm.Job(basenji_cmd,
name=name,
out_file='%s/test_train.out'%it_dir,
err_file='%s/test_train.err'%it_dir,
queue=options.queue,
cpu=1,
gpu=1,
mem=23000,
time='4:00:00')
jobs.append(basenji_job)
################################################################
# test best
################################################################
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%d_c%d' % (exp_dir, fi, ci)
# check if done
acc_file = '%s/test/acc.txt' % it_dir
if os.path.isfile(acc_file):
print('%s already generated.' % acc_file)
else:
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' basenji_test.py'
basenji_cmd += ' -o %s/test' % it_dir
if options.rc:
basenji_cmd += ' --rc'
if options.shifts:
basenji_cmd += ' --shifts %s' % options.shifts
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s/train/model_best.h5' % it_dir
basenji_cmd += ' %s/data' % it_dir
name = '%s-test-f%dc%d' % (options.name, fi, ci)
basenji_job = slurm.Job(basenji_cmd,
name=name,
out_file='%s/test.out'%it_dir,
err_file='%s/test.err'%it_dir,
queue=options.queue,
cpu=1,
gpu=1,
mem=23000,
time='4:00:00')
jobs.append(basenji_job)
################################################################
# test best specificity
################################################################
if options.specificity:
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%d_c%d' % (exp_dir, fi, ci)
# check if done
acc_file = '%s/test_spec/acc.txt' % it_dir
if os.path.isfile(acc_file):
print('%s already generated.' % acc_file)
else:
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' basenji_test_specificity.py'
basenji_cmd += ' -o %s/test_spec' % it_dir
if options.rc:
basenji_cmd += ' --rc'
if options.shifts:
basenji_cmd += ' --shifts %s' % options.shifts
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s/train/model_best.h5' % it_dir
basenji_cmd += ' %s/data' % it_dir
name = '%s-spec-f%dc%d' % (options.name, fi, ci)
basenji_job = slurm.Job(basenji_cmd,
name=name,
out_file='%s/test_spec.out'%it_dir,
err_file='%s/test_spec.err'%it_dir,
queue=options.queue,
cpu=1,
gpu=1,
mem=60000,
time='6:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
if options.ref_dir is not None:
# classification or regression
with open('%s/f0_c0/test/acc.txt' % exp_dir) as test0_open:
header = test0_open.readline().split()
if 'pearsonr' in header:
metric = 'pearsonr'
else:
metric = 'auprc'
################################################################
# compare checkpoint on training set
################################################################
if options.train:
ref_glob_str = '%s/*/test_train/acc.txt' % options.ref_dir
ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, metric)
exp_glob_str = '%s/*/test_train/acc.txt' % exp_dir
exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, metric)
mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
print('\nTrain:')
print('%12s %s: %.4f (%.4f)' % (options.label1, metric, ref_mean, ref_stdm))
print('%12s %s: %.4f (%.4f)' % (options.label2, metric, exp_mean, exp_stdm))
      print('Wilcoxon signed-rank p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
if options.out_stem is not None:
jointplot(ref_cors, exp_cors,
'%s_train.pdf' % options.out_stem,
options.label1, options.label2)
################################################################
# compare best on test set
################################################################
ref_glob_str = '%s/*/test/acc.txt' % options.ref_dir
ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, metric)
exp_glob_str = '%s/*/test/acc.txt' % exp_dir
exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, metric)
mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
print('\nTest:')
print('%12s %s: %.4f (%.4f)' % (options.label1, metric, ref_mean, ref_stdm))
print('%12s %s: %.4f (%.4f)' % (options.label2, metric, exp_mean, exp_stdm))
    print('Wilcoxon signed-rank p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
if options.out_stem is not None:
jointplot(ref_cors, exp_cors,
'%s_test.pdf' % options.out_stem,
options.label1, options.label2)
################################################################
# compare best on test set specificity
################################################################
if options.specificity:
ref_glob_str = '%s/*/test_spec/acc.txt' % options.ref_dir
ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, metric)
exp_glob_str = '%s/*/test_spec/acc.txt' % exp_dir
exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, metric)
mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
print('\nSpecificity:')
print('%12s %s: %.4f (%.4f)' % (options.label1, metric, ref_mean, ref_stdm))
print('%12s %s: %.4f (%.4f)' % (options.label2, metric, exp_mean, exp_stdm))
      print('Wilcoxon signed-rank p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
if options.out_stem is not None:
jointplot(ref_cors, exp_cors,
'%s_spec.pdf' % options.out_stem,
options.label1, options.label2)
def jointplot(ref_cors, exp_cors, out_pdf, label1, label2):
vmin = min(np.min(ref_cors), np.min(exp_cors))
vmax = max(np.max(ref_cors), np.max(exp_cors))
vspan = vmax - vmin
vbuf = vspan * 0.1
vmin -= vbuf
vmax += vbuf
g = sns.jointplot(ref_cors, exp_cors, space=0)
eps = 0.05
g.ax_joint.text(1-eps, eps, 'Mean: %.4f' % np.mean(ref_cors),
horizontalalignment='right', transform=g.ax_joint.transAxes)
g.ax_joint.text(eps, 1-eps, 'Mean: %.4f' % np.mean(exp_cors),
verticalalignment='top', transform=g.ax_joint.transAxes)
g.ax_joint.plot([vmin,vmax], [vmin,vmax], linestyle='--', color='orange')
g.ax_joint.set_xlabel(label1)
g.ax_joint.set_ylabel(label2)
plt.tight_layout(w_pad=0, h_pad=0)
plt.savefig(out_pdf)
def read_metrics(acc_glob_str, metric='pearsonr'):
rep_cors = []
acc_files = natsorted(glob.glob(acc_glob_str))
for acc_file in acc_files:
try:
# tf2 version
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
rep_cors.append(acc_df.loc[:,metric].mean())
except:
# tf1 version
cors = []
for line in open(acc_file):
a = line.split()
cors.append(float(a[3]))
rep_cors.append(np.mean(cors))
cors_mean = np.mean(rep_cors)
cors_stdm = np.std(rep_cors) / np.sqrt(len(rep_cors))
return rep_cors, cors_mean, cors_stdm
def stat_tests(ref_cors, exp_cors, alternative):
_, mwp = wilcoxon(ref_cors, exp_cors, alternative=alternative)
tt, tp = ttest_rel(ref_cors, exp_cors)
if alternative == 'less':
if tt > 0:
tp = 1 - (1-tp)/2
else:
tp /= 2
elif alternative == 'greater':
if tt <= 0:
tp /= 2
else:
tp = 1 - (1-tp)/2
return mwp, tp
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1d6583954d27d890d2ca28e0d0374a7fff088f35 | a721e4ca65b79ce725c7b5b43539c963a3b55290 | /Happy_Ladybugs.py | 29beb1c6e1f59afc9ca77efce3b5bba9bf91db6d | [] | no_license | joydas65/Hackerrank-Problems | 0832d7cfd1de7e5df4dba76326ede735edc9afea | a16b3b0ebb65e7597f8f6417047da4d415a818c7 | refs/heads/master | 2022-06-21T12:47:55.241409 | 2022-06-18T18:21:08 | 2022-06-18T18:21:08 | 159,071,834 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | for _ in range(int(input())):
n = int(input())
b = input()
d = dict()
flag = 0
for i in b:
if i != '_':
d[i] = 0
for i in b:
if i != '_':
d[i] += 1
for i in d.keys():
if d[i] == 1:
flag = 1
break
if flag == 1:
print("NO")
else:
        if b.count('_') == 0:
            flag = 0
            # With no empty cell nothing can move, so the endpoints must
            # already match their only neighbour as well.
            if len(b) > 1 and (b[0] != b[1] or b[-1] != b[-2]):
                flag = 1
            for i in range(1,len(b)-1):
                if b[i] == b[i-1]:
                    continue
                if b[i] == b[i+1]:
                    continue
                flag = 1
                break
if flag == 0:
print("YES")
else:
print("NO")
else:
print("YES")
| [
"[email protected]"
] | |
aeb6d7fa4b40ab287c90e598989a0f927c0abae8 | cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3 | /Learning_Python_Generators/Exercise_Files/Ch1/01_03/reverse_capital.py | 90ff072bda4cd2f9b2ffdfe04074e5da1265685b | [] | no_license | sedstan/LinkedIn-Learning-Python-Course | 2b936d0f00703a6e66a872220ed47572123dc7fd | b4584218355bf07aa3d2939b950911eae67adb0b | refs/heads/master | 2021-10-11T10:19:13.675662 | 2019-01-24T17:55:20 | 2019-01-24T17:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # list of names
names_list = ['Adam','Anne','Barry','Brianne','Charlie','Cassandra','David','Dana']
# too long
# reverse_uppercase = (name[::-1] for name in (name.upper() for name in names_list))
# breaking it up
uppercase = (name.upper() for name in names_list)
reverse_uppercase = (name[::-1] for name in uppercase)
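# Materialising the chained generators shows the combined effect
# (illustrative check, not in the original snippet):
print(list(reverse_uppercase))  # ['MADA', 'ENNA', 'YRRAB', 'ENNAIRB', ...]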
| [
"[email protected]"
] | |
f9ab9a7d39cfc7a0dec43be133584c96e9afa1ef | 93b3a69da031d3fa8402ca787cd5d22db9c09bb9 | /__init__.py | 68076caa6613a8ece58a6522f432a1bd21625059 | [] | no_license | Teifion/communique | ab93335d7042776410da34ac28ff8cacda62f73f | d7f96d6c9c524fa5ea03dce37edf57f1424d6710 | refs/heads/master | 2021-01-01T05:49:43.936323 | 2013-10-10T21:58:41 | 2013-10-10T21:58:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | from .api import register, send
def communique_nimblescan():
try:
from ..nimblescan import api as ns_api
except ImportError:
try:
from ...nimblescan import api as ns_api
except ImportError:
return
ns_api.register('communique.home', "Notifications", ['communique'], (lambda r: True), ns_api.make_forwarder("communique.home"))
def includeme(config):
from . import views
communique_nimblescan()
"""
Pass this to your configurator object like so:
from . import communique
config.include(communique, route_prefix="communique")
"""
# Standard views
config.add_route('communique.home', '/home')
config.add_route('communique.action', '/action/{action}')
config.add_route('communique.view', '/view/{notification_id}')
config.add_route('communique.mini_home', '/mini_home')
config.add_route('communique.home_count', '/home_count/{user_id}')
config.add_route('communique.create', '/create')
# Now link the views
config.add_view(views.home, route_name='communique.home', renderer='templates/home.pt', permission='loggedin')
config.add_view(views.action, route_name='communique.action', permission='loggedin')
config.add_view(views.mini_home, route_name='communique.mini_home', renderer='string', permission='loggedin')
config.add_view(views.home_count, route_name='communique.home_count', renderer='string')
config.add_view(views.view, route_name='communique.view', renderer="string", permission='loggedin')
# Not sure what you use but this is the dev type permission I've got on my system
config.add_view(views.create, route_name='communique.create', permission='code')
return config
| [
"[email protected]"
] | |
f45761d59529cdbe88da05d923c51475464181fa | 22fc1933698e339f9de1c7cd8eb0062ef3a8711e | /examples/old-examples/snippets/mgl_new_example_glut.py | 1dd466a064a85f41965edb3b9bfdbb29ba08927b | [
"MIT"
] | permissive | einarf/ModernGL | f9a4929e529c560ca3dcc139994b7ff84a271a3f | e4a7f53289043a0ac06130c67edc75b878484a0e | refs/heads/master | 2020-04-14T03:53:20.054962 | 2019-02-28T07:05:19 | 2019-02-28T07:05:19 | 163,619,410 | 1 | 0 | MIT | 2018-12-30T21:40:33 | 2018-12-30T21:40:32 | null | UTF-8 | Python | false | false | 958 | py | import sys
import struct
from OpenGL.GLUT import (
GLUT_DEPTH, GLUT_DOUBLE, GLUT_RGB,
glutCreateWindow, glutDisplayFunc, glutInit, glutInitDisplayMode,
glutInitWindowSize, glutMainLoop, glutSwapBuffers,
)
import ModernGL
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(800, 600)
glutCreateWindow(b'')
ctx = ModernGL.create_context()
vert = ctx.vertex_shader('''
#version 330
in vec2 vert;
void main() {
gl_Position = vec4(vert, 0.0, 1.0);
}
''')
frag = ctx.fragment_shader('''
#version 330
out vec4 color;
void main() {
color = vec4(0.30, 0.50, 1.00, 1.0);
}
''')
prog = ctx.program([vert, frag])
vbo = ctx.buffer(struct.pack('6f', 0.0, 0.8, -0.6, -0.8, 0.6, -0.8))
vao = ctx.simple_vertex_array(prog, vbo, ['vert'])
def display():
ctx.clear(0.9, 0.9, 0.9)
vao.render()
glutSwapBuffers()
glutDisplayFunc(display)
glutMainLoop()
| [
"[email protected]"
] | |
de7019587d7e630b0b56aa865769304de2aa1f8f | 6b79174551f8c5eee7ba5c3d4efe3c921b281d62 | /models/register/employee.py | 82aee95339589340a18b0e04bce9c8eac6b72852 | [] | no_license | Trilokan/manjal | 5d99dea0703cdf4e4f4553b2710cfb3ac5f05023 | 064fd6f3ad429837dd46c59790a54927e9622e1b | refs/heads/master | 2020-05-04T20:45:08.449320 | 2019-05-06T12:41:50 | 2019-05-06T12:41:50 | 179,449,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,783 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
BLOOD_GROUP = [('a+', 'A+'), ('b+', 'B+'), ('ab+', 'AB+'), ('o+', 'O+'),
('a-', 'A-'), ('b-', 'B-'), ('ab-', 'AB-'), ('o-', 'O-')]
GENDER = [('male', 'Male'), ('female', 'Female')]
MARITAL_STATUS = [('single', 'Single'), ('married', 'Married'), ('divorced', 'Divorced')]
class Employee(models.Model):
_name = "hr.employee"
name = fields.Char(string="Name", required=True)
employee_uid = fields.Char(string="Employee ID", readonly=True)
image = fields.Binary(string="Image")
small_image = fields.Binary(string="Small Image")
user_id = fields.Many2one(comodel_name="res.users", string="User")
person_id = fields.Many2one(comodel_name="arc.person", string="Person")
# Contact Details
email = fields.Char(string="e-Mail")
mobile = fields.Char(string="Mobile")
phone = fields.Char(string="Phone")
# Address in Detail
door_no = fields.Char(string="Door No")
building_name = fields.Char(string="Building Name")
street_1 = fields.Char(string="Street 1")
street_2 = fields.Char(string="Street 2")
locality = fields.Char(string="locality")
landmark = fields.Char(string="landmark")
city = fields.Char(string="City")
state_id = fields.Many2one(comodel_name="res.country.state", string="State",
default=lambda self: self.env.user.company_id.state_id.id)
country_id = fields.Many2one(comodel_name="res.country", string="Country")
pin_code = fields.Char(string="Pincode")
# Account Details
bank = fields.Char(string="Bank")
account_no = fields.Char(string="Account No")
aadhaar_card = fields.Char(string="Aadhaar Card")
pan_card = fields.Char(string="Pan Card")
driving_license = fields.Char(string="Driving License")
passport = fields.Char(string="Passport")
epf_no = fields.Char(string="EPF No")
epf_nominee = fields.Char(string="EPF Nominee")
identity_ids = fields.One2many(comodel_name="arc.identity", inverse_name="employee_id")
# HR Details
doj = fields.Date(string="Date of Joining", required=True)
date_of_relieving = fields.Date(string="Date of Relieving")
department_id = fields.Many2one(comodel_name="hr.department", string="Department")
designation_id = fields.Many2one(comodel_name="hr.designation", string="Designation")
reporting_to_id = fields.Many2one(comodel_name="hr.employee", string="Reporting To")
category_id = fields.Many2one(comodel_name="hr.category", string="Employee Category", required=True)
qualification_ids = fields.One2many(comodel_name="arc.qualification", inverse_name="employee_id")
experience_ids = fields.One2many(comodel_name="hr.experience", inverse_name="employee_id")
# Personnel Details
age = fields.Integer(string="Age")
blood_group = fields.Selection(selection=BLOOD_GROUP, string="Blood Group")
marital_status = fields.Selection(selection=MARITAL_STATUS, string="Marital Status")
gender = fields.Selection(selection=GENDER, string="Gender")
caste = fields.Char(string="Caste")
religion_id = fields.Many2one(comodel_name="arc.religion", string="Religion")
physically_challenged = fields.Boolean(string="Physically Challenged")
nationality_id = fields.Many2one(comodel_name="res.country")
mother_tongue_id = fields.Many2one(comodel_name="arc.language", string="Mother Tongue")
language_known_ids = fields.Many2many(comodel_name="arc.language", string="Language Known")
personnel_mobile = fields.Char(string="Personnel Mobile")
personnel_email = fields.Char(string="Personnel Email")
permanent_address = fields.Text(string="Permanent Address")
family_member_ids = fields.One2many(comodel_name="arc.address", inverse_name="employee_id")
# Leave
leave_level_id = fields.Many2one(comodel_name="leave.level", string="Leave Level")
# Attachment
attachment_ids = fields.Many2many(comodel_name="ir.attachment", string="Attachment")
# Smart Button
# View Complaint
def action_view_complaint(self):
pass
# View Promotion
def action_view_promotion(self):
pass
# View Payslip
def action_view_payslip(self):
pass
# View Work Sheet
def action_view_work_sheet(self):
pass
# View Attendance
def action_view_attendance(self):
pass
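    # update_person_address mirrors the employee's contact fields onto the
    # linked arc.person record; write() and create() below call it so the
    # two models stay in sync.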
def update_person_address(self):
recs = {}
recs["name"] = self.name
recs["person_uid"] = self.employee_uid
recs["image"] = self.image
recs["small_image"] = self.small_image
recs["email"] = self.email
recs["mobile"] = self.mobile
recs["phone"] = self.phone
recs["door_no"] = self.door_no
recs["building_name"] = self.building_name
recs["street_1"] = self.street_1
recs["street_2"] = self.street_2
recs["locality"] = self.locality
recs["landmark"] = self.landmark
recs["city"] = self.city
recs["state_id"] = self.state_id.id
recs["country_id"] = self.country_id.id
recs["pin_code"] = self.pin_code
recs["is_employee"] = True
self.person_id.write(recs)
@api.multi
def write(self, vals):
rec = super(Employee, self).write(vals)
self.update_person_address()
return rec
@api.model
def create(self, vals):
data = {"person_uid": self.env["ir.sequence"].next_by_code(self._name),
"is_employee": True,
"name": vals["name"]}
data.update(vals)
person_id = self.env["arc.person"].create(data)
vals["person_id"] = person_id.id
vals["employee_uid"] = data["person_uid"]
return super(Employee, self).create(vals)
| [
"[email protected]"
] | |
0e251945353b6973051c3cbc43b6db7207b75a99 | 5a07e47aa8c065622d8d384c6b3b17981b24f0ae | /Batch_6_30/bye.py | ee8b169ee8edd349c32d61494e1f173ffd5f661d | [] | no_license | neelshrma/Old_Python_Codes | 629a7c113d56e96014c0d4b8d11126c79789335c | 410de97e8d581e55fe53822528a8e38f15e349ef | refs/heads/master | 2020-03-29T22:31:03.993335 | 2018-09-25T13:34:25 | 2018-09-25T13:34:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
x = int(input("Enter X : "))
y = int(input("Enter Y : "))
z = int(input("Enter Z : "))
print("Value of x = ",x)
print("Value of x = ",y)
print("Value of z = ",z)
print("Yeah vim is awesome")
| [
"[email protected]"
] | |
d635ad120b4155bf1fcdd14cfcada14d8a4f3fb9 | f6eb98db168908a99e5c62f440d39acca608b934 | /python/pyspark/mllib/recommendation.py | 1ee41df2831f55611298884b56c0d69095ec94c0 | [] | no_license | BigDataTeng/spark-parent_2.11 | 64abb51cf77a16d86a2a5314dafb4f33e03d98f3 | 9e679b5daefea69a29236305e1c8bf2e21ad9190 | refs/heads/master | 2022-12-14T09:06:08.199862 | 2019-08-27T09:48:37 | 2019-08-27T09:48:37 | 204,673,011 | 0 | 0 | null | 2022-12-05T23:35:44 | 2019-08-27T09:53:23 | Scala | UTF-8 | Python | false | false | 12,047 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
from collections import namedtuple
from pyspark import SparkContext, since
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, inherit_doc
from pyspark.mllib.util import JavaLoader, JavaSaveable
from pyspark.sql import DataFrame
__all__ = ['MatrixFactorizationModel', 'ALS', 'Rating']
class Rating(namedtuple("Rating", ["user", "product", "rating"])):
"""
Represents a (user, product, rating) tuple.
>>> r = Rating(1, 2, 5.0)
>>> (r.user, r.product, r.rating)
(1, 2, 5.0)
>>> (r[0], r[1], r[2])
(1, 2, 5.0)
.. versionadded:: 1.2.0
"""
def __reduce__(self):
return Rating, (int(self.user), int(self.product), float(self.rating))
@inherit_doc
class MatrixFactorizationModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""A matrix factorisation model trained by regularized alternating
least-squares.
>>> r1 = (1, 1, 1.0)
>>> r2 = (1, 2, 2.0)
>>> r3 = (2, 1, 2.0)
>>> ratings = sc.parallelize([r1, r2, r3])
>>> model = ALS.trainImplicit(ratings, 1, seed=10)
>>> model.predict(2, 2)
0.4...
>>> testset = sc.parallelize([(1, 2), (1, 1)])
>>> model = ALS.train(ratings, 2, seed=0)
>>> model.predictAll(testset).collect()
[Rating(user=1, product=1, rating=1.0...), Rating(user=1, product=2, rating=1.9...)]
>>> model = ALS.train(ratings, 4, seed=10)
>>> model.userFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> model.recommendUsers(1, 2)
[Rating(user=2, product=1, rating=1.9...), Rating(user=1, product=1, rating=1.0...)]
>>> model.recommendProducts(1, 2)
[Rating(user=1, product=2, rating=1.9...), Rating(user=1, product=1, rating=1.0...)]
>>> model.rank
4
>>> first_user = model.userFeatures().take(1)[0]
>>> latents = first_user[1]
>>> len(latents)
4
>>> model.productFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> first_product = model.productFeatures().take(1)[0]
>>> latents = first_product[1]
>>> len(latents)
4
>>> products_for_users = model.recommendProductsForUsers(1).collect()
>>> len(products_for_users)
2
>>> products_for_users[0]
(1, (Rating(user=1, product=2, rating=...),))
>>> users_for_products = model.recommendUsersForProducts(1).collect()
>>> len(users_for_products)
2
>>> users_for_products[0]
(1, (Rating(user=2, product=1, rating=...),))
>>> model = ALS.train(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
3.73...
>>> df = sqlContext.createDataFrame([Rating(1, 1, 1.0), Rating(1, 2, 2.0), Rating(2, 1, 2.0)])
>>> model = ALS.train(df, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
3.73...
>>> model = ALS.trainImplicit(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
0.4...
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = MatrixFactorizationModel.load(sc, path)
>>> sameModel.predict(2, 2)
0.4...
>>> sameModel.predictAll(testset).collect()
[Rating(...
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 0.9.0
"""
@since("0.9.0")
def predict(self, user, product):
"""
Predicts rating for the given user and product.
"""
return self._java_model.predict(int(user), int(product))
@since("0.9.0")
def predictAll(self, user_product):
"""
Returns a list of predicted ratings for input user and product
pairs.
"""
assert isinstance(user_product, RDD), "user_product should be RDD of (user, product)"
first = user_product.first()
assert len(first) == 2, "user_product should be RDD of (user, product)"
user_product = user_product.map(lambda u_p: (int(u_p[0]), int(u_p[1])))
return self.call("predict", user_product)
@since("1.2.0")
def userFeatures(self):
"""
Returns a paired RDD, where the first element is the user and the
second is an array of features corresponding to that user.
"""
return self.call("getUserFeatures").mapValues(lambda v: array.array('d', v))
@since("1.2.0")
def productFeatures(self):
"""
Returns a paired RDD, where the first element is the product and the
second is an array of features corresponding to that product.
"""
return self.call("getProductFeatures").mapValues(lambda v: array.array('d', v))
@since("1.4.0")
def recommendUsers(self, product, num):
"""
Recommends the top "num" number of users for a given product and
returns a list of Rating objects sorted by the predicted rating in
descending order.
"""
return list(self.call("recommendUsers", product, num))
@since("1.4.0")
def recommendProducts(self, user, num):
"""
Recommends the top "num" number of products for a given user and
returns a list of Rating objects sorted by the predicted rating in
descending order.
"""
return list(self.call("recommendProducts", user, num))
def recommendProductsForUsers(self, num):
"""
Recommends the top "num" number of products for all users. The
number of recommendations returned per user may be less than "num".
"""
return self.call("wrappedRecommendProductsForUsers", num)
def recommendUsersForProducts(self, num):
"""
Recommends the top "num" number of users for all products. The
number of recommendations returned per product may be less than
"num".
"""
return self.call("wrappedRecommendUsersForProducts", num)
@property
@since("1.4.0")
def rank(self):
"""Rank for the features in this model"""
return self.call("rank")
@classmethod
@since("1.3.1")
def load(cls, sc, path):
"""Load a model from the given path"""
model = cls._load_java(sc, path)
wrapper = sc._jvm.org.apache.spark.mllib.api.python.MatrixFactorizationModelWrapper(model)
return MatrixFactorizationModel(wrapper)
class ALS(object):
"""Alternating Least Squares matrix factorization
.. versionadded:: 0.9.0
"""
@classmethod
def _prepare(cls, ratings):
if isinstance(ratings, RDD):
pass
elif isinstance(ratings, DataFrame):
ratings = ratings.rdd
else:
raise TypeError("Ratings should be represented by either an RDD or a DataFrame, "
"but got %s." % type(ratings))
first = ratings.first()
if isinstance(first, Rating):
pass
elif isinstance(first, (tuple, list)):
ratings = ratings.map(lambda x: Rating(*x))
else:
raise TypeError("Expect a Rating or a tuple/list, but got %s." % type(first))
return ratings
@classmethod
@since("0.9.0")
def train(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, nonnegative=False,
seed=None):
"""
Train a matrix factorization model given an RDD of ratings by users
for a subset of products. The ratings matrix is approximated as the
product of two lower-rank matrices of a given rank (number of
features). To solve for these features, ALS is run iteratively with
a configurable level of parallelism.
:param ratings:
RDD of `Rating` or (userID, productID, rating) tuple.
:param rank:
Number of features to use (also referred to as the number of latent factors).
:param iterations:
Number of iterations of ALS.
(default: 5)
:param lambda_:
Regularization parameter.
(default: 0.01)
:param blocks:
Number of blocks used to parallelize the computation. A value
of -1 will use an auto-configured number of blocks.
(default: -1)
:param nonnegative:
A value of True will solve least-squares with nonnegativity
constraints.
(default: False)
:param seed:
Random seed for initial matrix factorization model. A value
of None will use system time as the seed.
(default: None)
"""
model = callMLlibFunc("trainALSModel", cls._prepare(ratings), rank, iterations,
lambda_, blocks, nonnegative, seed)
return MatrixFactorizationModel(model)
@classmethod
@since("0.9.0")
def trainImplicit(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, alpha=0.01,
nonnegative=False, seed=None):
"""
Train a matrix factorization model given an RDD of 'implicit
preferences' of users for a subset of products. The ratings matrix
is approximated as the product of two lower-rank matrices of a
given rank (number of features). To solve for these features, ALS
is run iteratively with a configurable level of parallelism.
:param ratings:
RDD of `Rating` or (userID, productID, rating) tuple.
:param rank:
Number of features to use (also referred to as the number of latent factors).
:param iterations:
Number of iterations of ALS.
(default: 5)
:param lambda_:
Regularization parameter.
(default: 0.01)
:param blocks:
Number of blocks used to parallelize the computation. A value
of -1 will use an auto-configured number of blocks.
(default: -1)
:param alpha:
A constant used in computing confidence.
(default: 0.01)
:param nonnegative:
A value of True will solve least-squares with nonnegativity
constraints.
(default: False)
:param seed:
Random seed for initial matrix factorization model. A value
of None will use system time as the seed.
(default: None)
"""
model = callMLlibFunc("trainImplicitALSModel", cls._prepare(ratings), rank,
iterations, lambda_, blocks, alpha, nonnegative, seed)
return MatrixFactorizationModel(model)
def _test():
import doctest
import pyspark.mllib.recommendation
from pyspark.sql import SQLContext
globs = pyspark.mllib.recommendation.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
ae8681babbdc166cc37dcc1a309547c0d4cd968b | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/resource_tag.py | 9f8339f814ea3c3cb36b356419316355ec49a31a | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,930 | py | # coding: utf-8
import pprint
import re
import six
class ResourceTag:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key': 'str',
'value': 'str'
}
attribute_map = {
'key': 'key',
'value': 'value'
}
def __init__(self, key=None, value=None):
"""ResourceTag - a model defined in huaweicloud sdk"""
self._key = None
self._value = None
self.discriminator = None
self.key = key
self.value = value
@property
def key(self):
"""Gets the key of this ResourceTag.
        Key: the field to match. Currently the only supported value of key is "resource_name", in which case value is the cloud server name. - Keys must not repeat; value is the value to match. - This field takes fixed dictionary values. - An empty string is not allowed.
:return: The key of this ResourceTag.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this ResourceTag.
        Key: the field to match. Currently the only supported value of key is "resource_name", in which case value is the cloud server name. - Keys must not repeat; value is the value to match. - This field takes fixed dictionary values. - An empty string is not allowed.
:param key: The key of this ResourceTag.
:type: str
"""
self._key = key
@property
def value(self):
"""Gets the value of this ResourceTag.
        Value. Currently the only supported value of key is "resource_name", in which case value is the cloud server name. - Each value is at most 255 Unicode characters long. - Must not be empty.
:return: The value of this ResourceTag.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ResourceTag.
        Value. Currently the only supported value of key is "resource_name", in which case value is the cloud server name. - Each value is at most 255 Unicode characters long. - Must not be empty.
:param value: The value of this ResourceTag.
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceTag):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
b1baa12a4f9b71aa7c00c72ad4e34ce790b6cb4e | b66746b1d1c0a2800faab41488f2a67ed43255b8 | /Knn.py | 5c5aa5ea0c3cc8ec1b0c652f9d7fde2654e23aea | [] | no_license | yzsxjhft/emg | 28a0501810a86962f6a510fbe9f6d22346b4d963 | 96075c7124d0e50983221a7b4b4a8a5fba7bb352 | refs/heads/master | 2020-04-14T13:00:41.374480 | 2019-09-28T07:02:02 | 2019-09-28T07:02:02 | 163,856,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | import numpy as np
import sys
class Knn:
def __init__(self, k=5):
self.k = k
self.train_data = None
self.train_target = None
def fit(self, train_data, train_target):
self.train_data = train_data
self.train_target = train_target
def predict(self, test_data):
y = list()
for params in test_data:
distance = list()
for i in range(len(self.train_data)):
data = self.train_data[i]
dist = 0
for j in range(len(data)):
row = data[j]
dist += self.dtw_distance(params[j], row)
distance.append(dist/8.0)
            indices = np.argsort(np.array(distance), axis=0)[:self.k]
            labels = np.array([self.train_target[x] for x in indices])
y.append(np.argmax(np.bincount(labels)))
return y
def dtw_distance(self, ts_a, ts_b):
"""Returns the DTW similarity distance between two 2-D
timeseries numpy arrays.
Arguments
---------
ts_a, ts_b : array of shape [n_samples, n_timepoints]
Two arrays containing n_samples of timeseries data
whose DTW distance between each sample of A and B
will be compared
d : DistanceMetric object (default = abs(x-y))
the distance measure used for A_i - B_j in the
DTW dynamic programming function
Returns
-------
DTW distance between A and B
"""
d = lambda x, y: abs(x - y)
max_warping_window = 10000
# Create cost matrix via broadcasting with large int
ts_a, ts_b = np.array(ts_a), np.array(ts_b)
M, N = len(ts_a), len(ts_b)
cost = sys.maxsize * np.ones((M, N))
# Initialize the first row and column
cost[0, 0] = d(ts_a[0], ts_b[0])
for i in range(1, M):
cost[i, 0] = cost[i - 1, 0] + d(ts_a[i], ts_b[0])
for j in range(1, N):
cost[0, j] = cost[0, j - 1] + d(ts_a[0], ts_b[j])
# Populate rest of cost matrix within window
for i in range(1, M):
for j in range(max(1, i - max_warping_window),
min(N, i + max_warping_window)):
choices = cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j]
cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])
# Return DTW distance given window
return cost[-1, -1]
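# --- minimal usage sketch (added; the 2-channel toy data is illustrative,
# and the 8.0 divisor in predict() reflects the original 8-channel EMG
# setup, so distances here are scaled but the nearest neighbour is the same) ---
if __name__ == '__main__':
    train = [[[0, 1, 2], [2, 1, 0]], [[5, 5, 5], [5, 5, 5]]]
    labels = [0, 1]
    clf = Knn(k=1)
    clf.fit(train, labels)
    print(clf.predict([[[0, 1, 2], [2, 1, 0]]]))  # -> [0]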
| [
"="
] | = |
5f75c3f5fff85007685d9fa593e2ad9569ba3121 | 866b6a443c10038b91346d86f6f5054d08de66cc | /main.py | 57fe09173644599c48c67cde6f8901aba4bd1393 | [] | no_license | Introduction-to-Programming-OSOWSKI/2-2-addition-checker-DevinGoede23 | b7422b62f1681713c6d8eea2211be2b11d734f4d | 5248b1b21f55f93bd87aaac4e36f7cfb6ec77986 | refs/heads/master | 2023-08-30T20:08:42.350900 | 2021-11-11T20:25:25 | 2021-11-11T20:25:25 | 427,133,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | #WRITE YOUR CODE HERE | [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
1e95b79a26ebe3af47d29ddc95baa5d79727e5b6 | 6fe0a724e9c5d3975ddb95eebe3032eb213f8840 | /tensorflow_datasets/core/visualization/show_examples.py | 6d9ac13abe6f9146269e8b465bd6b53e02a37b35 | [
"Apache-2.0"
] | permissive | Yohnhahahage/datasets | caf8b7001046bbf1729d016abdeae7f69d75152b | 08cf7709095860fe50ec10ea503c4095b69a5cb1 | refs/heads/master | 2022-12-09T09:36:40.123816 | 2020-09-22T19:03:15 | 2020-09-22T19:03:15 | 297,893,222 | 1 | 0 | Apache-2.0 | 2020-09-23T07:46:27 | 2020-09-23T07:46:26 | null | UTF-8 | Python | false | false | 5,033 | py | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Show example util.
"""
from typing import Any
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import dataset_info
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import splits
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.visualization import image_visualizer
from tensorflow_metadata.proto.v0 import statistics_pb2
_ALL_VISUALIZERS = [
image_visualizer.ImageGridVisualizer(),
]
def show_examples(
ds: tf.data.Dataset,
ds_info: dataset_info.DatasetInfo,
**options_kwargs: Any
):
"""Visualize images (and labels) from an image classification dataset.
This function is for interactive use (Colab, Jupyter). It displays and return
a plot of (rows*columns) images from a tf.data.Dataset.
Usage:
```python
ds, ds_info = tfds.load('cifar10', split='train', with_info=True)
fig = tfds.show_examples(ds, ds_info)
```
Args:
ds: `tf.data.Dataset`. The tf.data.Dataset object to visualize. Examples
should not be batched. Examples will be consumed in order until
(rows * cols) are read or the dataset is consumed.
ds_info: The dataset info object to which extract the label and features
info. Available either through `tfds.load('mnist', with_info=True)` or
`tfds.builder('mnist').info`
**options_kwargs: Additional display options, specific to the dataset type
to visualize. Are forwarded to `tfds.visualization.Visualizer.show`.
See the `tfds.visualization` for a list of available visualizers.
Returns:
fig: The `matplotlib.Figure` object
"""
if not isinstance(ds_info, dataset_info.DatasetInfo): # Arguments inverted
# `absl.logging` does not appear on Colab by default, so uses print instead.
print('WARNING: For consistency with `tfds.load`, the `tfds.show_examples` '
'signature has been modified from (info, ds) to (ds, info).\n'
'The old signature is deprecated and will be removed. '
'Please change your call to `tfds.show_examples(ds, info)`')
ds, ds_info = ds_info, ds
# Pack `as_supervised=True` datasets
if (
ds_info.supervised_keys
and isinstance(ds.element_spec, tuple)
and len(ds.element_spec) == 2
):
x_key, y_key = ds_info.supervised_keys
ds = ds.map(lambda x, y: {x_key: x, y_key: y})
for visualizer in _ALL_VISUALIZERS:
if visualizer.match(ds_info):
return visualizer.show(ds, ds_info, **options_kwargs)
raise ValueError(
'Visualisation not supported for dataset `{}`'.format(ds_info.name)
)
def show_statistics(
ds_info: dataset_info.DatasetInfo,
split: splits.Split = splits.Split.TRAIN,
disable_logging: bool = True,
) -> None:
"""Display the datasets statistics on a Colab/Jupyter notebook.
`tfds.show_statistics` is a wrapper around
[tensorflow_data_validation](https://www.tensorflow.org/tfx/data_validation/get_started)
which calls `tfdv.visualize_statistics`. Statistics are displayed using
[FACETS OVERVIEW](https://pair-code.github.io/facets/).
Usage:
```
builder = tfds.builder('mnist')
tfds.show_statistics(builder.info)
```
Or:
```
ds, ds_info = tfds.load('mnist', with_info)
tfds.show_statistics(ds_info)
```
Note: In order to work, `tensorflow_data_validation` must be installed and
the dataset info object must contain the statistics. For "official" datasets,
only datasets which have been added/updated recently will contains statistics.
For "custom" datasets, you need to generate the dataset with
`tensorflow_data_validation` installed to have the statistics.
Args:
ds_info: The `tfds.core.DatasetInfo` object containing the statistics.
split: Split for which generate the statistics.
disable_logging: `bool`, if True, disable the tfdv logs which can be
too verbose.
Returns:
`None`
"""
tfdv = lazy_imports_lib.lazy_imports.tensorflow_data_validation
if split not in ds_info.splits:
raise ValueError(
'Invalid requested split: \'{}\'. Only {} are availables.'.format(
split, list(ds_info.splits)))
# Creates the statistics.
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.add().CopyFrom(ds_info.splits[split].statistics)
with utils.disable_logging() if disable_logging else utils.nullcontext():
return tfdv.visualize_statistics(statistics)
| [
"[email protected]"
] | |
7e508f48a7e431f6b7f9011cf995cb87ad12f846 | 973da85ebe773dc35d6539397e754726e87171b8 | /lcopt/parameters.py | ea0570fdab8b51a2f67fdec7f0d0392e27c0746c | [
"BSD-3-Clause"
] | permissive | pjamesjoyce/lcopt | 14c49f79a43e9dce220ef760fa9b207105779568 | a167ecfa258e62e91af7dac6cbf70be5d63fff93 | refs/heads/development | 2023-05-22T20:45:50.252163 | 2020-04-02T09:53:07 | 2020-04-02T09:53:07 | 76,573,839 | 23 | 6 | BSD-3-Clause | 2018-10-04T13:38:48 | 2016-12-15T15:55:48 | JavaScript | UTF-8 | Python | false | false | 4,633 | py | from bw2parameters.parameter_set import ParameterSet
from collections import OrderedDict
from copy import deepcopy
class LcoptParameterSet(ParameterSet):
"""
Subclass of `bw2parameters.parameter_set.ParameterSet` that takes a `lcopt.LcoptModel` and delegates parameter ordering and evaluation to `bw2parameters`
TODO: Add more documentation and write tests
"""
def __init__(self, modelInstance):
self.modelInstance = modelInstance
self.norm_params = self.normalise_parameters()
self.check_production_parameters_exist()
self.all_params = {**self.modelInstance.params, **self.modelInstance.production_params, **self.norm_params, **self.modelInstance.allocation_params}
self.bw2_params, self.bw2_global_params, self.bw2_export_params = self.lcopt_to_bw2_params(0)
super().__init__(self.bw2_params, self.bw2_global_params)
self.evaluated_parameter_sets = self.preevaluate_exchange_params()
def lcopt_to_bw2_params(self, ps_key):
k0 = list(self.modelInstance.parameter_sets.keys())[ps_key]
ps1 = self.modelInstance.parameter_sets[k0]
bw2_params = {k:{(x if x != 'function' else 'formula'):y for x, y in v.items()} for k,v in self.all_params.items()}
for k in bw2_params.keys():
bw2_params[k]['amount'] = ps1.get(k,0)
bw2_global_params = {x['name']: ps1.get(x['name'],x['default']) for x in self.modelInstance.ext_params}
bw2_export_params = []
for k, v in bw2_params.items():
to_append = {'name': k}
if v.get('formula'):
to_append['formula'] = v['formula']
else:
to_append['amount'] = v['amount']
bw2_export_params.append(to_append)
for k, v in bw2_global_params.items():
bw2_export_params.append({'name':k, 'amount':v})
return bw2_params, bw2_global_params, bw2_export_params
def normalise_parameters(self):
param_copy = deepcopy(self.modelInstance.params)
#production_params = deepcopy(self.modelInstance.production_params)
#allocation_params = deepcopy(self.modelInstance.allocation_params)
norm_params = OrderedDict()
for k, v in param_copy.items():
norm_params['n_{}'.format(k)] = {}
for key, item in v.items():
if key == 'function':
if not item:
norm_function = '{} / {}'.format(k, v['normalisation_parameter'])
else:
norm_function = '({}) / {}'.format(item, v['normalisation_parameter'])
norm_params['n_{}'.format(k)][key] = norm_function
else:
norm_params['n_{}'.format(k)][key] = item
return norm_params
def preevaluate_exchange_params(self):
evaluated_params = OrderedDict()
for n, k in enumerate(self.modelInstance.parameter_sets.keys()):
self.params, self.global_params, _ = self.lcopt_to_bw2_params(n)
self.evaluate_and_set_amount_field()
this_set = {}
for j, v in self.params.items():
this_set[j] = v['amount']
evaluated_params[k] = this_set
self.params, self.global_params , _ = self.lcopt_to_bw2_params(0)
self.evaluate_and_set_amount_field()
return evaluated_params
def check_production_parameters_exist(self):
""" old versions of models won't have produciton parameters, leading to ZeroDivision errors and breaking things"""
for k, v in self.modelInstance.parameter_sets.items():
for p_id in self.modelInstance.production_params.keys():
if v.get(p_id):
#print('{} already exists'.format(p_id))
pass
else:
#print('No production parameter called {} - setting it to 1'.format(p_id))
v[p_id] = 1.0
for p_id in self.modelInstance.allocation_params.keys():
if v.get(p_id):
#print('{} already exists'.format(p_id))
pass
else:
                    #print('No allocation parameter called {} - setting it to 1'.format(p_id))
v[p_id] = 1.0 | [
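# --- hedged usage sketch (an LcoptModel instance comes from lcopt itself;
# the model name below is illustrative) ---
#   model = LcoptModel('example')
#   ps = LcoptParameterSet(model)
#   ps.evaluated_parameter_sets   # OrderedDict: parameter-set name -> {param: amount}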
"[email protected]"
] | |
9298cc195bdd4bd86918dd770c297a04612887b4 | 1ebf853638e6e0344e3498f5840f055d29d5a311 | /code/broadcast_send.py | 1a39a6443466904ea4c72734c4f67e1909024233 | [] | no_license | LzWaiting/02.PythonNet | 07d8b159d5fe3f89100dc1daf262b46bfa7d6fcb | adc317b5c19c339395fbb50c94f843880f03af7a | refs/heads/master | 2020-06-09T01:56:20.984921 | 2019-06-23T12:52:31 | 2019-06-23T12:52:31 | 193,347,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from socket import *
from time import sleep
# Set the destination (broadcast) address
dest = ('192.168.10.255',9999)
s = socket(AF_INET,SOCK_DGRAM)
# Allow this socket to send broadcasts
s.setsockopt(SOL_SOCKET,SO_BROADCAST,1)
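# A matching receiver (sketch) would bind to the broadcast port:
#   r = socket(AF_INET, SOCK_DGRAM)
#   r.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
#   r.bind(('', 9999))
#   data, addr = r.recvfrom(1024)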
while True:
sleep(2)
try:
        s.sendto('Come on, let me show you blue Turkey'.encode(), dest)
except (KeyboardInterrupt,SyntaxError):
raise
except Exception as e:
print(e)
s.close() | [
"[email protected]"
] | |
dce6fb53f680c6e959e0f66f2fa901accf10520f | 15c140fa5f116bcfacb9340aac77692bb4fa3d00 | /rafa_care/ext/helpers/tz.py | be3f65e53299ec530b7105d95e1ecba1eda74d25 | [] | no_license | thcborges/rafa-care | 76ceac3b93a4995729b36539c603ada77350b2fe | 9cd2d39938aa998c9ffaf0ecd82027db26a9b514 | refs/heads/main | 2023-07-14T02:08:25.554888 | 2021-08-01T14:17:14 | 2021-08-01T14:17:14 | 383,315,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from datetime import datetime, timedelta, timezone
def convert_tz(timestamp: datetime, diff: int = -3) -> datetime:
return timestamp.astimezone(timezone(timedelta(hours=diff)))
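# Example: shift a UTC-aware timestamp to the default UTC-3 offset:
#   convert_tz(datetime(2021, 8, 1, 12, 0, tzinfo=timezone.utc))
#   -> datetime(2021, 8, 1, 9, 0, tzinfo=timezone(timedelta(hours=-3)))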
| [
"[email protected]"
] | |
844c6961ba63d9ffe3c2f7755dd9152b2c57135d | f6dfd2373ba1d23ea71a7fd8447e16d797d34138 | /hot_100/206_recursive.py | 4a1a7fd59583000a6564c313e17743c343ead589 | [] | no_license | hwngenius/leetcode | 284e65dc1727446a95a1c18edd6ef994692430ba | 52f7974f986bfb3802defd214dea5b0f9b280193 | refs/heads/master | 2023-02-12T02:23:38.003197 | 2021-01-07T13:55:42 | 2021-01-07T13:55:42 | 266,340,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        if not (head and head.next):
            return head
        ans = self.reverseList(head.next)
        head.next.next = head
        head.next = None  # detach the old link so the reversed list terminates
return ans | [
"[email protected]"
] | |
86d9aa7ff24333c6fabe99130510cfa0aca8fd98 | 01aa3e4a81500081265bdaec2d127a76ffafbfab | /meraki/api/splash_login_attempts.py | 5c7ddb302332314e8d45c7c623be67bf47e2f3e6 | [
"MIT"
] | permissive | itbj/meraki-dashboard | 0f4ded074a615bf2d3c749779f5efc3165d88446 | aa730d4f95b4a0fec180c610eeea56bd71ff48b4 | refs/heads/master | 2020-12-07T17:29:20.168805 | 2020-01-09T08:59:46 | 2020-01-09T08:59:46 | 232,761,086 | 2 | 0 | MIT | 2020-01-09T08:35:24 | 2020-01-09T08:35:23 | null | UTF-8 | Python | false | false | 1,473 | py | class SplashLoginAttempts(object):
def __init__(self, session):
super(SplashLoginAttempts, self).__init__()
self._session = session
def getNetworkSplashLoginAttempts(self, networkId: str, **kwargs):
"""
**List the splash login attempts for a network**
https://api.meraki.com/api_docs#list-the-splash-login-attempts-for-a-network
- networkId (string)
- ssidNumber (integer): Only return the login attempts for the specified SSID
- loginIdentifier (string): The username, email, or phone number used during login
- timespan (integer): The timespan, in seconds, for the login attempts. The period will be from [timespan] seconds ago until now. The maximum timespan is 3 months
"""
kwargs.update(locals())
if 'ssidNumber' in kwargs:
options = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
assert kwargs['ssidNumber'] in options, f'''"ssidNumber" cannot be "{kwargs['ssidNumber']}", & must be set to one of: {options}'''
metadata = {
'tags': ['Splash login attempts'],
'operation': 'getNetworkSplashLoginAttempts',
}
resource = f'/networks/{networkId}/splashLoginAttempts'
query_params = ['ssidNumber', 'loginIdentifier', 'timespan']
params = {k: v for (k, v) in kwargs.items() if k in query_params}
return self._session.get(metadata, resource, params)
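    # Example (sketch; assumes an authenticated `session` wrapper object):
    #   api = SplashLoginAttempts(session)
    #   attempts = api.getNetworkSplashLoginAttempts('N_1234', ssidNumber=0,
    #                                                timespan=3600)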
| [
"[email protected]"
] | |
e2600ece0f6e74188c8315daaa20a443fa2f8d35 | 044833841a6ccbb53f42eb83aa8b7d7fc2f3fc1d | /devito/core/gpu_openmp.py | f41ed6ffa4a92c80ebeb86bdce46bc50b2f80788 | [
"MIT"
] | permissive | rawsh/devito | dea02233f34a8e7f62f70ee75e735e469d870044 | 6aa03bb38b74224f742beb1dbe8df814b45941a0 | refs/heads/master | 2022-09-04T06:27:09.224315 | 2020-05-14T17:55:53 | 2020-05-14T17:55:53 | 264,086,021 | 1 | 0 | MIT | 2020-05-15T03:25:48 | 2020-05-15T03:25:48 | null | UTF-8 | Python | false | false | 12,569 | py | from functools import partial, singledispatch
import cgen as c
from devito.core.operator import OperatorCore
from devito.data import FULL
from devito.exceptions import InvalidOperator
from devito.ir.iet import Callable, ElementalFunction, MapExprStmts
from devito.logger import warning
from devito.mpi.routines import CopyBuffer, SendRecv, HaloUpdate
from devito.passes.clusters import (Lift, cire, cse, eliminate_arrays, extract_increments,
factorize, fuse, optimize_pows)
from devito.passes.iet import (DataManager, Storage, Ompizer, ParallelIteration,
ParallelTree, optimize_halospots, mpiize, hoist_prodders,
iet_pass)
from devito.tools import as_tuple, filter_sorted, generator, timed_pass
__all__ = ['DeviceOpenMPNoopOperator', 'DeviceOpenMPOperator',
'DeviceOpenMPCustomOperator']
class DeviceOpenMPIteration(ParallelIteration):
@classmethod
def _make_construct(cls, **kwargs):
return 'omp target teams distribute parallel for'
@classmethod
def _make_clauses(cls, **kwargs):
kwargs['chunk_size'] = False
return super(DeviceOpenMPIteration, cls)._make_clauses(**kwargs)
class DeviceOmpizer(Ompizer):
COLLAPSE_NCORES = 1
"""
Always collapse when possible.
"""
COLLAPSE_WORK = 1
"""
Always collapse when possible.
"""
lang = dict(Ompizer.lang)
lang.update({
'map-enter-to': lambda i, j:
c.Pragma('omp target enter data map(to: %s%s)' % (i, j)),
'map-enter-alloc': lambda i, j:
c.Pragma('omp target enter data map(alloc: %s%s)' % (i, j)),
'map-update': lambda i, j:
c.Pragma('omp target update from(%s%s)' % (i, j)),
'map-release': lambda i, j:
c.Pragma('omp target exit data map(release: %s%s)' % (i, j)),
'map-exit-delete': lambda i, j:
c.Pragma('omp target exit data map(delete: %s%s)' % (i, j)),
})
_Iteration = DeviceOpenMPIteration
@classmethod
def _map_data(cls, f):
if f.is_Array:
return f.symbolic_shape
else:
return tuple(f._C_get_field(FULL, d).size for d in f.dimensions)
@classmethod
def _map_to(cls, f):
return cls.lang['map-enter-to'](f.name, ''.join('[0:%s]' % i
for i in cls._map_data(f)))
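    # Illustration: for a 2-D array u whose symbolic shape is (nx, ny),
    # _map_to(u) renders the directive
    #   #pragma omp target enter data map(to: u[0:nx][0:ny])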
@classmethod
def _map_alloc(cls, f):
return cls.lang['map-enter-alloc'](f.name, ''.join('[0:%s]' % i
for i in cls._map_data(f)))
@classmethod
def _map_present(cls, f):
raise NotImplementedError
@classmethod
def _map_update(cls, f):
return cls.lang['map-update'](f.name, ''.join('[0:%s]' % i
for i in cls._map_data(f)))
@classmethod
def _map_release(cls, f):
return cls.lang['map-release'](f.name, ''.join('[0:%s]' % i
for i in cls._map_data(f)))
@classmethod
def _map_delete(cls, f):
return cls.lang['map-exit-delete'](f.name, ''.join('[0:%s]' % i
for i in cls._map_data(f)))
@classmethod
def _map_pointers(cls, f):
raise NotImplementedError
def _make_threaded_prodders(self, partree):
# no-op for now
return partree
def _make_partree(self, candidates, nthreads=None):
"""
Parallelize the `candidates` Iterations attaching suitable OpenMP pragmas
for GPU offloading.
"""
assert candidates
root = candidates[0]
# Get the collapsable Iterations
collapsable = self._find_collapsable(root, candidates)
ncollapse = 1 + len(collapsable)
# Prepare to build a ParallelTree
# Create a ParallelTree
body = self._Iteration(ncollapse=ncollapse, **root.args)
partree = ParallelTree([], body, nthreads=nthreads)
collapsed = [partree] + collapsable
return root, partree, collapsed
def _make_parregion(self, partree):
# no-op for now
return partree
def _make_guard(self, partree, *args):
# no-op for now
return partree
def _make_nested_partree(self, partree):
# no-op for now
return partree
class DeviceOpenMPDataManager(DataManager):
_Parallelizer = DeviceOmpizer
def _alloc_array_on_high_bw_mem(self, obj, storage):
if obj in storage._high_bw_mem:
return
super(DeviceOpenMPDataManager, self)._alloc_array_on_high_bw_mem(obj, storage)
decl, alloc, free = storage._high_bw_mem[obj]
alloc = c.Collection([alloc, self._Parallelizer._map_alloc(obj)])
free = c.Collection([self._Parallelizer._map_delete(obj), free])
storage._high_bw_mem[obj] = (decl, alloc, free)
def _map_function_on_high_bw_mem(self, obj, storage, read_only=False):
"""Place a Function in the high bandwidth memory."""
if obj in storage._high_bw_mem:
return
alloc = self._Parallelizer._map_to(obj)
if read_only is False:
free = c.Collection([self._Parallelizer._map_update(obj),
self._Parallelizer._map_release(obj)])
else:
free = self._Parallelizer._map_delete(obj)
storage._high_bw_mem[obj] = (None, alloc, free)
@iet_pass
def place_ondevice(self, iet, **kwargs):
@singledispatch
def _place_ondevice(iet):
return iet
@_place_ondevice.register(Callable)
def _(iet):
# Collect written and read-only symbols
writes = set()
reads = set()
for i, v in MapExprStmts().visit(iet).items():
if not i.is_Expression:
# No-op
continue
if not any(isinstance(j, self._Parallelizer._Iteration) for j in v):
# Not an offloaded Iteration tree
continue
if i.write.is_DiscreteFunction:
writes.add(i.write)
reads = (reads | {r for r in i.reads if r.is_DiscreteFunction}) - writes
# Populate `storage`
storage = Storage()
for i in filter_sorted(writes):
self._map_function_on_high_bw_mem(i, storage)
for i in filter_sorted(reads):
self._map_function_on_high_bw_mem(i, storage, read_only=True)
iet = self._dump_storage(iet, storage)
return iet
@_place_ondevice.register(ElementalFunction)
def _(iet):
return iet
@_place_ondevice.register(CopyBuffer)
@_place_ondevice.register(SendRecv)
@_place_ondevice.register(HaloUpdate)
def _(iet):
return iet
iet = _place_ondevice(iet)
return iet, {}
class DeviceOpenMPNoopOperator(OperatorCore):
CIRE_REPEATS_INV = 2
"""
Number of CIRE passes to detect and optimize away Dimension-invariant expressions.
"""
CIRE_REPEATS_SOPS = 2
"""
Number of CIRE passes to detect and optimize away redundant sum-of-products.
"""
@classmethod
def _normalize_kwargs(cls, **kwargs):
options = kwargs['options']
        # Strictly unnecessary, but makes it clear that this Operator *will*
# generate OpenMP code, bypassing any `openmp=False` provided in
# input to Operator
options.pop('openmp')
options['cire-repeats'] = {
'invariants': options.pop('cire-repeats-inv') or cls.CIRE_REPEATS_INV,
'sops': options.pop('cire-repeats-sops') or cls.CIRE_REPEATS_SOPS
}
return kwargs
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
# To create temporaries
counter = generator()
template = lambda: "r%d" % counter()
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, template, 'invariants', options, platform)
clusters = Lift().process(clusters)
# Reduce flops (potential arithmetic alterations)
clusters = extract_increments(clusters, template)
clusters = cire(clusters, template, 'sops', options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# Reduce flops (no arithmetic alterations)
clusters = cse(clusters, template)
# Lifting may create fusion opportunities, which in turn may enable
# further optimizations
clusters = fuse(clusters)
clusters = eliminate_arrays(clusters, template)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
# Distributed-memory parallelism
if options['mpi']:
mpiize(graph, mode=options['mpi'])
# GPU parallelism via OpenMP offloading
DeviceOmpizer().make_parallel(graph)
# Symbol definitions
data_manager = DeviceOpenMPDataManager()
data_manager.place_ondevice(graph)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
class DeviceOpenMPOperator(DeviceOpenMPNoopOperator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
# Distributed-memory parallelism
optimize_halospots(graph)
if options['mpi']:
mpiize(graph, mode=options['mpi'])
# GPU parallelism via OpenMP offloading
DeviceOmpizer().make_parallel(graph)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
data_manager = DeviceOpenMPDataManager()
data_manager.place_ondevice(graph)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
class DeviceOpenMPCustomOperator(DeviceOpenMPOperator):
_known_passes = ('optcomms', 'openmp', 'mpi', 'prodders')
_known_passes_disabled = ('blocking', 'denormals', 'wrapping', 'simd')
assert not (set(_known_passes) & set(_known_passes_disabled))
@classmethod
def _make_passes_mapper(cls, **kwargs):
options = kwargs['options']
ompizer = DeviceOmpizer()
return {
'optcomms': partial(optimize_halospots),
'openmp': partial(ompizer.make_parallel),
'mpi': partial(mpiize, mode=options['mpi']),
'prodders': partial(hoist_prodders)
}
@classmethod
def _build(cls, expressions, **kwargs):
# Sanity check
passes = as_tuple(kwargs['mode'])
for i in passes:
if i not in cls._known_passes:
if i in cls._known_passes_disabled:
warning("Got explicit pass `%s`, but it's unsupported on an "
"Operator of type `%s`" % (i, str(cls)))
else:
raise InvalidOperator("Unknown pass `%s`" % i)
return super(DeviceOpenMPCustomOperator, cls)._build(expressions, **kwargs)
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
passes = as_tuple(kwargs['mode'])
# Fetch passes to be called
passes_mapper = cls._make_passes_mapper(**kwargs)
# Call passes
for i in passes:
try:
passes_mapper[i](graph)
except KeyError:
pass
# Force-call `mpi` if requested via global option
if 'mpi' not in passes and options['mpi']:
passes_mapper['mpi'](graph)
# GPU parallelism via OpenMP offloading
if 'openmp' not in passes:
passes_mapper['openmp'](graph)
# Symbol definitions
data_manager = DeviceOpenMPDataManager()
data_manager.place_ondevice(graph)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
| [
"[email protected]"
] | |
93881258ce594158332f7e31cfd3a016023ab6d3 | 9e5304efaffe2a3661211b63bbd9617624339448 | /test/espnet2/train/test_reporter.py | a1f9dc35ab2add53dc3f3fc29f64e8ba3ad2e15c | [
"Apache-2.0"
] | permissive | sw005320/espnet-1 | bf2749cf6a95e9a42b59d402f4fb7113acc656e9 | 6ecde88045e1b706b2390f98eb1950ce4075a07d | refs/heads/master | 2023-08-05T18:54:33.626616 | 2020-10-02T18:16:33 | 2020-10-02T18:16:33 | 114,056,236 | 4 | 1 | Apache-2.0 | 2020-11-22T11:20:02 | 2017-12-13T01:03:47 | Python | UTF-8 | Python | false | false | 12,347 | py | from distutils.version import LooseVersion
import logging
from pathlib import Path
import uuid
import numpy as np
import pytest
import torch
from espnet2.train.reporter import aggregate
from espnet2.train.reporter import Average
from espnet2.train.reporter import ReportedValue
from espnet2.train.reporter import Reporter
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
from torch.utils.tensorboard import SummaryWriter
else:
from tensorboardX import SummaryWriter
@pytest.mark.parametrize("weight1,weight2", [(None, None), (19, np.array(9))])
def test_register(weight1, weight2):
reporter = Reporter()
reporter.set_epoch(1)
with reporter.observe(uuid.uuid4().hex) as sub:
stats1 = {
"float": 0.6,
"int": 6,
"np": np.random.random(),
"torch": torch.rand(1),
}
sub.register(stats1, weight1)
sub.next()
stats2 = {
"float": 0.3,
"int": 100,
"np": np.random.random(),
"torch": torch.rand(1),
}
sub.register(stats2, weight2)
sub.next()
assert sub.get_epoch() == 1
with pytest.raises(RuntimeError):
sub.register({})
desired = {}
for k in stats1:
if stats1[k] is None:
continue
if weight1 is None:
desired[k] = (stats1[k] + stats2[k]) / 2
else:
weight1 = float(weight1)
weight2 = float(weight2)
desired[k] = float(weight1 * stats1[k] + weight2 * stats2[k])
desired[k] /= weight1 + weight2
for k1, k2 in reporter.get_all_keys():
if k2 in ("time", "total_count"):
continue
np.testing.assert_allclose(reporter.get_value(k1, k2), desired[k2])
@pytest.mark.parametrize("mode", ["min", "max", "foo"])
def test_sort_epochs_and_values(mode):
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
if mode not in ("min", "max"):
with pytest.raises(ValueError):
reporter.sort_epochs_and_values(key1, "aa", mode)
return
else:
sort_values = reporter.sort_epochs_and_values(key1, "aa", mode)
if mode == "min":
sign = 1
else:
sign = -1
desired = sorted(
[(e + 1, stats_list[e]["aa"]) for e in range(len(stats_list))],
key=lambda x: sign * x[1],
)
for e in range(len(stats_list)):
assert sort_values[e] == desired[e]
def test_sort_epochs_and_values_no_key():
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
with pytest.raises(KeyError):
reporter.sort_epochs_and_values("foo", "bar", "min")
def test_get_value_not_found():
reporter = Reporter()
with pytest.raises(KeyError):
reporter.get_value("a", "b")
def test_sort_values():
mode = "min"
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
sort_values = reporter.sort_values(key1, "aa", mode)
desired = sorted(
[stats_list[e]["aa"] for e in range(len(stats_list))],
)
for e in range(len(stats_list)):
assert sort_values[e] == desired[e]
def test_sort_epochs():
mode = "min"
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
sort_values = reporter.sort_epochs(key1, "aa", mode)
desired = sorted(
[(e + 1, stats_list[e]["aa"]) for e in range(len(stats_list))],
key=lambda x: x[1],
)
for e in range(len(stats_list)):
assert sort_values[e] == desired[e][0]
def test_best_epoch():
mode = "min"
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
best_epoch = reporter.get_best_epoch(key1, "aa", mode)
assert best_epoch == 3
def test_check_early_stopping():
mode = "min"
reporter = Reporter()
key1 = uuid.uuid4().hex
stats_list = [{"aa": 0.3}, {"aa": 0.2}, {"aa": 0.4}, {"aa": 0.3}]
patience = 1
results = []
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
truefalse = reporter.check_early_stopping(patience, key1, "aa", mode)
results.append(truefalse)
assert results == [False, False, False, True]
def test_logging(tmp_path):
reporter = Reporter()
key1 = uuid.uuid4().hex
key2 = uuid.uuid4().hex
stats_list = [
{"aa": 0.3, "bb": 3.0},
{"aa": 0.5, "bb": 3.0},
{"aa": 0.2, "bb": 3.0},
]
writer = SummaryWriter(tmp_path)
for e in range(len(stats_list)):
reporter.set_epoch(e + 1)
with reporter.observe(key1) as sub:
sub.register(stats_list[e])
sub.next()
with reporter.observe(key2) as sub:
sub.register(stats_list[e])
sub.next()
logging.info(sub.log_message())
logging.info(sub.log_message(-1))
logging.info(sub.log_message(0, 1))
sub.tensorboard_add_scalar(writer, -1)
with pytest.raises(RuntimeError):
logging.info(sub.log_message())
logging.info(reporter.log_message())
with reporter.observe(key1) as sub:
sub.register({"aa": 0.1, "bb": 0.4})
sub.next()
sub.register({"aa": 0.1})
sub.next()
def test_has_key():
reporter = Reporter()
reporter.set_epoch(1)
key1 = uuid.uuid4().hex
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
assert reporter.has(key1, "aa")
def test_get_Keys():
reporter = Reporter()
reporter.set_epoch(1)
key1 = uuid.uuid4().hex
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
assert reporter.get_keys() == (key1,)
def test_get_Keys2():
reporter = Reporter()
reporter.set_epoch(1)
key1 = uuid.uuid4().hex
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
assert reporter.get_keys2(key1) == ("aa",)
def test_matplotlib_plot(tmp_path: Path):
reporter = Reporter()
reporter.set_epoch(1)
key1 = uuid.uuid4().hex
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
reporter.set_epoch(1)
with reporter.observe(key1) as sub:
# Skip epoch=2
sub.register({})
sub.next()
reporter.set_epoch(3)
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
reporter.matplotlib_plot(tmp_path)
assert (tmp_path / "aa.png").exists()
def test_tensorboard_add_scalar(tmp_path: Path):
reporter = Reporter()
reporter.set_epoch(1)
key1 = uuid.uuid4().hex
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
reporter.set_epoch(1)
with reporter.observe(key1) as sub:
# Skip epoch=2
sub.register({})
sub.next()
reporter.set_epoch(3)
with reporter.observe(key1) as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
writer = SummaryWriter(tmp_path)
reporter.tensorboard_add_scalar(writer)
def test_state_dict():
reporter = Reporter()
reporter.set_epoch(1)
with reporter.observe("train") as sub:
stats1 = {"aa": 0.6}
sub.register(stats1)
sub.next()
with reporter.observe("eval") as sub:
stats1 = {"bb": 0.6}
sub.register(stats1)
sub.next()
state = reporter.state_dict()
reporter2 = Reporter()
reporter2.load_state_dict(state)
state2 = reporter2.state_dict()
assert state == state2
def test_get_epoch():
reporter = Reporter(2)
assert reporter.get_epoch() == 2
def test_total_count():
reporter = Reporter(2)
assert reporter.get_epoch() == 2
with reporter.observe("train", 1) as sub:
sub.register({})
sub.next()
with reporter.observe("train", 2) as sub:
sub.register({})
sub.next()
sub.register({})
sub.next()
assert sub.get_total_count() == 3
def test_change_epoch():
reporter = Reporter()
with pytest.raises(RuntimeError):
with reporter.observe("train", 1):
reporter.set_epoch(2)
def test_minus_epoch():
with pytest.raises(ValueError):
Reporter(-1)
def test_minus_epoch2():
reporter = Reporter()
with pytest.raises(ValueError):
reporter.set_epoch(-1)
reporter.start_epoch("aa", 1)
with pytest.raises(ValueError):
reporter.start_epoch("aa", -1)
def test_register_array():
reporter = Reporter()
with reporter.observe("train", 1) as sub:
with pytest.raises(ValueError):
sub.register({"a": np.array([0, 1])})
sub.next()
with pytest.raises(ValueError):
sub.register({"b": 1}, weight=np.array([1, 2]))
sub.next()
def test_zero_weight():
reporter = Reporter()
with reporter.observe("train", 1) as sub:
sub.register({"a": 1}, weight=0)
sub.next()
def test_register_nan():
reporter = Reporter()
with reporter.observe("train", 1) as sub:
sub.register({"a": np.nan}, weight=1.0)
sub.next()
def test_no_register():
reporter = Reporter()
with reporter.observe("train", 1):
pass
def test_mismatch_key2():
reporter = Reporter()
with reporter.observe("train", 1) as sub:
sub.register({"a": 2})
sub.next()
with reporter.observe("train", 2) as sub:
sub.register({"b": 3})
sub.next()
def test_reserved():
reporter = Reporter()
with reporter.observe("train", 1) as sub:
with pytest.raises(RuntimeError):
sub.register({"time": 2})
sub.next()
with pytest.raises(RuntimeError):
sub.register({"total_count": 3})
sub.next()
def test_different_type():
reporter = Reporter()
with pytest.raises(ValueError):
with reporter.observe("train", 1) as sub:
sub.register({"a": 2}, weight=1)
sub.next()
sub.register({"a": 3})
sub.next()
def test_start_middle_epoch():
reporter = Reporter()
with reporter.observe("train", 2) as sub:
sub.register({"a": 3})
sub.next()
def test__plot_stats_input_str():
reporter = Reporter()
with pytest.raises(TypeError):
reporter._plot_stats("aaa", "a")
class DummyReportedValue(ReportedValue):
pass
def test_aggregate():
vs = [Average(0.1), Average(0.3)]
assert aggregate(vs) == 0.2
vs = []
assert aggregate(vs) is np.nan
with pytest.raises(NotImplementedError):
vs = [DummyReportedValue()]
aggregate(vs)
def test_measure_time():
reporter = Reporter()
with reporter.observe("train", 2) as sub:
with sub.measure_time("foo"):
pass
sub.next()
def test_measure_iter_time():
reporter = Reporter()
with reporter.observe("train", 2) as sub:
for _ in sub.measure_iter_time(range(3), "foo"):
sub.next()
| [
"[email protected]"
] | |
420b6a0e980b2356a5ca6e2268c92fde01251ec9 | 683876019cad0b0d562ac7f9da8c679cb310cfb2 | /2015/day07/part2.py | c267b7c7b3336aca216adb0658c6aee619228c27 | [] | no_license | CoachEd/advent-of-code | d028bc8c21235361ad31ea55922625adf743b5c8 | 10850d5d477c0946ef73756bfeb3a6db241cc4b2 | refs/heads/master | 2023-05-11T05:20:26.951224 | 2023-05-09T18:54:16 | 2023-05-09T18:54:16 | 160,375,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | import sys
import time
import numpy as np
start_secs = time.time()
def hasInstr(s):
if s.find('AND') != -1 or s.find('OR') != -1 or s.find('NOT') != -1 or s.find('LSHIFT') != -1 or s.find('RSHIFT') != -1:
return True
else:
return False
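# Strategy: sweep the instruction list repeatedly, resolving any wire whose
# operands are already known; resolved lines are deleted from the list until
# it is empty.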
# read in input file
l=[]
my_file = open("inp.txt", "r")
lines = my_file.readlines()
for line in lines:
l.append(line.strip())
d = dict()
i = 0
while len(l) > 0:
if hasInstr(l[i]):
arr = l[i].split('->')
var = arr[-1].strip()
arr2 = arr[0].strip().split()
if l[i].find('NOT') != -1:
operand = arr2[1].strip()
if operand in d:
d[var] = ~d[operand]
del l[i]
else:
b1 = False
b2 = False
lft = arr2[0]
op = arr2[1]
rgt = arr2[2]
if lft.isdigit():
lft = int(lft)
b1 = True
if rgt.isdigit():
rgt = int(rgt)
b2 = True
if not b1:
b1 = lft in d
if b1:
lft = d[lft]
if not b2:
b2 = rgt in d
if b2:
rgt = d[rgt]
if b1 and b2:
# have operands, do cmd
if op == 'AND':
d[var] = lft & rgt
elif op == 'OR':
d[var] = lft | rgt
elif op == 'LSHIFT':
d[var] = lft << rgt
elif op == 'RSHIFT':
d[var] = lft >> rgt
del l[i]
else:
# no instr
arr = l[i].split('->')
var = arr[1].strip()
val = arr[0].strip()
if val.isdigit():
val = int(val)
d[var] = val
if var == 'b':
d[var] = 956 # override for Part 2
del l[i]
else:
if val in d:
d[var] = d[val]
del l[i]
i = i + 1
if i >= len(l):
i = 0
print('part 2: ' + str(np.uint16(d['a'])))
end_secs = time.time()
print(str(end_secs-start_secs)) | [
"[email protected]"
] | |
8fa326a6592555adfd01b1e25a06052b11d92f22 | 04f4558aa0dc904b8d7c0ab79b80ec11c34f8ccf | /test/test_boatroom.py | af2a4b21de7d53df004de2db2a6c8b49de755108 | [
"Apache-2.0"
] | permissive | scubawhere/scubawhere-api-python-client | 0fc23ffb97446b0bb0825c93528f954e7d642cf4 | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | refs/heads/master | 2020-12-24T11:10:34.880348 | 2016-11-08T12:20:45 | 2016-11-08T12:20:45 | 73,180,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.boatroom import Boatroom
class TestBoatroom(unittest.TestCase):
""" Boatroom unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testBoatroom(self):
"""
Test Boatroom
"""
model = swagger_client.models.boatroom.Boatroom()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7d4c853c7c60f91bc251797865adca42be00b83e | b9972ae24a4f261a87997fea963f537abe741dbe | /Chapter01/randint.py | 1a1c576a8bd8c0bfcc8b096767c7fb0218d03782 | [
"MIT"
] | permissive | PacktPublishing/Secret-Recipes-of-the-Python-Ninja | 90503ea9b7dfc59e45f596e8548a3371641162c1 | 3325cd5a4ed314be5c75552bfa4675f7fe17f8e2 | refs/heads/master | 2023-02-03T03:01:09.242951 | 2023-01-30T08:23:38 | 2023-01-30T08:23:38 | 133,922,003 | 18 | 7 | null | 2018-05-18T08:00:42 | 2018-05-18T07:52:51 | Python | UTF-8 | Python | false | false | 183 | py | >>> randint(0, 1000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'randint' is not defined
>>> import random
>>> random.randint(0, 1000)
607
| [
"[email protected]"
] | |
194f89b148d7a3b080f6222eddd19b767acc7d69 | 58a16d44136c7481d855910c39b07798c09c878c | /shotaro/microblog/bin/wheel | e39af8b96b470cd08d9db497e62669844d914041 | [] | no_license | shotaro0726/microblog | 6af1ffe170bab36d6e7a3055069b3bc1411fbc02 | 98b493799b9416b644b0a100fdbe4c64c6e94b89 | refs/heads/master | 2022-04-16T22:18:03.587413 | 2020-04-18T02:49:25 | 2020-04-18T02:49:25 | 256,655,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | #!/Users/macuser/shotaro/microblog/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f34fc930bab5ce3a714ffba25d5ebe66941772a3 | 9949275a60ee6267d59091cab5977b6a45515452 | /antspynet/utilities/get_antsxnet_data.py | a74be4812a9140f35641bf127c4c325278cc8ed8 | [] | no_license | papasanimohansrinivas/ANTsPyNet | d81be45c984ce260385d28ef9e1bb51463f055b4 | 8e16b2d17be769d1d8913de7c3a68135e5ebe5ed | refs/heads/master | 2023-06-25T02:04:43.456534 | 2021-07-22T13:51:29 | 2021-07-22T13:51:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,818 | py | import tensorflow as tf
def get_antsxnet_data(file_id=None,
target_file_name=None,
antsxnet_cache_directory=None):
"""
Download data such as prefabricated templates and spatial priors.
Arguments
---------
file_id string
One of the permitted file ids or pass "show" to list all
valid possibilities. Note that most require internet access
to download.
target_file_name string
Optional target filename.
antsxnet_cache_directory string
Optional target output. If not specified these data will be downloaded
to the subdirectory ~/.keras/ANTsXNet/.
Returns
-------
A filename string
Example
-------
>>> template_file = get_antsxnet_data('biobank')
"""
def switch_data(argument):
switcher = {
"biobank": "https://ndownloader.figshare.com/files/22429242",
"croppedMni152": "https://ndownloader.figshare.com/files/22933754",
"croppedMni152Priors": "https://ndownloader.figshare.com/files/27688437",
"deepFlashPriors": "",
"deepFlashTemplateT1": "",
"deepFlashTemplateT2": "",
"mprage_hippmapp3r": "https://ndownloader.figshare.com/files/24984689",
"protonLungTemplate": "https://ndownloader.figshare.com/files/22707338",
"ctLungTemplate": "https://ndownloader.figshare.com/files/22707335",
"luna16LungPriors": "https://ndownloader.figshare.com/files/28253796",
"priorDktLabels": "https://ndownloader.figshare.com/files/24139802",
"S_template3": "https://ndownloader.figshare.com/files/22597175",
"priorDeepFlashLeftLabels": "https://ndownloader.figshare.com/files/25422098",
"priorDeepFlashRightLabels": "https://ndownloader.figshare.com/files/25422101",
"adni": "https://ndownloader.figshare.com/files/25516361",
"ixi": "https://ndownloader.figshare.com/files/25516358",
"kirby": "https://ndownloader.figshare.com/files/25620107",
"mni152": "https://ndownloader.figshare.com/files/25516349",
"nki": "https://ndownloader.figshare.com/files/25516355",
"nki10": "https://ndownloader.figshare.com/files/25516346",
"oasis": "https://ndownloader.figshare.com/files/25516352"
}
return(switcher.get(argument, "Invalid argument."))
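    # dict.get() emulates a switch statement here: known file ids map to
    # figshare URLs, everything else falls through to "Invalid argument.".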
    if file_id is None:
raise ValueError("Missing file id.")
valid_list = ("biobank",
"croppedMni152",
"croppedMni152Priors",
"deepFlashPriors",
"deepFlashTemplateT1",
"deepFlashTemplateT2",
"mprage_hippmapp3r",
"protonLungTemplate",
"ctLungTemplate",
"luna16LungPriors",
"S_template3",
"priorDktLabels",
"priorDeepFlashLeftLabels",
"priorDeepFlashRightLabels",
"adni",
"ixi",
"kirby",
"mni152",
"nki",
"nki10",
"oasis",
"show")
    if file_id not in valid_list:
raise ValueError("No data with the id you passed - try \"show\" to get list of valid ids.")
if file_id == "show":
return(valid_list)
url = switch_data(file_id)
    if target_file_name is None:
        target_file_name = file_id + ".nii.gz"
    if antsxnet_cache_directory is None:
antsxnet_cache_directory = "ANTsXNet"
target_file_name_path = tf.keras.utils.get_file(target_file_name, url,
cache_subdir=antsxnet_cache_directory)
return(target_file_name_path)
| [
"[email protected]"
] | |
3100deb0f29b301a5dd06ce1413538c7df022b75 | ec61b57a99d7683a668f4910c9fad4b1c9335525 | /todo/41-firstMissingPositive.py | 39b5bd971a07978100e79c13e9e1cb2c2b77d0d0 | [] | no_license | savadev/leetcode-2 | 906f467e793b9636965ab340c7753d9fc15bc70a | 20f37130236cc68224082ef056dacd6accb374e3 | refs/heads/master | 2020-12-28T12:54:08.355317 | 2018-08-06T00:44:24 | 2018-08-06T00:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if nums == []:
return 1
MIN = min(nums)
MAX = max(nums)
SUM = sum(nums)
acc = 0
for x in range(MIN, MAX + 1):
acc+= x
print ('acc', acc)
print ('sum', SUM)
print('res', acc - SUM)
if acc - SUM == 0:
if MIN != 0:
return MIN - 1
return MAX + 1
return acc - SUM
r = Solution()
res = r.firstMissingPositive([1])
print (res)
| [
"[email protected]"
] | |
8b4c76e48970902fe165375a9bff089f9bd8547a | 25e99a0af5751865bce1702ee85cc5c080b0715c | /python/src/pyprogbook/第二版(博碩)課本範例程式/ch8/RC_8_1.py | 917a5b5f0a7c5398cc8649d18a378e6983072f0a | [] | no_license | jasonblog/note | 215837f6a08d07abe3e3d2be2e1f183e14aa4a30 | 4471f95736c60969a718d854cab929f06726280a | refs/heads/master | 2023-05-31T13:02:27.451743 | 2022-04-04T11:28:06 | 2022-04-04T11:28:06 | 35,311,001 | 130 | 67 | null | 2023-02-10T21:26:36 | 2015-05-09T02:04:40 | C | UTF-8 | Python | false | false | 624 | py | #RC_8_1: 定義貨幣時間價值的類別
class TimeValue:
#終值
def fvfix(self, pv, i, n):
#fvfix: 計算終值公式
fv=pv*(1+i)**n
return(fv)
#現值
def pvfix(self, fv, i, n):
#pvfix: 計算現值公式
pv=fv/((1+i)**n)
return(pv)
#設定初始值
pv=100
fv=115.92740743
i=0.03
n=5
#呼叫TimeValue類別,建構物實體
tv1=TimeValue()
#呼叫物件實體的方法
print('%d年後的終值 = %10.4f' %(n, tv1.fvfix(pv, i, n)))
print('%d年後的現值 = %10.4f' %(n, tv1.pvfix(fv, i, n)))
| [
"jason_yao"
] | jason_yao |
de72efb471a01079e3b835086bdf362a43ce075b | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode/813-dp_interval.py | eebabd32aa4ba9891a08ae004244921141caf878 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | # max(score of sum of avg of each group)
class Solution:
def largestSumOfAverages(self, A: List[int], K: int) -> float:
n = len(A)
prefix_sum = [0] * (n+1)
# dp = [[0] * (K+1) for _ in range(n)]
memo = {}
for i in range(n):
prefix_sum[i+1] = A[i] + prefix_sum[i]
def avg(i,j):
ag = (prefix_sum[j] - prefix_sum[i]) / (j - i)
return ag
def dfs(idx, k):
if (idx, k) in memo:
return memo[(idx, k)]
if k==1:
# base case
return (prefix_sum[-1] - prefix_sum[idx]) / (n - idx)
res = 0
for i in range(idx, n-k+1):
# avg of 0~i + divide what's left into k groups
res = max(res, avg(idx,i+1) + dfs(i+1, k-1))
memo[(idx, k)] = res
return res
return dfs(0, K)
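        # Recurrence: dfs(idx, k) = max over i of avg(idx, i + 1) + dfs(i + 1, k - 1);
        # the base case k == 1 takes the average of the whole remaining suffix.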
"""
def largestSumOfAverages(self, A: List[int], K: int) -> float:
N = len(A)
prefix_sum = [0] * (N+1)
for i in range(1, len(A)+1):
prefix_sum[i] = prefix_sum[i-1] + A[i-1]
dp = [self._average(prefix_sum, i, N) for i in range(N)]
print(dp)
# only 1 group, 2 groups and etc
for k in range(1, min(N, K)):
for i in range(N):
# if we have already decided a group, find the rest k
for j in range(i+1, N):
dp[i] = max(dp[i], self._average(prefix_sum, i, j) + dp[j])
return dp[0]
def _average(self, prefix_arr, i, j):
return (prefix_arr[j] - prefix_arr[i]) / float(j - i)
"""
# 0..........j j+1.....i (total needs k groups)
# dp[k-1][j] 1 group
# from 0 - j, divide it into k - 1 group | [
"[email protected]"
] | |
b3fcc97c7467ef879e7c063a68bc03ed11f438e1 | ffad717edc7ab2c25d5397d46e3fcd3975ec845f | /Python/pyesri/EXAMPLES/try_finally.py | 403502b28258223cd220ecccdbaf6d4ea091d6d2 | [] | no_license | shaunakv1/esri-developer-conference-2015-training | 2f74caea97aa6333aa38fb29183e12a802bd8f90 | 68b0a19aac0f9755202ef4354ad629ebd8fde6ba | refs/heads/master | 2021-01-01T20:35:48.543254 | 2015-03-09T22:13:14 | 2015-03-09T22:13:14 | 31,855,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/python
try:
x = 5
y = "cheese"
z = x + y
print "Bottom of try"
except Exception as err:
print err
exit()
finally:
print "Cleaning up..."
| [
"[email protected]"
] | |
b0cc56f055ae7cc4900fceed8dec1cb1322b507b | e63a36870512edb7fd947b809631cf153b028997 | /surveil/api/hooks.py | 18a8ba224fc1247642016889a8ab4fdf84124df5 | [
"Apache-2.0"
] | permissive | titilambert/surveil | 632c7e65d10e03c675d78f278822015346f5c47a | 8feeb64e40ca2bd95ebd60506074192ecdf627b6 | refs/heads/master | 2020-05-25T13:36:59.708227 | 2015-06-29T14:07:07 | 2015-06-29T14:07:07 | 38,249,530 | 1 | 0 | null | 2015-06-29T13:38:04 | 2015-06-29T13:38:03 | null | UTF-8 | Python | false | false | 1,253 | py | # Copyright 2014 - Savoir-Faire Linux inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import influxdb
from pecan import hooks
import pymongo
class DBHook(hooks.PecanHook):
def __init__(self, mongo_url, ws_arbiter_url, influxdb_url):
self.mongo_url = mongo_url
self.ws_arbiter_url = ws_arbiter_url
self.influxdb_url = influxdb_url
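    # Pecan calls before() ahead of each controller and after() once the
    # controller returns, so every request gets a fresh MongoClient that is
    # closed when the request completes.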
def before(self, state):
self.mongoclient = pymongo.MongoClient(self.mongo_url)
state.request.mongo_connection = self.mongoclient
state.request.ws_arbiter_url = self.ws_arbiter_url
state.request.influxdb_client = influxdb.InfluxDBClient.from_DSN(
self.influxdb_url
)
def after(self, state):
self.mongoclient.close()
| [
"[email protected]"
] | |
12ec553dc3a74926d2a8d0a31bcb4162d631e912 | c68b99bf1671d1fb5a1a5a0d6df7bb164dd1d20d | /Medium/**1111-MaximumNestingDepthOfTwoValidParenthesesStrings.py | 58c02a74e38a8ba9adc21370c7f5e9fadd219265 | [] | no_license | r50206v/Leetcode-Practice | 8db9333e2e3d2a335f439d7e9e57e8c36b69ae6d | f9302e93c441f06cc14949605da20978c4289202 | refs/heads/master | 2022-05-17T18:09:48.857263 | 2022-04-27T01:02:12 | 2022-04-27T01:02:12 | 192,258,017 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | # Solution
'''
Runtime: 44 ms, faster than 31.28% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
Memory Usage: 12.2 MB, less than 100.00% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
from:
https://leetcode.com/problems/maximum-nesting-depth-of-two-valid-parentheses-strings/discuss/329010/Python-O(n)
'''
class Solution(object):
def maxDepthAfterSplit(self, seq):
stack = []
n = len(seq)
res = [0] * n
for i in range(n):
s = seq[i]
if s == "(":
if stack:
res[i] = 1 - res[stack[-1]]
stack.append(i)
elif s == ")":
res[i] = res[stack[-1]]
stack.pop()
return res
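# Depth-parity trick: res[i] flips parity relative to the enclosing '(' on
# the stack, so alternating nesting depths go to different groups and each
# group's maximum depth is roughly halved.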
# Solution
'''
Runtime: 36 ms, faster than 76.78% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
Memory Usage: 12 MB, less than 100.00% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
from contest winners~
https://leetcode.com/contest/weekly-contest-144/ranking/
'''
class Solution:
def maxDepthAfterSplit(self, seq: str) -> List[int]:
a = 0
b = 0
n = len(seq)
ans = [0] * n
for i, c in enumerate(seq):
if c == '(':
if a < b:
a += 1
ans[i] = 0
else:
b += 1
ans[i] = 1
else:
if a < b:
b -= 1
ans[i] = 1
else:
a -= 1
ans[i] = 0
return ans | [
"[email protected]"
] | |
5e03456ca1fac16e1a05a8cc6d80041b70bfde5c | be55991401aef504c42625c5201c8a9f14ca7c3b | /python全栈3期/IO模型/selectorsDemo01.py | 532bbf1784d32868773b2e07e5aa460d2c260c73 | [
"Apache-2.0"
] | permissive | BillionsRichard/pycharmWorkspace | adc1f8bb15b58ded489fc8dec0df397601823d2c | 709e2681fc6d85ff52fb25717215a365f51073aa | refs/heads/master | 2021-09-14T21:12:59.839963 | 2021-08-08T09:05:37 | 2021-08-08T09:05:37 | 143,610,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@file: selectorsDemo01.py
@time: 2018/6/2 18:53
"""
from pprint import pprint as P
import selectors
import socket
SELECTOR = selectors.DefaultSelector()
def accept_fun(socket, mask):
client_skt, client_addr = socket.accept()
client_port = client_addr[1]
P('client coming:%s' % client_port)
SELECTOR.register(client_skt, selectors.EVENT_READ, read_fun)
def read_fun(socket, mask):
data = socket.recv(1000).decode('utf8')
if data:
print('received:%s' % data)
socket.send(data.upper().encode('utf8'))
else:
P('no data received....')
server_sock = socket.socket()
server_sock.bind(("127.0.0.1", 8080))
server_sock.listen(100)
server_sock.setblocking(False)
SELECTOR.register(server_sock, selectors.EVENT_READ, accept_fun)
while True:
    events = SELECTOR.select()  # wait for registered events, looping forever
for key, mask in events:
callback = key.data
        callback(key.fileobj, mask)  # call the previously registered callback
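# Quick test client (sketch):
#   c = socket.socket()
#   c.connect(('127.0.0.1', 8080))
#   c.send(b'hello'); print(c.recv(1000))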
| [
"[email protected]"
] | |
c8c79098eb281dfb93aa7a5c3ba0399c3f545203 | 0ff0bd21faecdeebc3a29fc5860d25fb8f079aae | /rep_k.py | c43b3903b48080a05e6bffc887c6778d0b1d9245 | [] | no_license | Ponkiruthika112/codesetkataset2 | 652297278e84de07d5d3fc5dfa2eb3f995258cab | bd8a96d2fb357ff571f2650fdfca911fba8cc999 | refs/heads/master | 2020-04-15T09:49:55.421089 | 2019-02-18T16:18:55 | 2019-02-18T16:18:55 | 164,567,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | n=list(map(int,input().split()))
k=l[-1]
a=list(map(int,input().split()))
l=list(set(a))
s=""
for i in range(0,len(l)):
if a.count(l[i])==k:
s=s+str(l[i])+" "
print(s.strip())
#repeat k times
| [
"[email protected]"
] | |
8df66b74e02e272a2ac172cbc9aba54662670f75 | b50f43c7c8cba1c0f349870596f12d1a333e6f42 | /axonius_api_client/cli/grp_system/grp_settings/cmd_update_section.py | cce5589c8ccc9641e146f3457660e8495c6476f7 | [
"MIT"
] | permissive | zahediss/axonius_api_client | 190ca466e5de52a98af9b527a5d1c132fd8a5020 | 8321788df279ffb7794f179a4bd8943fe1ac44c4 | refs/heads/master | 2023-08-01T14:35:17.095559 | 2021-09-13T21:04:23 | 2021-09-13T21:04:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | # -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ....tools import json_dump
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, SPLIT_CONFIG_OPT, add_options
from .grp_common import EXPORT, SECTION, str_section
OPTIONS = [*AUTH, EXPORT, SECTION, SPLIT_CONFIG_OPT]
@click.command(name="update-section", context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(
ctx,
url,
key,
secret,
config,
section,
export_format,
**kwargs,
):
"""Update a section from arguments."""
client = ctx.obj.start_client(url=url, key=key, secret=secret)
new_config = dict(config)
apiname = ctx.parent.command.name.replace("-", "_")
apiobj = getattr(client, apiname)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
settings = apiobj.update_section(section=section, **new_config)
ctx.obj.echo_ok(f"Updated {section!r} with configuration {new_config}")
if export_format == "str":
str_section(meta=settings)
ctx.exit(0)
if export_format == "json-config":
config = settings["config"]
click.secho(json_dump(config))
ctx.exit(0)
if export_format == "json-full":
click.secho(json_dump(settings))
ctx.exit(0)
ctx.exit(1)
| [
"[email protected]"
] | |
28b92fed6d4c115fe7c615905f1a41b510587860 | b44ae8c215c7577616ce94bbddda57d46ff46577 | /experiments/convergence/movielens_100K/gaussian_exponential.py | a143962077d68e7428eb01f52aa54a4f6ed764b5 | [] | no_license | changchunli/BMF_Priors | 06a74d89198b11c0c3ba673a1d4869986cd7bc2d | 15b20537eefd36347ed84617882eeea1c453e162 | refs/heads/master | 2020-03-21T07:50:08.081910 | 2018-06-10T10:22:04 | 2018-06-10T10:22:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | '''
Measure convergence on the MovieLens 100K dataset, with the Gaussian +
Exponential model.
'''
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_exponential import BMF_Gaussian_Exponential
from BMF_Priors.data.movielens.load_data import load_movielens_100K
from BMF_Priors.experiments.convergence.convergence_experiment import measure_convergence_time
import matplotlib.pyplot as plt
''' Run the experiment. '''
R, M = load_movielens_100K()
model_class = BMF_Gaussian_Exponential
settings = {
'R': R,
'M': M,
'K': 20,
'hyperparameters': { 'alpha':1., 'beta':1., 'lamb':0.1 },
'init': 'random',
'iterations': 200,
}
fout_performances = './results/performances_gaussian_exponential.txt'
fout_times = './results/times_gaussian_exponential.txt'
repeats = 10
performances, times = measure_convergence_time(
repeats, model_class, settings, fout_performances, fout_times)
''' Plot the times, and performance vs iterations. '''
plt.figure()
plt.title("Performance against average time")
plt.plot(times, performances)
plt.ylim(0,2000)
plt.figure()
plt.title("Performance against iteration")
plt.plot(performances)
plt.ylim(0,2000)
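# Display the figures (assumes an interactive matplotlib backend):
plt.show()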
| [
"[email protected]"
] | |
041808402b9a434766690092176281e5cd6fc449 | 168bccd6fbd54025edeb526a497f4cd143390608 | /Datascience/Bee_Word_Project_old/Bee_Word_Project_bkup/asgi.py | 348f1b65e1c401a5530a6c199f263c2a0ada07ce | [] | no_license | mbegumgit/Mumtaz | 4e9cdd2b9a9b437cb5b3e0534a673aecc1366bd0 | 2edbc5e828ba6803580ff90beaf4c7cc7ace23de | refs/heads/master | 2022-03-05T18:41:28.474102 | 2022-02-18T07:33:06 | 2022-02-18T07:33:06 | 210,820,610 | 0 | 0 | null | 2022-02-18T09:50:24 | 2019-09-25T10:43:55 | HTML | UTF-8 | Python | false | false | 409 | py | """
ASGI config for Bee_Word_Project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Bee_Word_Project.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
598046ba24df38670274f2af60bbeef0dfaf8a4e | 268b22da698310c1fd0471f94d61e02782cbaf37 | /Week6/week6work/test/app.py | b05a86a61e85ac24690e059365b0a5e5c58330f3 | [] | no_license | jayquake/DI-Excersises | 0c1147863753fb29a6f688bd73bdd9acc047c180 | 02cb0ee9baed7fd7736273e8fc68317ba4356e39 | refs/heads/master | 2020-12-10T11:38:12.225341 | 2020-05-06T08:34:35 | 2020-05-06T08:34:35 | 233,582,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from flask import Flask
from flask import render_template
import users
from forms import LoginForm
from flask import request
from flask import session
from flask import flash
app = Flask(__name__)
users = users.create_database()
app.config['SECRET_KEY'] = 'you-will-never-guess'
@app.route('/')
def home():
html = render_template('index.html')
return html
@app.route('/login', methods=['POST'])
def login():
    form = LoginForm()
    if request.form['password'] == 'password' and request.form['username'] == 'admin':
        session['logged_in'] = True
        return home()
    flash('wrong password!')
    return render_template('login.html', form=form)
if __name__ == '__main__':
app.run() | [
"[email protected]"
] | |
8066a57e0fc178c600664597a65ee9595bc1c3a3 | 06525f75c7fe5ba75b0737d7b93e48cca9c24706 | /django_gotolong/jsched/tasks.py | f08ea74499eb6979c7339b33a028882120f323fd | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | webclinic017/gotolong | 029314121c9cb6ce66c95fab6a237aca9a3ecd6c | 3bb5ec7c7a5734e7121d308769a3ed8bb01e7f30 | refs/heads/master | 2022-04-21T08:06:11.669599 | 2022-04-02T20:13:11 | 2022-04-02T20:13:11 | 247,398,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | # Create your views here.
# Create your views here.
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from datetime import date, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
from django_gotolong.bhav.views import bhav_fetch
from django_gotolong.ftwhl.views import ftwhl_fetch
from django.utils import timezone
# from background_task import background
import requests
import sys
# notify_user(user.id, schedule=90) # 90 seconds from now
# notify_user(user.id, schedule=timedelta(minutes=20)) # 20 minutes from now
# notify_user(user.id, schedule=timezone.now()) # at a specific time
# @background(schedule=15)
def jsched_task_bg():
print('tasks.py : notify_nse')
# redirect('bhav-fetch')
# redirect('ftwhl-fetch')
def jsched_task_daily():
print('tasks.py : jsched_task_daily: to be fixed later')
return
if True:
tmodules = ['bhav', 'ftwhl']
for tmod in tmodules:
try:
url = 'http://127.0.0.1:8000/' + tmod + '/fetch/'
# connect timeout - 5 sec
# read timeout - 14 sec
response = requests.get(url, allow_redirects=False, timeout=(15, 60))
print(url, response.url, response.status_code)
except:
print("Unexpected error:", url, sys.exc_info())
def jsched_task_common():
print('tasks.py : common tasks')
# HttpResponseRedirect(reverse('bhav-fetch'))
# HttpResponseRedirect(reverse('ftwhl-fetch'))
# redirect('bhav-fetch')
# redirect('ftwhl-fetch')
def jsched_task_startup():
print('tasks.py : start')
# notify_nse(repeat=Task.DAILY, repeat_until=None)
# scheduler = BackgroundScheduler()
# scheduler.add_job(jsched_task_common, 'interval', days=1)
# scheduler.start()
| [
"[email protected]"
] | |
30297a3554de37ae2bbeae152c60ce87fa548148 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_ConstantTrend_Seasonal_WeekOfYear_ARX.py | 301a6ea127790f581fe24c9e7a8e24a8c81520e9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 180 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['ConstantTrend'] , ['Seasonal_WeekOfYear'] , ['ARX'] ); | [
"[email protected]"
] | |
0a3851483a7c2783d20f9210c909db95e5a3effb | ce2e307f8725b7bbe4c9177ed0f6b8bd74ae6cbe | /src/cw_20/models.py | 3aec0e1efda9e87be7b828ff9e1b79bcdfbc6885 | [] | no_license | alexshchegretsov/Teach_Me_Skills_Django_homeworks | f2a096f60bf8fe2e45693dd2352341529007327c | dcde073292e1cfb15708cdb3dd8d539fae37143a | refs/heads/master | 2020-05-25T00:07:36.348802 | 2019-06-06T12:21:35 | 2019-06-06T12:21:35 | 187,528,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from django.db import models
class Customer(models.Model):
    first_name = models.CharField(max_length=200, null=False)
    last_name = models.CharField(max_length=200, null=False)
profession = models.CharField(max_length=200, null=True)
age = models.IntegerField()
def __str__(self):
return f'{self.first_name} {self.last_name}'
| [
"[email protected]"
] | |
df4b35ec2d7b11d26059caeb6712174bde347a30 | b8cb10a3c99961f44ac758b3683523627d032680 | /runoschool/runo/migrations/0008_auto_20201020_0530.py | 735a3a0d26c067a411e76cd8bf585ce29bf7dc25 | [] | no_license | samuelatuma1/runoschool | 4a2183be4a7e856723fc5368c90edcb79d6ed29e | ed75fb4077cf5ff86b7d546d3346fc4625bee97e | refs/heads/master | 2023-01-29T20:22:25.160805 | 2020-12-14T08:33:13 | 2020-12-14T08:33:13 | 312,167,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # Generated by Django 3.1.1 on 2020-10-20 04:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('runo', '0007_auto_20201019_1750'),
]
operations = [
migrations.RenameField(
model_name='welcleaders',
old_name='extraImg1',
new_name='extraLeader1',
),
migrations.RenameField(
model_name='welcleaders',
old_name='extraImg2',
new_name='extraLeader2',
),
migrations.RenameField(
model_name='welcleaders',
old_name='welcImg',
new_name='welcLeader',
),
]
| [
"[email protected]"
] | |
6a0feedd97724c95d4013b2c5c578158cfa386b9 | c558fb26ab6cdc46c0a5ad292a34c20b52f96f42 | /crud/employee/forms.py | 8625d6cb27a789ac7671469062b188b1de343091 | [] | no_license | VaultHRMS/HRMS | 9dad7e5416f008075ce4e50226e76ca86ae7d9b0 | de714c63494a1a1260116a66e54fac8c032fd661 | refs/heads/master | 2020-08-15T07:37:02.311773 | 2019-11-05T10:09:44 | 2019-11-05T10:09:44 | 215,301,571 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
from employee.models import Employee
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__" | [
"[email protected]"
] | |
5d62effa01623bbc899e69cedeac320b1a623569 | e8501308efed70829ba70332d5ed7b956a245a41 | /Lab/Lab11/Lab11-4.py | 9ce9d856102df3c1961115034eac8160b5366c1b | [] | no_license | Jinmin-Goh/DeepLearningPractice | 293f7c41144d64c1044d27dadf16f563d7caabb4 | b13cff775ad350deb0fde982610276c7b2fc7798 | refs/heads/master | 2021-02-28T07:21:51.231203 | 2020-03-20T17:15:42 | 2020-03-20T17:15:42 | 245,673,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,843 | py | # Lab 11-4
# Made by: Jinmin Goh
# Date: 20200318
# TF layers, fancy coding
import tensorflow as tf
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777) # reproducibility
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# hyper parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
class Model:
def __init__(self, sess, name):
self.sess = sess
self.name = name
self._build_net()
def _build_net(self):
with tf.variable_scope(self.name):
# dropout (keep_prob) rate 0.7~0.5 on training, but should be 1
# for testing
self.training = tf.placeholder(tf.bool)
# input place holders
self.X = tf.placeholder(tf.float32, [None, 784])
# img 28x28x1 (black/white), Input Layer
X_img = tf.reshape(self.X, [-1, 28, 28, 1])
self.Y = tf.placeholder(tf.float32, [None, 10])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
padding="SAME", activation=tf.nn.relu)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
padding="SAME", strides=2)
dropout1 = tf.layers.dropout(inputs=pool1,
rate=0.3, training=self.training)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
padding="SAME", activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
padding="SAME", strides=2)
dropout2 = tf.layers.dropout(inputs=pool2,
rate=0.3, training=self.training)
            # Convolutional Layer #3 and Pooling Layer #3
conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
padding="same", activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
padding="same", strides=2)
dropout3 = tf.layers.dropout(inputs=pool3,
rate=0.3, training=self.training)
# Dense Layer with Relu
flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
dense4 = tf.layers.dense(inputs=flat,
units=625, activation=tf.nn.relu)
dropout4 = tf.layers.dropout(inputs=dense4,
rate=0.5, training=self.training)
# Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
self.logits = tf.layers.dense(inputs=dropout4, units=10)
# define cost/loss & optimizer
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.Y))
self.optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(self.cost)
correct_prediction = tf.equal(
tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def predict(self, x_test, training=False):
return self.sess.run(self.logits,
feed_dict={self.X: x_test, self.training: training})
def get_accuracy(self, x_test, y_test, training=False):
return self.sess.run(self.accuracy,
feed_dict={self.X: x_test,
self.Y: y_test, self.training: training})
def train(self, x_data, y_data, training=True):
return self.sess.run([self.cost, self.optimizer], feed_dict={
self.X: x_data, self.Y: y_data, self.training: training})
# initialize
sess = tf.Session()
m1 = Model(sess, "m1")
sess.run(tf.global_variables_initializer())
print('Learning Started!')
# train my model
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
c, _ = m1.train(batch_xs, batch_ys)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
print('Accuracy:', m1.get_accuracy(mnist.test.images, mnist.test.labels))
| [
"[email protected]"
] | |
7889902c32c8f1e1a6044d9fe5c296b950de76b0 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/mortality_code/full_life_tables/full_lt_run_all.py | 267733953868b1857c52050eb98c3f6414fc2308 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,413 | py | import os
import sys
import argparse
import pandas as pd
import datetime
import random
import subprocess
import numpy as np
import rpy2.robjects as robjects
from subprocess import call
from multiprocessing import Process
from shutil import move
from sqlalchemy import create_engine
from jobmon.client.swarm.workflow.workflow import Workflow
from jobmon.client.swarm.workflow.bash_task import BashTask
from jobmon.client.swarm.workflow.python_task import PythonTask
from mort_wrappers.call_mort_function import call_mort_function
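
# Workflow overview (as assembled in full_life_table.generate_workflow):
#   1. save_inputs                     - caches shared inputs once
#   2. full_lt (per lowest-level loc)  - fans out over most-detailed locations
#   3. finalizer (optional)            - kicks off the downstream finalizer run
#   4. aggregate_lts (optional)        - aggregates to country level per lt_type
#   5. compile/upload (optional)       - uploads the full life table estimates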
class full_life_table:
def __init__(self, username, process_run_table, run_finalizer, aggregate_full_lts, gbd_year, upload, enable_assertions_flag, mark_best, aggregate_locations):
self.no_shock_death_number_estimate_version = process_run_table['no shock death number estimate']
self.no_shock_life_table_estimate_version = process_run_table['no shock life table estimate']
self.shock_aggregator_version = process_run_table['shock aggregator']
self.age_sex_estimate_version = process_run_table['age sex estimate']
self.population_estimate_version = process_run_table['population estimate']
self.population_single_year_estimate_version = process_run_table['population single year estimate']
self.shock_death_number_estimate = process_run_table['shock death number estimate']
self.shock_life_table_estimate = process_run_table['shock life table estimate']
self.full_life_table_estimate_version = process_run_table['full life table estimate']
self.mlt_life_table_estimate_version = process_run_table['mlt life table estimate']
self.hiv_run_name = process_run_table['hiv']
self.user = username
self.gbd_year = gbd_year
self.mark_best = mark_best
self.upload_all_lt_params_flag = "True"
self.enable_assertions_flag = enable_assertions_flag
self.run_finalizer = run_finalizer
self.aggregate_full_lts = aggregate_full_lts
self.upload = upload
self.aggregate_locations = aggregate_locations
self.code_dir = "FILEPATH"
self.finalizer_code_dir = "FILEPATH"
self.r_singularity_shell = "FILEPATH"
self.r_singularity_shell_3501 = "FILEPATH"
self.stdout = "FILEPATH"
self.stderr = "FILEPATH"
self.master_dir = "FILEPATH"
self.input_dir = "{}/inputs".format(self.master_dir)
        self.reckoning_output_dir = "FILEPATH/{}".format(self.no_shock_death_number_estimate_version)
self.full_lt_dir = "{}/full_lt".format(self.master_dir)
self.abridged_lt_dir = "{}/abridged_lt".format(self.master_dir)
self.upload_dir = "{}/upload".format(self.master_dir)
self.log_dir = "{}/logs".format(self.master_dir)
def create_directories(self, master_dir, subdirs=[]):
# Create initial set of directories
if not os.path.exists(master_dir):
os.makedirs(master_dir)
for dir in subdirs:
new_dir = master_dir + "/" + dir
if not os.path.exists(new_dir):
os.makedirs(new_dir)
def generate_save_inputs_task(self, upstream_tasks):
job_hash_name = "agg_save_inputs_{}".format(self.no_shock_death_number_estimate_version)
num_cores = 4
m_mem_free = "36G"
runfile = "{}/01_save_inputs.R".format(self.code_dir)
args = ["--no_shock_death_number_estimate_version", str(self.no_shock_death_number_estimate_version),
"--population_estimate_version", str(self.population_estimate_version),
"--population_single_year_estimate_version", str(self.population_single_year_estimate_version),
"--gbd_year", str(self.gbd_year)]
argsstr = " ".join(args)
command = "{r_shell} {codefile} {passargs}".format(
r_shell=self.r_singularity_shell,
codefile=runfile,
passargs=argsstr)
return BashTask(command=command,
upstream_tasks=upstream_tasks,
name=job_hash_name,
num_cores=num_cores,
m_mem_free = m_mem_free,
max_runtime_seconds = 9000,
j_resource = True,
queue = "all.q")
def generate_full_lt_task(self, upstream_tasks, loc):
job_hash_name = "full_lt_{loc}_{version}".format(loc=loc, version=self.no_shock_death_number_estimate_version)
num_cores = 3
m_mem_free = "30G"
runfile = "{}/02_full_lt.R".format(self.code_dir)
args = ["--no_shock_death_number_estimate_version", str(self.no_shock_death_number_estimate_version),
"--mlt_life_table_estimate_version", str(self.mlt_life_table_estimate_version),
"--hiv_run", self.hiv_run_name,
"--loc", str(loc),
"--shock_aggregator_version", str(self.shock_aggregator_version),
"--gbd_year", str(self.gbd_year),
"--enable_assertions_flag", str(self.enable_assertions_flag)]
argsstr = " ".join(args)
command = "{r_shell} {codefile} {passargs}".format(
r_shell=self.r_singularity_shell,
codefile=runfile,
passargs=argsstr)
return BashTask(command=command,
upstream_tasks=upstream_tasks,
name=job_hash_name,
num_cores=num_cores,
m_mem_free = m_mem_free,
max_runtime_seconds = 90000,
j_resource = True,
queue = "all.q")
def generate_aggregate_lt_task(self, upstream_tasks, loc, lt_type, num_children):
job_hash_name = "agg_full_lts_{loc}_{lt_type}_{version}".format(loc=loc, lt_type=lt_type, version = self.no_shock_death_number_estimate_version)
if num_children < 10:
num_cores = 10
m_mem_free = "100G"
elif num_children >= 10 and num_children < 50:
num_cores = 20
m_mem_free = "300G"
else:
num_cores = 30
m_mem_free = "500G"
runfile = "{}/03_aggregate_lts.R".format(self.code_dir)
args = ["--no_shock_death_number_estimate_version", str(self.no_shock_death_number_estimate_version),
"--loc", str(loc),
"--lt_type", lt_type,
"--gbd_year", str(self.gbd_year),
"--enable_assertions_flag", str(self.enable_assertions_flag)]
argsstr = " ".join(args)
command = "{r_shell} {codefile} {passargs}".format(
r_shell=self.r_singularity_shell_3501,
codefile=runfile,
passargs=argsstr)
return BashTask(command=command,
upstream_tasks=upstream_tasks,
name=job_hash_name,
num_cores=num_cores,
m_mem_free = m_mem_free,
max_runtime_seconds = 90000,
j_resource = True,
queue = "all.q")
def generate_full_upload_task(self, upstream_tasks):
job_hash_name = "full_life_table_upload_{}".format(self.no_shock_death_number_estimate_version)
num_cores = 10
m_mem_free = "50G"
runfile = "{}/04_compile_upload_results.R".format(self.code_dir)
args = ["--no_shock_death_number_estimate_version", str(self.no_shock_death_number_estimate_version),
"--gbd_year", str(self.gbd_year),
"--full_life_table_estimate_version", str(self.full_life_table_estimate_version),
"--upload_all_lt_params_flag", str(self.upload_all_lt_params_flag)]
argsstr = " ".join(args)
command = "{r_shell} {codefile} {passargs}".format(
r_shell=self.r_singularity_shell_3501,
codefile=runfile,
passargs=argsstr)
return BashTask(command=command,
upstream_tasks=upstream_tasks,
name=job_hash_name,
num_cores=num_cores,
m_mem_free = m_mem_free,
max_runtime_seconds = 90000,
queue = "all.q")
def generate_finalizer_task(self, upstream_tasks):
job_hash_name = "finalizer_run_{}".format(self.no_shock_death_number_estimate_version)
num_cores = 1
m_mem_free = "2G"
runfile = "{}/finalizer_run_all.py".format(self.finalizer_code_dir)
# How do we pass args to the finalizer run_all?
args = ["--shock_death_number_estimate_version", self.shock_death_number_estimate,
"--shock_life_table_estimate_version", self.shock_life_table_estimate,
"--no_shock_death_number_estimate_version", self.no_shock_death_number_estimate_version,
"--no_shock_life_table_estimate_version", self.no_shock_life_table_estimate_version,
"--username", self.user,
"--gbd_year", self.gbd_year,
"--mark_best", self.mark_best,
"--shock_version_id", self.shock_aggregator_version,
"--aggregate_locations", self.aggregate_locations,
"--workflow_args", self.shock_death_number_estimate]
return PythonTask(name = job_hash_name,
num_cores = num_cores,
m_mem_free = m_mem_free,
script = runfile,
args = args,
upstream_tasks = upstream_tasks,
queue = "long.q",
max_runtime_seconds = 180000,
j_resource = True,
max_attempts = 1)
def generate_workflow(self, wf_name):
wf = Workflow(workflow_args=wf_name,
project="proj_mortenvelope",
stdout=self.stdout,
stderr=self.stderr, seconds_until_timeout = 777600, resume = True)
self.create_directories(master_dir=self.master_dir, subdirs=['inputs', 'shock_numbers', 'hiv_adjust', 'logs', 'upload', 'abridged_lt', 'full_lt'])
self.create_directories(master_dir=self.reckoning_output_dir, subdirs=['lt_whiv', 'lt_hivdel', 'envelope_whiv', 'envelope_hivdel'])
self.create_directories(master_dir=self.full_lt_dir, subdirs=['no_hiv', 'with_hiv', 'with_shock'])
self.create_directories(master_dir=self.abridged_lt_dir, subdirs=['no_hiv', 'with_hiv', 'with_shock'])
self.create_directories(master_dir=self.upload_dir)
self.create_directories(master_dir=self.log_dir, subdirs=['full_with_hiv_mx_vs_no_hiv', 'full_shock_mx_vs_with_hiv', 'abridged_with_hiv_mx_vs_no_hiv',
'abridged_shock_mx_vs_with_hiv', 'abridged_no_hiv_qx_1_5', 'abridged_with_hiv_qx_1_5',
'abridged_shock_qx_1_5', 'shock_rate_compare', 'ax_compare'])
# Get locations
most_detail_locations = call_mort_function("get_locations", {"level" : "lowest", "gbd_year" : self.gbd_year})
most_detail_loc_ids = most_detail_locations.location_id.tolist()
# Generate save inputs task
# job name: agg_save_inputs_{}
# script being ran: save_inputs.R
save_inputs_task = self.generate_save_inputs_task(upstream_tasks=[])
wf.add_task(save_inputs_task)
# Generate full lt tasks
# job name: gen_full_{loc}_{version}
# script being ran: full_lt.R
full_lt_tasks = {}
for loc in most_detail_loc_ids:
full_lt_tasks[loc] = self.generate_full_lt_task(upstream_tasks=[save_inputs_task], loc=loc)
wf.add_task(full_lt_tasks[loc])
# Run finalizer
if self.run_finalizer:
finalizer_run_task = self.generate_finalizer_task(upstream_tasks=full_lt_tasks.values())
wf.add_task(finalizer_run_task)
# Generate rest of full_lt tasks and add to the workflow
# job names: "agg_full_{loc}_{lt_type}_{version}"
# script being ran: aggregate_lts.R
if self.aggregate_full_lts:
# Get aggregate locations
locations = call_mort_function("get_locations", {"level" : "all", "gbd_year" : self.gbd_year})
agg_locations = locations[(locations.level == 3) & (~locations.location_id.isin(most_detail_loc_ids))]
agg_loc_ids = agg_locations.location_id.tolist()
# Generate agg tasks
agg_tasks = {}
for loc in agg_loc_ids:
num_children = len(most_detail_locations.loc[most_detail_locations.path_to_top_parent.str.contains("," + str(loc) + ",")])
for lt_type in ['with_shock', 'with_hiv', 'no_hiv']:
agg_task_key = str(loc) + "_" + lt_type
agg_tasks[agg_task_key] = self.generate_aggregate_lt_task(upstream_tasks=full_lt_tasks.values(), loc=loc, lt_type=lt_type, num_children=num_children)
wf.add_task(agg_tasks[agg_task_key])
# Generate upload task
# job name: full_life_table_upload_{}
# script name: compile_upload_results.R
if self.upload:
upload_task = self.generate_full_upload_task(upstream_tasks=agg_tasks.values())
wf.add_task(upload_task)
return wf
def run(self):
d = datetime.datetime.now()
wf_name = "full_lt_{}".format(self.shock_death_number_estimate)
wf = self.generate_workflow(wf_name)
success = wf.run()
if success == 0:
print("Completed!")
if(self.mark_best == "True"):
command = """
library(mortdb, lib = "FILEPATH/r-pkg")
update_status(model_name = "full life table", model_type = "estimate", run_id = {}, new_status = "best", send_slack = TRUE)
""".format(self.full_life_table_estimate_version)
else:
command = """
library(mortdb, lib = "FILEPATH/r-pkg")
update_status(model_name = "full life table", model_type = "estimate", run_id = {}, new_status = "completed", send_slack = TRUE)
""".format(self.full_life_table_estimate_version)
robjects.r(command)
else:
print("FAILED!!!")
command = """
library(mortdb, lib = "FILEPATH/r-pkg")
send_slack_message()
""".format(version_id)
robjects.r(command)
raise Exception()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--version_id', type=int, required=True,
action='store', help="Version id of full life table")
parser.add_argument('--shock_death_number_estimate_version', type=int, required=False,
action='store', help="Version id of with shock death number estimate")
parser.add_argument('--shock_life_table_estimate_version', type=int, required=False,
action='store', help="Version id of with shock life table estimate")
parser.add_argument('--mark_best', type=str, required=True,
action='store', help="True/False mark run as best")
parser.add_argument('--gbd_year', type=int, required=True,
action='store', help="GBD Year")
parser.add_argument('--username', type=str, required=True,
action='store', help="User conducting the run")
parser.add_argument('--run_finalizer', type=int, required=True,
action='store', help="True/False run finalizer")
parser.add_argument('--aggregate_full_lts', type=int, required=True,
action='store', help="True/False aggregate full lt's")
parser.add_argument('--upload', type=int, required=True,
action='store', help="True/False upload full life table estimate")
parser.add_argument('--enable_assertions_flag', type=str, required=True,
action='store', help="True/False whether or not to assert qx")
parser.add_argument('--aggregate_locations', type=str, required=True,
action='store', help="True/False whether or not to aggregate locations")
args = parser.parse_args()
full_lt_version_id = args.version_id
shock_death_number_estimate = args.shock_death_number_estimate_version
shock_life_table_estimate = args.shock_life_table_estimate_version
mark_best = args.mark_best
gbd_year = args.gbd_year
username = args.username
run_finalizer = args.run_finalizer
aggregate_full_lts = args.aggregate_full_lts
upload = args.upload
enable_assertions_flag = args.enable_assertions_flag
aggregate_locations = args.aggregate_locations
# Get gbd round id based on gbd_year
gbd_round_id = int(call_mort_function("get_gbd_round", {"gbd_year" : gbd_year}))
# Pull shock aggregator version
external_inputs = call_mort_function("get_external_input_map", {"process_name" : "full life table estimate", "run_id" : full_lt_version_id})
external_inputs = dict(zip(external_inputs.external_input_name, external_inputs.external_input_version))
shock_version_id = external_inputs['shock_aggregator']
hiv_run_name = external_inputs['hiv']
run_parents = call_mort_function("get_proc_lineage", {"model_name" : "full life table", "model_type" : "estimate", "run_id" : full_lt_version_id})
run_parents = dict(zip(run_parents.parent_process_name, run_parents.parent_run_id))
run_parents['shock aggregator'] = shock_version_id
run_parents['hiv'] = hiv_run_name
run_parents['shock death number estimate'] = shock_death_number_estimate
run_parents['shock life table estimate'] = shock_life_table_estimate
run_parents['full life table estimate'] = full_lt_version_id
full_lt = full_life_table(username, run_parents, run_finalizer, aggregate_full_lts, gbd_year, upload, enable_assertions_flag, mark_best, aggregate_locations)
full_lt.run()
if __name__ == "__main__":
main()
# DONE
| [
"[email protected]"
] | |
d46f12c676788ef01cdfbb69ba6c673db8a4a50d | affe6c648b9ce2434919ccbd88d36d969a619f94 | /moya/containers.py | 84db000ed363867ba8e37c4deb4c9f275fc93932 | [
"BSD-3-Clause"
] | permissive | thiagocalheiros/weasyprint_for_awslambda | 578c4e8c10aef40f82e36f0a9de5ec31032335f8 | 4080c49a3fb5fc94fca75bf38e8b10ee1acfb7ce | refs/heads/master | 2020-03-18T18:19:08.855086 | 2018-05-31T19:32:40 | 2018-05-31T19:32:40 | 135,084,321 | 0 | 0 | null | 2018-05-27T21:52:46 | 2018-05-27T21:52:46 | null | UTF-8 | Python | false | false | 3,050 | py | from __future__ import unicode_literals
from __future__ import print_function
from .compat import PY2, text_type, implements_to_string
from .urltools import urlencode
from threading import Lock
if PY2:
from urlparse import parse_qsl
else:
from urllib.parse import parse_qsl
from collections import OrderedDict
class LRUCache(OrderedDict):
"""A dictionary-like container that stores a given maximum items.
If an additional item is added when the LRUCache is full, the least recently used key is
discarded to make room for the new item.
"""
def __init__(self, cache_size=None):
self.cache_size = cache_size
self.lock = Lock()
super(LRUCache, self).__init__()
def __reduce__(self):
return self.__class__, (self.cache_size,)
def __setitem__(self, key, value):
with self.lock:
if self.cache_size is not None and key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
def lookup(self, key):
with self.lock:
value = OrderedDict.__getitem__(self, key)
del self[key]
OrderedDict.__setitem__(self, key, value)
return value
@implements_to_string
class QueryData(OrderedDict):
"""A container for data encoded in a url query string"""
@classmethod
def from_qs(cls, qs, change_callback=None):
qd = cls()
for k, v in parse_qsl(qs, keep_blank_values=True, strict_parsing=False):
qd.setdefault(k, []).append(v)
return qd
def copy(self):
return OrderedDict(self)
def update(self, d):
"""Specialized update, setting a value to None will delete it. Also ensures that the query data contains lists"""
for k, v in d.items():
if v is None:
if k in self:
del self[k]
else:
if isinstance(v, (list, set, tuple, dict)) or hasattr(v, 'items'):
self[k] = list(v)
else:
if v is None:
v = ''
elif not isinstance(v, text_type):
v = text_type(v)
self[k] = [v]
def __str__(self):
return urlencode(self)
def __repr__(self):
return '<querydata "{}">'.format(urlencode(self))
def __setitem__(self, k, v):
if v is None:
ret = self.__delitem__(k)
else:
if isinstance(v, (set, tuple)):
v = list(v)
if not isinstance(v, list):
v = [text_type(v)]
ret = super(QueryData, self).__setitem__(k, v)
return ret
def __delitem__(self, k):
ret = super(QueryData, self).__delitem__(k)
return ret
if __name__ == "__main__":
qd = QueryData.from_qs('foo=bar&a=1&b=2&hobbit=frodo&hobbit=sam')
print(qd.items())
qd.update({'foo': None})
print(qd.items())
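
    # Minimal LRUCache demo (illustrative values): with cache_size=2, adding a
    # third key evicts the least recently used entry.
    cache = LRUCache(cache_size=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3
    print(list(cache.keys()))  # ['b', 'c'] -- 'a' was evicted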
| [
"[email protected]"
] | |
3f9fb9d22bc078f7448e8a2437c435ac8a8a2f3c | 9a7c84122bb5d52a4feeea37e6434adf844fe10a | /drf/SpiderPlatform/SpiderPlatform/wsgi.py | fcd3b77ab8b295e18bfa4df1b7562bfd1d0cb535 | [] | no_license | yangwen1997/code | f4dc17850b186860d6304efb8dd92a189f6b5e12 | e697ca24405372c9320ed170478b16b4d539b13f | refs/heads/master | 2022-12-10T21:50:37.361086 | 2020-08-14T01:02:12 | 2020-08-14T01:02:12 | 148,280,123 | 5 | 0 | null | 2022-12-08T10:49:07 | 2018-09-11T07:44:21 | HTML | UTF-8 | Python | false | false | 406 | py | """
WSGI config for SpiderPlatform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpiderPlatform.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
daff31afed353adf8d2c6c28cfc9180c89b9fffc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_corning.py | 55f3a44638ba8fcc4697231b72956c80f298f130 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
# class header
class _CORNING():
def __init__(self,):
self.name = "CORNING"
		self.definitions = ['corn']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['corn']
| [
"[email protected]"
] | |
46fef4fb96cf282a8ebbeeff3f92d583c9c9beca | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_058/ch23_2019_09_16_18_49_35_451655.py | ff62dbbbbbfe3a0b1e8e382db69bd27edc33dfd8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def verifica_idade(x):
if x>=21:
return "Libeirado EUA e BRASIL"
elif x>=18:
return "Liberado EUA"
else:
return "Não está liberado" | [
"[email protected]"
] | |
8370c8f3e10ffb2ae7e3c1ef790b16d98bc32461 | 4114e7371af1da819a1c7a11ccc63a7961fd3c11 | /tensorbridge/src/main/python/TFServer.py | 8c272f4ca236cb6aba45066135f3da4938be9f43 | [
"Apache-2.0"
] | permissive | BBN-E/LearnIt | dad4e71da2e2028875807545ce75801067a7dd37 | 4f602f113cac9f4a7213b348a42c0fef23e2739c | refs/heads/master | 2022-12-18T22:15:11.877626 | 2021-09-09T04:28:31 | 2021-09-09T04:28:31 | 193,987,875 | 8 | 2 | Apache-2.0 | 2022-12-10T21:02:36 | 2019-06-26T22:53:56 | Java | UTF-8 | Python | false | false | 1,494 | py | from flask import Flask, request, g, Response, jsonify
import sys
import json
import os
def initDecoder(decoderClass,paramsFile):
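    # Dynamically import the decoder class: __import__("a.b.c") returns the
    # top-level package, so we walk the remaining dotted parts with getattr
    # and finally instantiate the class with the params file.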
if len(decoderClass) == 0:
print "Empty decoder class not permitted"
sys.exit(1)
parts = decoderClass.split('.')
module = ".".join(parts[:-1])
print "Attempting to import module: %s" % module
m = __import__(module)
print dir(m)
for part in parts[1:]:
print "Processing part %s" % part
m = getattr(m, part)
return m(paramsFile)
app = Flask(__name__)
app.config.from_envvar('TF_SERVER_PARAMS')
decoder = initDecoder(app.config['DECODER_CLASS'], app.config['PARAMS'])
print "Decoder object initialized"
@app.route("/ready", methods=['GET'])
def ready():
return "true"
@app.route("/decode", methods=['POST'])
def decode():
    # run the configured decoder on the POSTed JSON and return its scores
    decoded = {'labelToClassificationScore': decoder.decode(request.get_json())}
    return jsonify(decoded)
@app.route("/shutdown", methods=['POST'])
def shutdown():
decoder.shutdown()
# see http://flask.pocoo.org/snippets/67/
shutdown_func = request.environ.get("werkzeug.server.shutdown")
if shutdown_func is None:
raise RuntimeError("Could not get shutdown function")
shutdown_func()
return "bye"
port = os.environ['TF_SERVER_PORT']
print "Running Tensorflow server on port %s" % port
app.run(port=port)
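
# Example client call (the request payload shape is decoder-specific; the body
# below is illustrative only):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"tokens": ["some", "input"]}' http://localhost:<port>/decode
#   -> {"labelToClassificationScore": {...}}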
| [
"[email protected]"
] | |
43bb0e8f257aadaa0d1d098de741248fe249b3d3 | a12a4be7e8c792b4c1f2765d3e7a43056e9196b0 | /17-letter-combinations-of-a-phone-number/17-letter-combinations-of-a-phone-number.py | dcf91ce053c7b299be5f62a8c2801c58805c2e6d | [] | no_license | fdas3213/Leetcode | d4b7cfab70446b3f6a961252a55b36185bc87712 | 1335d5759c41f26eb45c8373f33ee97878c4a638 | refs/heads/master | 2022-05-28T16:24:15.856679 | 2022-05-19T21:56:35 | 2022-05-19T21:56:35 | 94,024,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | class Solution:
def letterCombinations(self, digits: str) -> List[str]:
#2. backtrack
res = []
if not digits:
return res
self.digit_map = {"1":"", "2":"abc","3":"def","4":"ghi",
"5":"jkl","6":"mno","7":"pqrs","8":"tuv", "9":"wxyz"}
        def backtrack(start: int, cur: str):
            if start == len(digits):
                res.append(cur)
                return
            # branch only on the digit at `start`; each letter extends cur
            for letter in self.digit_map[digits[start]]:
                backtrack(start + 1, cur + letter)
backtrack(0, "")
return res
| [
"[email protected]"
] |