blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f7bc5dacb84f4e18c258d76fd91a9bb8cc3af3b
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/23/12.py
|
da0396d4cf15e8267cd6d9041247bc41bc9c3b63
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,511 |
py
|
# -*- coding:utf-8 -*-
import os, itertools
curr_dir = os.path.dirname(os.path.abspath(__file__))
srcfilename = os.path.join(curr_dir, 'C-large.in')
dstfilename = os.path.join(curr_dir, 'output.txt')
def solve(numbers_):
    """Find two distinct subsets of `numbers_` with equal sums.

    Returns a pair ``(r1, r2)`` of subsets (a list or tuple each) whose
    element sums are equal, or the string ``'Impossible'`` when no such
    pair is found among the combination sizes examined.
    """
    numbers = sorted(numbers_)
    # Map each subset-sum seen so far to the subset that produced it,
    # seeded with the single-element subsets.
    memory = dict((k, [k]) for k in numbers)
    # range (not Py2-only xrange) works identically on Python 2 and 3 here.
    for r in range(2, len(numbers)):
        for combination in itertools.combinations(numbers, r):
            s = sum(combination)
            if s in memory:
                # A previously recorded subset has the same sum: done.
                return memory[s], combination
            memory[s] = combination
    return 'Impossible'
if __name__ == '__main__':
    # Text mode keeps str-based parsing correct on both Python 2 and 3;
    # the original opened in binary, which breaks str.split() on Python 3.
    with open(srcfilename, 'r') as inp:
        with open(dstfilename, 'w') as outp:
            lines = inp.readlines()
            count = int(lines.pop(0))  # first line: number of test cases
            outlines = []
            for i in range(count):  # range works on Py2 and Py3
                line = lines[i]
                # split() tolerates trailing newlines and repeated blanks,
                # where split(' ') would feed int('') and crash.
                numbers = [int(number) for number in line.split()]
                numbers.pop(0)  # leading value is the count of numbers
                result = solve(numbers)
                if result == 'Impossible':
                    outlines.append('Case #%d: Impossible\n' % (i + 1,))
                else:
                    r1, r2 = result
                    outlines.append('Case #%d:\n' % (i + 1,))
                    outlines.append('%s\n' % ' '.join(['%d' % r1i for r1i in r1]))
                    outlines.append('%s\n' % ' '.join(['%d' % r2i for r2i in r2]))
            outp.writelines(outlines)
|
[
"[email protected]"
] | |
0716ae0a297c478efb4cabc07dd95d1ade9b0765
|
0c85cba348e9abace4f16dfb70531c70175dac68
|
/cloudroast/networking/networks/api/security_groups/test_security_groups_quotas.py
|
711c5f5a1d12b995b33e7c5f496a7e31ad6fa4c0
|
[
"Apache-2.0"
] |
permissive
|
RULCSoft/cloudroast
|
31157e228d1fa265f981ec82150255d4b7876af2
|
30f0e64672676c3f90b4a582fe90fac6621475b3
|
refs/heads/master
| 2020-04-04T12:20:59.388355 | 2018-11-02T21:32:27 | 2018-11-02T21:32:27 | 155,923,262 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,301 |
py
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes
from cloudroast.networking.networks.fixtures \
import NetworkingSecurityGroupsFixture
class SecurityGroupsQuotasTest(NetworkingSecurityGroupsFixture):
    """Quota-enforcement tests for security groups: rules per group,
    groups per tenant and rules per tenant."""

    @classmethod
    def setUpClass(cls):
        """Setting up test data"""
        super(SecurityGroupsQuotasTest, cls).setUpClass()
        # Base security-group payload reused (renamed) by every test.
        cls.expected_secgroup = cls.get_expected_secgroup_data()
        cls.expected_secgroup.name = 'test_secgroup_quotas'

    def tearDown(self):
        # Remove groups/rules created by the test before base teardown.
        self.secGroupCleanUp()
        super(SecurityGroupsQuotasTest, self).tearDown()

    @tags('quotas')
    def test_rules_per_group(self):
        """
        @summary: Testing security rules quota per group
        """
        secgroup = self.create_test_secgroup(self.expected_secgroup)
        expected_secrule = self.get_expected_secrule_data()
        expected_secrule.security_group_id = secgroup.id
        rules_per_group = self.sec.config.max_rules_per_secgroup
        # Fill the group up to its quota; each creation is verified.
        self.create_n_security_rules_per_group(expected_secrule,
                                               rules_per_group)
        msg = ('Successfully created the expected security rules per group '
               'allowed by the quota of {0}').format(rules_per_group)
        self.fixture_log.debug(msg)

        # Checking the quota is enforced: one more rule must be rejected.
        request_kwargs = dict(
            security_group_id=expected_secrule.security_group_id,
            raise_exception=False)
        resp = self.sec.behaviors.create_security_group_rule(**request_kwargs)
        neg_msg = ('(negative) Creating a security rule over the group quota'
                   ' of {0}').format(rules_per_group)
        self.assertNegativeResponse(
            resp=resp, status_code=SecurityGroupsResponseCodes.CONFLICT,
            msg=neg_msg, delete_list=self.delete_secgroups,
            error_type=SecurityGroupsErrorTypes.OVER_QUOTA)

    @tags('quotas')
    def test_groups_per_tenant(self):
        """
        @summary: Testing security groups quota per tenant
        """
        groups_per_tenant = self.sec.config.max_secgroups_per_tenant
        self.create_n_security_groups(self.expected_secgroup,
                                      groups_per_tenant)

        # Checking the quota is enforced: one more group must be rejected.
        request_kwargs = dict(
            name=self.expected_secgroup.name,
            description=self.expected_secgroup.description,
            raise_exception=False)
        resp = self.sec.behaviors.create_security_group(**request_kwargs)
        neg_msg = ('(negative) Creating a security group over the tenant quota'
                   ' of {0}').format(groups_per_tenant)
        status_code = SecurityGroupsResponseCodes.CONFLICT
        error_type = SecurityGroupsErrorTypes.OVER_QUOTA
        self.assertNegativeResponse(
            resp=resp, status_code=status_code, msg=neg_msg,
            delete_list=self.delete_secgroups,
            error_type=error_type)

    @tags('quotas')
    def test_rules_per_tenant(self):
        """
        @summary: Testing security rules quota per tenant
        """
        expected_secrule = self.get_expected_secrule_data()
        groups_per_tenant = self.sec.config.max_secgroups_per_tenant
        rules_per_tenant = self.sec.config.max_rules_per_tenant
        # Floor division keeps this an int on Python 3 as well; plain `/`
        # would produce a float there and crash range() downstream.
        rules_per_group = rules_per_tenant // groups_per_tenant
        secgroups = self.create_n_security_groups_w_n_rules(
            self.expected_secgroup, expected_secrule, groups_per_tenant,
            rules_per_group)
        msg = ('Successfully created the expected security rules per tenant '
               'allowed by the quota of {0}').format(rules_per_tenant)
        self.fixture_log.debug(msg)

        # Checking the quota is enforced: one more rule must be rejected.
        request_kwargs = dict(
            security_group_id=secgroups[0].id,
            raise_exception=False)
        resp = self.sec.behaviors.create_security_group_rule(**request_kwargs)
        neg_msg = ('(negative) Creating a security rule over the tenant quota'
                   ' of {0}').format(rules_per_tenant)
        self.assertNegativeResponse(
            resp=resp, status_code=SecurityGroupsResponseCodes.CONFLICT,
            msg=neg_msg, delete_list=self.delete_secgroups,
            error_type=SecurityGroupsErrorTypes.OVER_QUOTA)

    def create_n_security_groups_w_n_rules(self, expected_secgroup,
                                           expected_secrule, groups_num,
                                           rules_num):
        """
        @summary: Creating n security groups with n rules
        """
        secgroups = self.create_n_security_groups(expected_secgroup,
                                                  groups_num)
        for group in secgroups:
            expected_secrule.security_group_id = group.id
            self.create_n_security_rules_per_group(expected_secrule, rules_num)
        return secgroups

    def create_n_security_groups(self, expected_secgroup, num):
        """
        @summary: Creating n security groups (each with a unique name)
        """
        secgroups = []
        for x in range(num):
            log_msg = 'Creating security group {0}'.format(x + 1)
            self.fixture_log.debug(log_msg)
            name = 'security_test_group_n_{0}'.format(x + 1)
            expected_secgroup.name = name
            secgroup = self.create_test_secgroup(expected_secgroup)
            secgroups.append(secgroup)
        msg = 'Successfully created {0} security groups'.format(num)
        self.fixture_log.debug(msg)
        return secgroups

    def create_n_security_rules_per_group(self, expected_secrule, num):
        """
        @summary: Creating n security rules within a security group and
            verifying they are created successfully
        """
        request_kwargs = dict(
            security_group_id=expected_secrule.security_group_id,
            raise_exception=False)
        for x in range(num):
            log_msg = 'Creating rule {0}'.format(x + 1)
            self.fixture_log.debug(log_msg)
            resp = self.sec.behaviors.create_security_group_rule(
                **request_kwargs)

            # Fail the test if any failure is found
            self.assertFalse(resp.failures)
            secrule = resp.response.entity

            # Check the Security Group Rule response
            self.assertSecurityGroupRuleResponse(expected_secrule, secrule)
        msg = ('Successfully created {0} security rules at security group '
               '{1}').format(num, expected_secrule.security_group_id)
        self.fixture_log.debug(msg)
|
[
"[email protected]"
] | |
47b06042aeb032ae4e939d3b48da59ba5b47905c
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/flask-webservices-labs/flask-spyne-fc20-labs/examples-fc20-labs/.venv/bin/pserve
|
aa6ac24579b1b2bb05f169edd556d6441a8b4c09
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null |
UTF-8
|
Python
| false | false | 325 |
#!/root/NetBeansProjects/fedora/python/flask-webservices-labs/flask-spyne-fc20-labs/examples-fc20-labs/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from pyramid.scripts.pserve import main

if __name__ == '__main__':
    # Windows console scripts may run as "name-script.pyw" or "name.exe";
    # normalize argv[0] before delegating to pyramid's pserve entry point.
    program_name = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(main())
|
[
"[email protected]"
] | ||
0fdeff39871fc700ab63276af189ae59086ca209
|
9025fc04844a202f00e691728c87eb10906e87c3
|
/Python/3/hue.py
|
47ddef65c29489500d3964a4d7a381559351461c
|
[] |
no_license
|
felipemarinho97/online-judge-exercices
|
e046e3fd951f4943c43e199f557d96f82d8ed286
|
28cff9b31431e1c1edeeba0b66689e871491ac0a
|
refs/heads/master
| 2021-01-20T00:33:09.782364 | 2017-04-23T15:19:04 | 2017-04-23T15:19:04 | 89,148,286 | 0 | 0 | null | 2017-04-23T15:21:01 | 2017-04-23T14:34:29 |
Python
|
UTF-8
|
Python
| false | false | 580 |
py
|
# coding: utf-8
# Melhor Ataque
# Felipe Marinho (C) | 116110223 | <[email protected]>
# Reads a team count, then each team's name and goal tally, and reports the
# team(s) with the best attack plus the average goals scored.
# NOTE(review): Python 2 only (raw_input / print statements).
times = int(raw_input())
lista_times = []   # team names, in input order
lista_gols = []    # goals per team, same order
total_gols = 0
maior = -1         # best (maximum) goal tally seen so far
for i in range(times) :
    time = raw_input()
    lista_times.append(time)
    gols = int(raw_input())
    lista_gols.append(gols)
    total_gols += gols
    # Keep track of the highest goal count.
    if lista_gols[i] > maior :
        maior = gols
print """Time(s) com melhor ataque (%i gol(s)):""" % maior
# Print every team tied at the maximum.
for i in range(times) :
    if lista_gols[i] == maior :
        print lista_times[i]
print ""
# float() forces true division under Python 2.
print "Média de gols marcados: %.1f" % (total_gols/float(times))
|
[
"[email protected]"
] | |
0d68ac6e207b37d788e51c89ec289b18727b302d
|
c22c83592571b64c3da4a3f3c4d1bbaaee50a318
|
/encryption.py
|
ea49016c24dde788787f3a42249522bd0f17076a
|
[] |
no_license
|
tt-n-walters/thebridge-week1
|
eaef2887122dd4f778ab94ab3c819f1e63a1985f
|
8598125af12b21794e93f09407984009c36aaf25
|
refs/heads/master
| 2023-06-16T14:31:45.955254 | 2021-07-09T12:14:40 | 2021-07-09T12:14:40 | 382,301,941 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 180 |
py
|
import hashlib

# Demo: hash a password with SHA-256.
# NOTE(review): a single unsalted SHA-256 is NOT suitable for real password
# storage; use a salted KDF such as hashlib.pbkdf2_hmac or scrypt instead.
password = "password1"
encoded_password = password.encode()  # hashlib operates on bytes, not str
encrypted = hashlib.sha256(encoded_password).hexdigest()

# https://resources.nicowalters.repl.co/hash
|
[
"[email protected]"
] | |
ccfd104c316ff6d373be371b1562c7625f50c37c
|
41f09c4f9990f8d2ce57aef92be1580f8a541656
|
/show_lbiflist.py
|
69778715a9ac37d8e3b06516f36e4ea83cfb6002
|
[] |
no_license
|
jebpublic/pybvccmds
|
d3111efe6f449c3565d3d7f1c358bdd36bc1a01a
|
997eead4faebf3705a83ce63b82d853730b23fbf
|
refs/heads/master
| 2016-09-05T18:56:52.509806 | 2015-02-25T17:41:47 | 2015-02-25T17:41:47 | 31,315,416 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,472 |
py
|
#!/usr/bin/python
import sys
import json
import pybvc
from pybvc.netconfdev.vrouter.vrouter5600 import VRouter5600
from pybvc.common.status import STATUS
from pybvc.controller.controller import Controller
from pybvc.common.utils import load_dict_from_file
if __name__ == "__main__":
    f = "cfg.yml"
    d = {}
    # Load controller and vRouter connection settings from the YAML file.
    if(load_dict_from_file(f, d) == False):
        print("Config file '%s' read error: " % f)
        exit()

    try:
        ctrlIpAddr = d['ctrlIpAddr']
        ctrlPortNum = d['ctrlPortNum']
        ctrlUname = d['ctrlUname']
        ctrlPswd = d['ctrlPswd']
        nodeName = d['nodeName']
        nodeIpAddr = d['nodeIpAddr']
        nodePortNum = d['nodePortNum']
        nodeUname = d['nodeUname']
        nodePswd = d['nodePswd']
    except:
        # Any missing key aborts the run.
        print ("Failed to get Controller device attributes")
        exit(0)

    # Connect to the controller, then to the vRouter 5600 behind it.
    ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
    vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd)
    print ("<<< 'Controller': %s, '%s': %s" % (ctrlIpAddr, nodeName, nodeIpAddr))

    # result is a (status, payload) pair.
    result = vrouter.get_loopback_interfaces_list()
    status = result[0]
    if(status.eq(STATUS.OK) == True):
        # Python 2 print statement — this script is Python 2 only.
        print "Loopback interfaces:"
        dpIfList = result[1]
        print json.dumps(dpIfList, indent=4)
    else:
        print ("\n")
        print ("!!!Failed, reason: %s" % status.brief().lower())
        print ("%s" % status.detail())

    sys.exit(0)
|
[
"[email protected]"
] | |
17aec2e9e4241eb7c8589ae7042a57c2077d973f
|
209c876b1e248fd67bd156a137d961a6610f93c7
|
/python/paddle/fluid/tests/unittests/xpu/test_reduce_max_op_xpu.py
|
9256b135ba8d04c2c3984633b176dd0a68c66765
|
[
"Apache-2.0"
] |
permissive
|
Qengineering/Paddle
|
36e0dba37d29146ebef4fba869490ecedbf4294e
|
591456c69b76ee96d04b7d15dca6bb8080301f21
|
refs/heads/develop
| 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 |
Apache-2.0
| 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null |
UTF-8
|
Python
| false | false | 2,573 |
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class XPUTestReduceMaxOp(XPUOpTestWrapper):
    # Wrapper consumed by create_test_class(); groups the reduce_max XPU
    # test variants defined below.
    def __init__(self):
        self.op_name = 'reduce_max'

    class XPUTestReduceMaxBase(XPUOpTest):
        def setUp(self):
            # Run on the first XPU device.
            self.place = paddle.XPUPlace(0)
            self.init_case()
            self.set_case()

        def set_case(self):
            self.op_type = 'reduce_max'
            self.attrs = {
                'use_xpu': True,
                'reduce_all': self.reduce_all,
                'keep_dim': self.keep_dim
            }
            self.inputs = {'X': np.random.random(self.shape).astype("float32")}
            # Reference output mirrors numpy: max over all elements when
            # reduce_all is set, otherwise over the configured axis.
            if self.attrs['reduce_all']:
                self.outputs = {'Out': self.inputs['X'].max()}
            else:
                self.outputs = {
                    'Out':
                    self.inputs['X'].max(axis=self.axis,
                                         keepdims=self.attrs['keep_dim'])
                }

        def init_case(self):
            # Default case: reduce axis 0, drop the reduced dim.
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = False

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ['X'], 'Out')

    class XPUTestReduceMaxCase1(XPUTestReduceMaxBase):
        # Same as the base case but keeps the reduced dimension.
        def init_case(self):
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = True
# Register one concrete test class per dtype the XPU reduce_max kernel
# supports.
support_types = get_xpu_op_support_types('reduce_max')
for stype in support_types:
    create_test_class(globals(), XPUTestReduceMaxOp, stype)

if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
95f07028ed1317b33c687e3f152ed408d54accea
|
0d2f636592dc12458254d793f342857298c26f12
|
/11-2(tag).py
|
1baa801108cd7920160b82b12b955e92548f7030
|
[] |
no_license
|
chenpc1214/test
|
c6b545dbe13e672f11c58464405e024394fc755b
|
8610320686c499be2f5fa36ba9f11935aa6d657b
|
refs/heads/master
| 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
def mymax(n1,n2):
    """Print whichever of the two values is larger."""
    bigger = n1 if n1 > n2 else n2
    print("較大值是 : ", bigger)
x1,x2 = eval(input("請輸入2個數值:"))
mymax(x1,x2)
"""自己做的
def mymax(n1,n2):
print("最大值為:",max(n1,n2))
a = input("請輸入2個數值:")
mymax(a,b)"""
|
[
"[email protected]"
] | |
b8fd4f4290f8a0877f2b1b3efb49106e25a3f001
|
43ab33b2f50e47f5dbe322daa03c86a99e5ee77c
|
/rcc/models/od_mcomplex_type_definition_method_def.py
|
07a0da5592495c471d676699b1ab4f6c2e885f62
|
[] |
no_license
|
Sage-Bionetworks/rcc-client
|
c770432de2d2950e00f7c7bd2bac22f3a81c2061
|
57c4a621aecd3a2f3f9faaa94f53b2727992a01a
|
refs/heads/main
| 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,896 |
py
|
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class ODMcomplexTypeDefinitionMethodDef(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type; drives to_dict() recursion.
    openapi_types = {
        'description': 'ODMcomplexTypeDefinitionDescription',
        'formal_expression': 'list[ODMcomplexTypeDefinitionFormalExpression]',
        'alias': 'list[ODMcomplexTypeDefinitionAlias]',
        'oid': 'str',
        'name': 'str',
        'type': 'str'
    }

    # Attribute name -> JSON key on the wire.
    attribute_map = {
        'description': 'description',
        'formal_expression': 'formalExpression',
        'alias': 'alias',
        'oid': 'oid',
        'name': 'name',
        'type': 'type'
    }

    def __init__(self, description=None, formal_expression=None, alias=None, oid=None, name=None, type=None, local_vars_configuration=None):  # noqa: E501
        """ODMcomplexTypeDefinitionMethodDef - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._description = None
        self._formal_expression = None
        self._alias = None
        self._oid = None
        self._name = None
        self._type = None
        self.discriminator = None

        # `description` is effectively required: its setter rejects None when
        # client-side validation is enabled. The rest are optional.
        self.description = description
        if formal_expression is not None:
            self.formal_expression = formal_expression
        if alias is not None:
            self.alias = alias
        if oid is not None:
            self.oid = oid
        if name is not None:
            self.name = name
        if type is not None:
            self.type = type

    @property
    def description(self):
        """Gets the description of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The description of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: ODMcomplexTypeDefinitionDescription
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this ODMcomplexTypeDefinitionMethodDef.

        :param description: The description of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: ODMcomplexTypeDefinitionDescription
        """
        if self.local_vars_configuration.client_side_validation and description is None:  # noqa: E501
            raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501

        self._description = description

    @property
    def formal_expression(self):
        """Gets the formal_expression of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The formal_expression of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: list[ODMcomplexTypeDefinitionFormalExpression]
        """
        return self._formal_expression

    @formal_expression.setter
    def formal_expression(self, formal_expression):
        """Sets the formal_expression of this ODMcomplexTypeDefinitionMethodDef.

        :param formal_expression: The formal_expression of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: list[ODMcomplexTypeDefinitionFormalExpression]
        """
        self._formal_expression = formal_expression

    @property
    def alias(self):
        """Gets the alias of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The alias of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: list[ODMcomplexTypeDefinitionAlias]
        """
        return self._alias

    @alias.setter
    def alias(self, alias):
        """Sets the alias of this ODMcomplexTypeDefinitionMethodDef.

        :param alias: The alias of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: list[ODMcomplexTypeDefinitionAlias]
        """
        self._alias = alias

    @property
    def oid(self):
        """Gets the oid of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The oid of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: str
        """
        return self._oid

    @oid.setter
    def oid(self, oid):
        """Sets the oid of this ODMcomplexTypeDefinitionMethodDef.

        :param oid: The oid of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: str
        """
        self._oid = oid

    @property
    def name(self):
        """Gets the name of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The name of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ODMcomplexTypeDefinitionMethodDef.

        :param name: The name of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def type(self):
        """Gets the type of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501

        :return: The type of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this ODMcomplexTypeDefinitionMethodDef.

        :param type: The type of this ODMcomplexTypeDefinitionMethodDef.  # noqa: E501
        :type: str
        """
        # Closed value set from the OpenAPI spec.
        allowed_values = ["COMPUTATION", "IMPUTATION", "TRANSPOSE", "OTHER"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models and dicts of
        # models via their own to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ODMcomplexTypeDefinitionMethodDef):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ODMcomplexTypeDefinitionMethodDef):
            return True

        return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
95edf831f37b676ba3fb2731a59d15664766b478
|
3c099a78896ca4b775d28fccf38c2bfdf6a1a555
|
/zMiscellaneous/WebScraping/ScrapingEcommerce.py
|
91e6ae08778622a1632ba801532cb50101916bff
|
[] |
no_license
|
anmolparida/selenium_python
|
db21215837592dbafca5cced7aecb1421395ed41
|
78aec8bf34d53b19fb723a124ad13342c6ce641c
|
refs/heads/master
| 2022-12-03T23:52:32.848674 | 2020-08-30T19:26:30 | 2020-08-30T19:26:30 | 282,207,788 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,366 |
py
|
import requests
from bs4 import BeautifulSoup

# Scrape item names/prices from the scrapingclub exercise site.
# Getting Value from the First Page
url = 'https://scrapingclub.com/exercise/list_basic/?page=1'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
items = soup.find_all('div', class_='col-lg-4 col-md-6 mb-4')
count = 0
for i in items:
    itemName = i.find('h4', class_='card-title').text.strip('\n')
    itemPrice = i.find('h5').text
    count = count + 1
    print(str(count) + '. itemPrice: ' + itemPrice, 'itemName: ' + itemName)

# Getting Value from the All the Pages
# Collect the numeric pagination links (skip "Next"/"Prev").
pages = soup.find('ul', class_='pagination')
urls = []
links = pages.find_all('a', class_='page-link')
for link in links:
    pageNum = int(link.text) if link.text.isdigit() else None
    if pageNum is not None:
        x = link.get('href')
        urls.append(x)
print(urls)

print('\nGetting Value from the All the Pages')
count = 0
for i in urls:
    # NOTE(review): `url` already ends in '?page=1', so `url + href` likely
    # produces '...?page=1?page=2' — verify the href format; urllib.parse's
    # urljoin(url, i) would be the safer way to build the page URL.
    newURL = url + i
    response = requests.get(newURL)
    soup = BeautifulSoup(response.text, 'lxml')
    items = soup.find_all('div', class_='col-lg-4 col-md-6 mb-4')
    for i in items:
        itemName = i.find('h4', class_='card-title').text.strip('\n')
        itemPrice = i.find('h5').text
        count = count + 1
        print(str(count) + '. itemPrice: ' + itemPrice, 'itemName: ' + itemName)
|
[
"[email protected]"
] | |
6bdee705a979426573bc0d836de6cc21f8c69502
|
a14dd601cde67f67d0ba38dfd1362f7c0109cef1
|
/graphs/past/perfect-friends.py
|
84d3237c7bc95823da7474a6ccbd297330ad8192
|
[] |
no_license
|
Meaha7/dsa
|
d5ea1615f05dae32671af1f1c112f0c759056473
|
fa80219ff8a6f4429fcf104310f4169d007af712
|
refs/heads/main
| 2023-09-03T18:52:41.950294 | 2021-11-05T09:14:42 | 2021-11-05T09:14:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 565 |
py
|
from collections import defaultdict
from graphs.util import build
def dfs(graph, src, vis):
    """Depth-first search from `src`; marks nodes in `vis` and returns the
    size of the connected component containing `src`."""
    vis.add(src)
    # 1 for src itself plus the sizes reachable through unvisited neighbours;
    # the membership test runs lazily, so vis updates are seen mid-iteration,
    # exactly as in an explicit loop.
    return 1 + sum(dfs(graph, neighbor, vis)
                   for neighbor in graph[src]
                   if neighbor not in vis)
def main(edges):
    """Count 'perfect friend' pairs: the sum over all unordered pairs of
    distinct connected components of the product of their sizes."""
    graph, seen = build(edges), set()
    # One DFS per still-unseen node yields each component size exactly once
    # (the membership test sees `seen` grow as dfs marks nodes).
    sizes = [dfs(graph, node, seen) for node in graph.keys()
             if node not in seen]
    # sum(sizes[i] * sizes[j] for i < j), via a running suffix sum.
    total, remaining = 0, sum(sizes)
    for size in sizes:
        remaining -= size
        total += size * remaining
    return total
# Ad-hoc driver: run the counter over sample edge lists.
for edges in [
    [(0, 1), (2, 3), (4, 5), (5, 6), (4, 6)]
]:
    print(main(edges))
|
[
"[email protected]"
] | |
d47d43472d31e0e542659aeb3cc520cb97087223
|
1643a5a0d1acd3bdc851718c223ba0b14bbec1c3
|
/backend/rn_push_notificatio_27417/settings.py
|
0f648a30594df5a74b623cf3269344d5cfcda383
|
[] |
no_license
|
crowdbotics-apps/rn-push-notificatio-27417
|
90c614ad558b2810e2b2cfe55e2dae7b97f1359e
|
ea9c37615be4e9e872a63d226562e4ca7bc2b6c5
|
refs/heads/master
| 2023-05-23T06:29:28.261563 | 2021-05-27T12:29:04 | 2021-05-27T12:29:04 | 370,993,920 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,141 |
py
|
"""
Django settings for rn_push_notificatio_27417 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# django-environ reader: all deployment-specific values come from env vars.
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the proxy's forwarded-proto header for HTTPS detection.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]

LOCAL_APPS = [
    'home',
    'modules',
    'users.apps.UsersConfig',
]

THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'rn_push_notificatio_27417.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'web_build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'rn_push_notificatio_27417.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

# SQLite fallback for local development; overridden below when DATABASE_URL
# is provided (e.g. Postgres in production).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}

REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# Outgoing email via SendGrid SMTP.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")

# S3 media storage is enabled only when all four AWS settings are present.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"[email protected]"
] | |
919204b02732c69b3cdce838f4f06670d71c72c5
|
5c5e7b03c3373e6217665842f542ca89491290ff
|
/2015/day25.py
|
cb3f0bf727f854fd9f2f893b07c4884439f6ee3e
|
[] |
no_license
|
incnone/AdventOfCode
|
9c35214e338e176b6252e52a25a0141a01e290c8
|
29eac5d42403141fccef3c3ddbb986e01c89a593
|
refs/heads/master
| 2022-12-21T21:54:02.058024 | 2022-12-15T17:33:58 | 2022-12-15T17:33:58 | 229,338,789 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 616 |
py
|
from getinput import get_input
from util import ncr
def get_idx(row, col):
    # 1-based ordinal of cell (row, col) when the grid is filled along
    # anti-diagonals: ncr(row+col-1, 2) cells precede this diagonal,
    # then `col` cells along it.
    # NOTE(review): the (1, 1) special case is redundant iff
    # util.ncr(1, 2) returns 0 -- confirm before removing the guard.
    if row == col == 1:
        return 1
    return ncr(row+col-1, 2) + col
def get_val(row, col):
    # Code value at (row, col): the start value multiplied by `rat`
    # (get_idx - 1) times modulo `mod`, computed in one step with
    # three-argument pow() instead of iterating.
    mod = 33554393
    rat = 252533
    startval = 20151125
    return startval*pow(rat, get_idx(row, col)-1, mod) % mod
def parse_input(s):
    """Pull (column, row) out of the puzzle's prose input.

    The column is the final whitespace-separated token (with a trailing
    '.') and the row is the third-from-last token (trailing ',').
    """
    tokens = s.split()
    col = int(tokens[-1].rstrip('.'))
    row = int(tokens[-3].rstrip(','))
    return col, row
def part_1(row, col):
    # Part 1 answer is just the code at the requested coordinates.
    return get_val(row, col)
if __name__ == "__main__":
    # parse_input returns (col, row); unpack accordingly.
    the_col, the_row = parse_input(get_input(25))
    print(the_row, the_col)
    print('Part 1:', part_1(the_row, the_col))
|
[
"[email protected]"
] | |
5bb05fab43f5353a702c4e9a5694f8f08030eda9
|
c74f234dc478b49f367106b414df2473ac35b93c
|
/mysite/polls/urls.py
|
5c7dd5797f18fd2607e2b916de5c2ac36d13007c
|
[] |
no_license
|
Richiewong07/Django
|
05994f552cea2cb612c6c1957a0a9a39605fdf5c
|
09ac06a60c623d79bb8ecafd014ac7dbc74e8535
|
refs/heads/master
| 2021-04-15T14:00:00.394201 | 2018-03-24T00:34:15 | 2018-03-24T00:34:15 | 126,238,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 591 |
py
|
from django.conf.urls import url
from . import views
# URL routes for the polls app; `question_id` is captured as one or
# more digits and passed to the view as a keyword argument.
urlpatterns = [
    # r^$ MEANS DON'T ADD ANYTHING TO OUR URL
    # views.index IS WHAT YOU WANT TO DISPLAY
    # 127.0.0.1/polls/
    url(r'^$', views.index, name="index"),
    # SET QUESTION_ID TO A NUMBER
    # 127.0.0.1/polls/1
    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name="detail"),
    # 127.0.0.1/polls/1/results
    url(r'^(?P<question_id>[0-9]+)/results$', views.results, name="results"),
    # 127.0.0.1/polls/1/votes
    url(r'^(?P<question_id>[0-9]+)/vote$', views.vote, name="vote"),
]
# Namespace for URL reversing, e.g. "polls:detail".
app_name = 'polls'
|
[
"[email protected]"
] | |
7b76e148b73b644e42f7a1abb259e77dad11fdcc
|
4f4c2e5a8a71a2058069b90eb75e11b1ec80efa9
|
/euler/Problem_38-Pandigital_multiples.py
|
3b25e4c08a2411b5567f23fe50c40e8e254addf0
|
[] |
no_license
|
mingyyy/dataquest_projects
|
20e234f1d0d3dd8be1f0202b7ed3bce172474e38
|
885ffe4338300cb9c295f37f6140c50ff3b72186
|
refs/heads/master
| 2022-12-11T17:25:44.053404 | 2020-01-10T09:24:28 | 2020-01-10T09:24:28 | 190,170,724 | 0 | 0 | null | 2022-12-08T05:55:21 | 2019-06-04T09:29:53 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 525 |
py
|
"""
Take the number 192 and multiply it by each of 1, 2, and 3:
By concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3)
The same can be achieved by starting with 9 and multiplying by 1, 2, 3, 4, and 5, giving the pandigital, 918273645, which is the concatenated product of 9 and (1,2,3,4,5).
What is the largest 1 to 9 pandigital 9-digit number that can be formed as the concatenated product of an integer with (1,2, ... , n) where n > 1?
"""
|
[
"[email protected]"
] | |
4058a4aba52d9076ba294a27d437eb8344f2cdb7
|
668cc2cd1109cf1c207a57ae7decc5ae5edc9728
|
/backend/users/migrations/0002_auto_20201104_1426.py
|
d51fe7ac08131e041c8abbbf9f79c5410e4a4133
|
[] |
no_license
|
crowdbotics-apps/logictech-22290
|
7538661024c163c16881371468f84c181d1ee93f
|
f17151874e1fd60a1cc81b247a5e0599421ac6e8
|
refs/heads/master
| 2023-01-09T21:14:45.728461 | 2020-11-04T14:30:13 | 2020-11-04T14:30:13 | 310,025,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 627 |
py
|
# Generated by Django 2.2.17 on 2020-11-04 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a user<->course.Group M2M
    # relation and makes User.name optional.
    dependencies = [
        ('course', '0001_initial'),
        ('users', '0001_initial'),
    ]
    operations = [
        # Many-to-many join between User and course.Group.
        migrations.AddField(
            model_name='user',
            name='group',
            field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
        ),
        # Allow blank/null names.
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
d47b3bb24581ca86d9a76530a019eaca62ae8e66
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/p38a_input/L2EE/2EE-2J_MD_NVT_rerun/set_4.py
|
2b3fa06318de66ab34d51136748b9f7c26eaed64
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 742 |
py
|
import os
# Prepare production run #4 for every lambda window of this TI job:
# copy the input/PBS templates into each window's directory and replace
# the XXX placeholder with the window's lambda value via sed.
dir = '/mnt/scratch/songlin3/run/p38a/L2EE/MD_NVT_rerun/ti_one-step/2EE_2J/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
# Lambda values of the 12 windows; each also names its directory
# (formatted to 5 decimal places).
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin
    prodin = workdir + "%6.5f_prod_4.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS
    pbs = workdir + "%6.5f_4.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"[email protected]"
] | |
eaf3840aa3f8986f9ac5af4ac914a14e080bd347
|
cc7ad1a2aa5d691c15ff7838d1e5126ab2c2bee0
|
/basic_notifications/views.py
|
b7e1ecc497a68ddf9693738e0e033c9b746371b7
|
[] |
no_license
|
demirantay/lingooapp
|
9632be8a7d3dd00e7a4ac13618f32975da389729
|
c842bb032668ef1bd5e7f4282acd4990843c8640
|
refs/heads/master
| 2023-03-14T08:00:37.681334 | 2021-01-09T09:36:48 | 2021-01-09T09:36:48 | 285,181,982 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,688 |
py
|
# Main Imports
import json
# Django Imports
from django.shortcuts import render, get_object_or_404, HttpResponse
from django.http import HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.contrib.auth.models import User
from django.utils import timezone
# My Module ImportsImports
from .models import NotificationBase
from profile_settings.models import BasicUserProfile
from teacher_authentication.models import TeacherUserProfile
from utils.session_utils import get_current_user, get_current_user_profile
from utils.session_utils import get_current_teacher_user_profile
from utils.access_control import delete_teacher_user_session
def notifications(request, page):
    """
    Render the notification feed for the signed-in basic user.

    Fetches the user's notifications newest-first, records whether any
    were unread when the page was opened, marks every unread one as read
    (remembering which ids were unread so the template can highlight
    them), and paginates the feed at 80 entries per page.  Anonymous
    visitors are redirected to the login page.
    """
    # ACCESS CONTROL: drop any teacher-typed session data before
    # serving a basic-user page.
    delete_teacher_user_session(request)

    # Resolve the current user and both profile flavours.
    current_basic_user = get_current_user(request, User, ObjectDoesNotExist)
    current_basic_user_profile = get_current_user_profile(
        request,
        User,
        BasicUserProfile,
        ObjectDoesNotExist
    )
    current_teacher_profile = get_current_teacher_user_profile(
        request,
        User,
        TeacherUserProfile,
        ObjectDoesNotExist
    )

    # All of this user's notifications, newest first.  Fall back to an
    # empty list (NOT None, as the old code did) so the loops below
    # cannot raise TypeError when the lookup fails.
    try:
        all_notifications = NotificationBase.objects.filter(
            notified_user=current_basic_user_profile
        ).order_by("-id")
    except ObjectDoesNotExist:
        all_notifications = []

    # Pagination: 80 entries per page.
    # NOTE(review): page 1 starts at offset 80, so records 0-79 are only
    # reachable via page 0 -- confirm the URLconf numbers pages from 0.
    current_page = page
    previous_page = page - 1
    next_page = page + 1
    post_records_starting_point = current_page * 80
    post_records_ending_point = post_records_starting_point + 80
    try:
        current_page_notifications = NotificationBase.objects.filter(
            notified_user=current_basic_user_profile
        ).order_by('-id')[post_records_starting_point:post_records_ending_point]
    except ObjectDoesNotExist:
        current_page_notifications = []

    # Did the user have anything unread when the page was opened?
    has_unread_notifications = any(
        not notification.is_read for notification in all_notifications
    )

    # The page has now been visited: persist every unread notification
    # as read, remembering which ids were unread for the template.
    current_unread_notifications = {}
    for notification in all_notifications:
        if not notification.is_read:
            current_unread_notifications[notification.id] = False
            notification.is_read = True
            notification.save()

    data = {
        "current_basic_user": current_basic_user,
        "current_basic_user_profile": current_basic_user_profile,
        "current_teacher_profile": current_teacher_profile,
        "all_notifications": all_notifications,
        "has_unread_notifications": has_unread_notifications,
        "current_page": current_page,
        "previous_page": previous_page,
        "next_page": next_page,
        "current_page_notifications": current_page_notifications,
        "current_unread_notifications": current_unread_notifications,
    }

    # Anonymous visitors may not view notifications.
    if current_basic_user is None:
        return HttpResponseRedirect("/auth/login/")
    else:
        return render(request, "basic_notifications/notifications.html", data)
|
[
"[email protected]"
] | |
7ec56d1dfd873785b0db9c891aacd95142031aa1
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/sQN3Jb43teMbC7rGJ_18.py
|
795c8a4678747c53bbb24bcd6b59c6e238410b4e
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 176 |
py
|
def make_transpose(m):
    """Return the transpose of matrix `m` (a list of equal-length rows).

    Fixes an IndexError in the original: `len(m[0])` crashed on an
    empty matrix `[]`.  An empty matrix (or one with empty rows)
    transposes to `[]`.
    """
    if not m:
        return []
    # zip(*m) pairs up the i-th elements of every row, i.e. the columns.
    return [list(column) for column in zip(*m)]
|
[
"[email protected]"
] | |
c2ea0ec2e21e9047ed990c7351593ad82edc44ad
|
536bce6ca78a9a151247b51acb8c375c9db7445f
|
/chapter1/1.5-interest_rate.py
|
15aba2121680fc7d7fffc673afd05db59b2923ce
|
[] |
no_license
|
clicianaldoni/aprimeronpython
|
57de34313f4fd2a0c69637fefd60b0fb5861f859
|
a917b62bec669765a238c4b310cc52b79c7df0c9
|
refs/heads/master
| 2023-01-28T18:02:31.175511 | 2023-01-23T08:14:57 | 2023-01-23T08:14:57 | 112,872,454 | 0 | 0 | null | 2017-12-02T19:55:40 | 2017-12-02T19:55:40 | null |
UTF-8
|
Python
| false | false | 464 |
py
|
p = 5 # Interest rate %
A = 1000 # Initial amount
years = 3 # Number of years to grow
# Formula for calculating sum: A(1 + p/100)^n
# To avoid integer division we convert p to float
# NOTE(review): `sum` shadows the built-in; harmless in this short
# script but worth renaming if it grows.
sum = A * (1 + (float(p)/100))**years
print("After %g years with %g%% interest rate and an initial amount of %g we have %g." % (years, p, A, sum))
"""
Unix>python interest_rate.py
After 3 years with 5% interest rate and an initial amount of 1000 we have 1157.63.
"""
|
[
"[email protected]"
] | |
e57bf9dec7e340b0469004ecf5111d0ea081f482
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/armulator/armv6/opcodes/concrete/ldc_ldc2_immediate_a2.py
|
c145465c0a7b80a8b878d200a1c3998d5b55001d
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120 | 2023-08-08T04:57:02 | 2023-08-08T04:57:02 | 91,716,042 | 29 | 7 |
MIT
| 2023-08-08T04:55:59 | 2017-05-18T16:37:55 |
Python
|
UTF-8
|
Python
| false | false | 788 |
py
|
from armulator.armv6.arm_exceptions import UndefinedInstructionException
from armulator.armv6.bits_ops import substring, bit_at
from armulator.armv6.opcodes.abstract_opcodes.ldc_ldc2_immediate import LdcLdc2Immediate
class LdcLdc2ImmediateA2(LdcLdc2Immediate):
    # Decoder for the A2 encoding of the LDC/LDC2 (immediate) opcode.

    @staticmethod
    def from_bitarray(instr, processor):
        """Decode an instruction word into an LdcLdc2ImmediateA2 opcode.

        Fields: imm8 (bits 7:0) is a word offset scaled by 4, coproc
        (11:8) selects the coprocessor, rn (19:16) the base register;
        bits 24/23/21 are the index/add/writeback flags.  Words whose
        coproc matches 0b101x are rejected as undefined.
        """
        imm8 = substring(instr, 7, 0)
        coproc = substring(instr, 11, 8)
        rn = substring(instr, 19, 16)
        index = bit_at(instr, 24)
        add = bit_at(instr, 23)
        wback = bit_at(instr, 21)
        # coproc top bits 0b101 -> coprocessors 10/11 are not valid here.
        if substring(coproc, 3, 1) == 0b101:
            raise UndefinedInstructionException()
        else:
            # imm32 = imm8:'00' -- offsets are expressed in words.
            imm32 = imm8 << 2
            return LdcLdc2ImmediateA2(instr, cp=coproc, n=rn, add=add, imm32=imm32, index=index, wback=wback)
|
[
"[email protected]"
] | |
fea4d5a004cb0d120f3829c1fa2cbf4b2df64e17
|
046333321b2717c6391a111fc2f74b04bbbeb7af
|
/chapter13(enumrate function)/sorted.py
|
cbe84261ffe34d30f366d660bdb7c5115a530460
|
[] |
no_license
|
jyash28/Python-practice
|
b0c9df42bc93716d8721a1420ee1f3170b40b18c
|
cd3a61934618145cbaa20e62194ebb1642ba9941
|
refs/heads/main
| 2023-07-03T18:06:38.407491 | 2021-07-13T09:47:07 | 2021-07-13T09:47:07 | 314,485,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
# Demo: sort a list of dicts on one of their values.
guitars= [
    {"model1" : 'famaha f310' ,"price": 8400},
    {"model2" : 'faith neptune' ,"price": 100000},
    {"model3" : 'faith appolo venus' ,"price": 35000},
    {"model4" : 'taylor' ,"price": 450000}
]
# Most expensive first: `key` extracts each dict's "price" to compare.
sorted_guitars = sorted(guitars, key= lambda d: d["price"],reverse = True)
print(sorted_guitars)
|
[
"[email protected]"
] | |
e22cf41bebc21fe5ea70c17604946adc4fe9a69e
|
ef5bde73d58734f5081f127fe344ae85c53b8b68
|
/config_modify.py
|
8c8255c6e3156d5372724911ccee779d14d2e548
|
[] |
no_license
|
ychnlgy/VoxCeleb1
|
a3a6337f322ec1c78f926e2f529db001f7ec8349
|
930ce2c5c9f0828705afb096c7ee33bfe4b6b96e
|
refs/heads/master
| 2020-06-11T10:40:35.462721 | 2019-07-09T16:42:24 | 2019-07-09T16:42:24 | 193,934,200 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
import argparse
import voxceleb1
if __name__ == "__main__":
    # Re-emit a saved training config as "--key value" CLI arguments so
    # a new run can be launched with the same parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", required=True)
    args = parser.parse_args()
    config = voxceleb1.training.Config(args.path)
    # "_dob" is removed before re-emitting -- presumably a creation
    # timestamp that should not be replayed; TODO confirm.
    del config.param_dict["_dob"]
    kvs = ["--%s %s" % item for item in config.param_dict.items()]
    print(" ".join(kvs))
|
[
"[email protected]"
] | |
c33973915a1487aa198d9586d9ef07976496fe35
|
9c6dcd6964c0bbbc960106736a3adf83f99ae613
|
/Balatarin/bipartiteMongo.py~
|
0ac84299fccd071931a5ee43aa4271ca00d40bdf
|
[] |
no_license
|
Roja-B/Trajectories
|
5ab065991c34ba74b6951ad090401c0cb14f222b
|
e1ce1c6ac8095f92853e0ebe7a41eb8a82e7eff2
|
refs/heads/master
| 2016-09-05T17:56:45.643404 | 2013-01-24T03:54:21 | 2013-01-24T03:54:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,701 |
#!/usr/lib/python3.0
# This program extracts bipartite edgelist of users and links belonging to a specific time window (both the link and the votes should come from that time window)
# Author: Roja Bandari
# October 2012
# NOTE(review): despite the shebang, the `print x` statements below are
# Python 2 syntax -- this script will not run under Python 3 as-is.
from pymongo import Connection
from PARAMETERS import *
import datetime
import time
import sys
#sDate = sys.argv[1]
#delta = sys.argv[2] # in days
#sYear = int(sDate.split('/')[2])
#sMonth = int(sDate.split('/')[0])
#sDay = int(sDate.split('/')[1])
# Fixed analysis period; WINDOW/SLIDE (days) come from PARAMETERS.
begin = datetime.datetime(2006,9,1)
end = datetime.datetime(2006,11,25)
startDate = begin
difference = datetime.timedelta(days=WINDOW)
slidingWindow = datetime.timedelta(days=SLIDE)
t1 = time.time()
connection = Connection()
balatarindb = connection.Balatarin
links = balatarindb.links
votes = balatarindb.votes
log = open("mongoError.log","a")
# One output file per sliding window: user<TAB>link rows for votes on
# category-"4" links whose link and vote both fall inside the window.
while startDate < end:
    endDate = startDate + difference
    bgraphname = "".join(["bipartite_politics_",str(startDate.month),"_"+str(startDate.day),"_"+str(startDate.year),"_"+str(WINDOW),"_days"])
    print bgraphname
    f = open(PATH+"/bipartite/"+bgraphname+".txt","w")
    for vote in votes.find({"date":{"$gte":startDate,"$lt":endDate}}):
        # print vote["linkID"]
        linkID = vote["linkID"]
        link = links.find_one({"linkID":linkID})
        # NOTE(review): bare except also hides non-lookup errors; the
        # intent appears to be logging votes whose link is missing.
        try:
            if link["date"] < startDate : continue
        except:
            log.write(linkID+'\n')
            continue
        if link["category"] == "4":
            f.write(vote["userID"]+'\t'+vote["linkID"]+'\n')
    f.close()
    startDate += slidingWindow
t2 = time.time()
print "Time Spent: "+str((t2-t1)/60)+" minutes.\n"
log.close()
|
[
"[email protected]"
] | ||
a9520d4013f01df3a621233c6de34a7732d48832
|
2a05456121813e2c5c3a0e9a88c0c381a038633b
|
/euler089.py
|
b32e61c3f1608a6ae354bef88b3f646d1612cf92
|
[] |
no_license
|
Octaith/euler
|
022fab72f7d2a72327694ea1970aa3e13a560673
|
457676a99013c7c5fd33697b82be998d07c464d9
|
refs/heads/master
| 2020-09-26T21:04:08.656499 | 2014-09-14T07:47:51 | 2014-09-14T07:47:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 789 |
py
|
# Numeral/value pairs ordered largest-first so a single greedy pass
# works in both directions.
roman = (
    ('M', 1000),
    ('CM', 900),
    ('D', 500),
    ('CD', 400),
    ('C', 100),
    ('XC', 90),
    ('L', 50),
    ('XL', 40),
    ('X', 10),
    ('IX', 9),
    ('V', 5),
    ('IV', 4),
    ('I', 1)
)
def roman_to_dec(s):
    """Parse a Roman numeral string (possibly non-minimal) to an int."""
    total, pos = 0, 0
    for numeral, value in roman:
        width = len(numeral)
        while s.startswith(numeral, pos):
            total += value
            pos += width
    return total
def dec_to_roman(n):
    """Render integer n as a minimal-form Roman numeral."""
    parts = []
    for numeral, value in roman:
        count, n = divmod(n, value)
        parts.append(numeral * count)
    return "".join(parts)
# Project Euler 89: count the characters saved by rewriting every
# numeral in roman.txt in minimal form.  (Python 2 print syntax.)
with open('roman.txt') as f:
    data = f.read().split('\n')
saved = 0
for r in data:
    saved += len(r)
    saved -= len(dec_to_roman(roman_to_dec(r)))
print saved
|
[
"[email protected]"
] | |
ede98906221ceb5af90a8e165e9a48203a10f212
|
a1dae20db0338e735f0b4eb2804a069533bc5a9b
|
/render.py
|
f36dcfdfed83a87bd98faa44c513dbe54b05c932
|
[] |
no_license
|
thoppe/TwitterSquares
|
4d78e80680c3b01673d602c2564811bf42090aa6
|
a01dd65456fa70478a0ed03cd7c994c0a678e3ef
|
refs/heads/master
| 2020-03-20T08:17:42.525989 | 2018-06-19T22:05:20 | 2018-06-19T22:05:20 | 137,304,270 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,058 |
py
|
"""Render Twitter Squares
Usage:
render.py <term> <n_images> [--resolution=<n>]
Options:
-h --help Show this screen.
-r --resolution=<n> Output resolution [default: 1200]
"""
import glob
import os
import sys
import random
from tqdm import tqdm
import numpy as np
import cv2
from docopt import docopt
dargs = docopt(__doc__)
total_images = int(dargs["<n_images>"])
square_n = int(np.sqrt(total_images))
resolution = int(dargs["--resolution"])
if square_n**2 != total_images:
raise ValueError(f"<n_images={total_images}> must be a square number!")
max_image_row_size = 20
#model_img_size = 224
model_img_size = 299
name = dargs["<term>"]
load_dest = f"data/profile_image/{name}"
subimage_dest = f"data/subimage/{name}"
activations_dest = f"data/activations/{name}"
figure_dest = "figures/"
def resize_and_crop(f0):
    # Resize all the images to the base shape of (model_img_size,model_img_size)
    # Center crop non-square images
    # Skips files already converted; deletes inputs cv2 cannot decode.
    # Returns False on skip/failure, None after a successful write.
    f1 = os.path.join(subimage_dest, os.path.basename(f0)) + '.jpg'
    if os.path.exists(f1):
        return False
    img = cv2.imread(f0)
    if img is None:
        # Undecodable file: remove it so later runs don't retry it.
        os.remove(f0)
        return False
    x,y,c = img.shape
    if x > y:
        # Taller than wide: keep a vertically centred square of rows.
        dx = (x - y)//2
        img = img[dx:dx+y, :, :]
    if y > x:
        # Wider than tall.
        # NOTE(review): dy is the full difference, not half of it, so
        # this keeps the RIGHTMOST square rather than a centred one --
        # confirm whether that is intended given the comment above.
        dy = y - x
        img = img[:, dy:dy+x, :]
    img = cv2.resize(img, (model_img_size,model_img_size))
    x,y,c = img.shape
    assert(x==y==model_img_size)
    cv2.imwrite(f1, img)
    #print ("Saved", f1)
def load_image_data():
    # Load up to `total_images` (image, activation-vector) pairs in a
    # random order; returns two aligned numpy arrays (IMG, ACT).
    F_INPUT = sorted(glob.glob(os.path.join(subimage_dest, '*')))
    random.shuffle(F_INPUT)
    F_INPUT = F_INPUT[:total_images]
    IMG, ACT = [], []
    for f0 in tqdm(F_INPUT):
        # Each subimage must already have been scored by
        # compute_activations(); assert the .txt file exists.
        f1 = os.path.join(activations_dest, os.path.basename(f0))+'.txt'
        assert(os.path.exists(f1))
        img = cv2.imread(f0)
        IMG.append(img)
        ACT.append(np.loadtxt(f1))
    IMG = np.array(IMG)
    ACT = np.array(ACT)
    return IMG, ACT
_clf = None # Only import the model if we need to score something
def compute_activations(f0):
    # Score image f0 with the (lazily loaded) model and save the
    # activation vector next to it; skips already-scored files.
    # Returns False on skip, None after a successful write.
    f1 = os.path.join(activations_dest, os.path.basename(f0)) + '.txt'
    if os.path.exists(f1):
        return False
    global _clf
    if _clf is None:
        # First use: import and build the model (expensive), cache it.
        print("Importing classification model")
        from model import layer_model
        _clf = layer_model()
    img = cv2.imread(f0)
    img = img[:,:,::-1] # BGR to RGB
    ax = _clf.predict(img)
    np.savetxt(f1, ax)
if __name__ == "__main__":
# Create any missing directories
for d in [subimage_dest, figure_dest, activations_dest]:
if not os.path.exists(d):
os.system(f'mkdir -p "{d}"')
F_IN = set(sorted(glob.glob(os.path.join(load_dest, '*'))))
# Remove all zero-byte files
for f in list(F_IN):
if os.stat(f).st_size==0:
print(f"Removing zero-byte file {f}")
os.remove(f)
F_IN.remove(f)
for f0 in tqdm(F_IN):
resize_and_crop(f0)
print(f"Largest model possible {int(np.floor(len(F_IN)**0.5)**2)}")
F_IN = set(sorted(glob.glob(os.path.join(subimage_dest, '*'))))
for f0 in tqdm(F_IN):
compute_activations(f0)
# Check to make sure we have enough images
F_IN = set(sorted(glob.glob(os.path.join(activations_dest, '*'))))
if len(F_IN) < total_images:
msg = f"Not enough images for {name}, {len(F_IN)}/{total_images}"
raise ValueError(msg)
IMG, ACT = load_image_data()
from grid import generate_tsne, fit_to_grid
print("Generating tSNE coordinates")
X = generate_tsne(ACT)
print("Running Jonker-Volgenan")
img = fit_to_grid(IMG, X, square_n, out_res=model_img_size)
print("Resizing image")
img = cv2.resize(
img, (resolution, resolution), interpolation=cv2.INTER_CUBIC)
f_img_save = os.path.join(figure_dest, f"{name}.jpg")
cv2.imwrite(
f_img_save, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
print (f"Saved output image to {f_img_save}")
os.system(f'eog "figures/{name}.jpg"')
|
[
"[email protected]"
] | |
fccc5e04254af51c2fc4a03cdf992b81f31a1d28
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/sql/v20190601preview/__init__.py
|
82b3a2004814746567987c5300774fdd220485e0
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,559 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .database import *
from .get_database import *
from .get_managed_database import *
from .get_server import *
from .get_server_azure_ad_administrator import *
from .get_sync_group import *
from .get_sync_member import *
from .get_workload_classifier import *
from .get_workload_group import *
from .managed_database import *
from .server import *
from .server_azure_ad_administrator import *
from .sync_group import *
from .sync_member import *
from .workload_classifier import *
from .workload_group import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:sql/v20190601preview:Database":
return Database(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:ManagedDatabase":
return ManagedDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:Server":
return Server(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:ServerAzureADAdministrator":
return ServerAzureADAdministrator(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:SyncGroup":
return SyncGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:SyncMember":
return SyncMember(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:WorkloadClassifier":
return WorkloadClassifier(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:WorkloadGroup":
return WorkloadGroup(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "sql/v20190601preview", _module_instance)
_register_module()
|
[
"[email protected]"
] | |
8ce1689b4605bab929cceaf30bd0e1e4bc9293a9
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/1007_Minimum_Domino_Rotations_For_Equal_Row.py
|
974ee558096c8fe9a393d9b91f507186e8e356d7
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626 | 2022-03-14T08:39:47 | 2022-03-14T08:39:47 | 69,802,909 | 862 | 438 |
MIT
| 2022-08-18T06:42:46 | 2016-10-02T14:51:31 |
Python
|
UTF-8
|
Python
| false | false | 2,177 |
py
|
# Source: https://tinyurl.com/v3zqer7
# Approach 1
class Solution(object):
    def minDominoRotations(self, A, B):
        """
        :type A: List[int]
        :type B: List[int]
        :rtype: int
        Try every face value 1..6.  A value is feasible when each domino
        shows it on at least one half; its cost is the smaller number of
        rotations needed to collect it into a single row.  Return the
        cheapest feasible cost, or -1 when no value is feasible.
        """
        best = float("inf")
        for face in range(1, 7):
            feasible = True
            top_swaps = bottom_swaps = 0
            for top, bottom in zip(A, B):
                if face != top and face != bottom:
                    feasible = False
                    break
                if top == face and bottom != face:
                    bottom_swaps += 1
                elif bottom == face and top != face:
                    top_swaps += 1
            if feasible:
                best = min(best, top_swaps, bottom_swaps)
        return -1 if best == float("inf") else best
# Source: https://tinyurl.com/v3zqer7
# Approach 2
class Solution(object):
    def minDominoRotations(self, A, B):
        """
        :type A: List[int]
        :type B: List[int]
        :rtype: int
        Only A[0] or B[0] can possibly fill an entire row (every domino
        must contain the chosen value), so at most two candidates need
        to be checked.
        """
        swaps = self.checkRotationFor(A, B, A[0])
        # A[0] feasible, or B[0] would give the identical answer.
        if swaps != -1 or A[0] == B[0]:
            return swaps
        return self.checkRotationFor(A, B, B[0])

    def checkRotationFor(self, A, B, num):
        """
        Minimum rotations to make all of A or all of B equal `num`,
        or -1 when some domino lacks `num` on both halves.
        """
        top_missing = bottom_missing = 0
        for i in range(len(A)):
            a, b = A[i], B[i]
            if a != num and b != num:
                # This domino can never contribute `num`.
                return -1
            if a != num:
                top_missing += 1
            elif b != num:
                bottom_missing += 1
        return min(top_missing, bottom_missing)
|
[
"[email protected]"
] | |
59ab5667a34c44fdb895072c8f91f93182bc126b
|
acf314ab0fa399018764b2ebd96e33c66362994e
|
/0x0F-python-object_relational_mapping/1-filter_states.py
|
3db5e1107d420d574d6614b5ae1f741eb6da16ad
|
[] |
no_license
|
glyif/holbertonschool-higher_level_programming
|
98f9c2da0b71a4e9e2dd9f6fde755875e9015f34
|
14c02d79e2008db1b992b08f9faa55b20dbe0691
|
refs/heads/master
| 2021-01-20T06:53:16.179354 | 2017-09-28T18:14:12 | 2017-09-28T18:14:12 | 89,939,980 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 975 |
py
|
#!/usr/bin/python3
"""
mysqldb filter states
"""
import sys
import MySQLdb
def db_connection(user_name, password, db_name, host="localhost"):
"""
db_connection - connects to db
:param user_name: username
:param password: password
:param db_name: database name
:param host: host - default to localhost
:return: db
"""
db = MySQLdb.connect(host=host,
user=user_name,
passwd=password,
db=db_name)
return db
def db_query(db, query):
"""
db_query - queries database
:param db: database
:param query: query
:return: none
"""
cur = db.cursor()
cur.execute(query)
data = cur.fetchall()
for row in data:
print(row)
if __name__ == "__main__":
db = db_connection(sys.argv[1], sys.argv[2], sys.argv[3])
db_query(db, """SELECT id, name FROM states
WHERE name LIKE 'N%' ORDER BY states.id ASC""")
|
[
"[email protected]"
] | |
2b6dbf579ae37711f46b26057e43ff7b642659e2
|
77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5
|
/chelseashin/ProblemSolving/2156_포도주시식.py
|
940cea71221cff29f679eb73ae27638dc45e2bad
|
[] |
no_license
|
chelseashin/AlgorithmStudy2021
|
786f03c4c17bc057518d428481e7d710d24ec98e
|
1a4744a621ed25715fc9060c5224f0b1092d9c00
|
refs/heads/master
| 2023-06-22T22:27:47.289806 | 2021-07-28T02:54:22 | 2021-07-28T02:54:22 | 326,441,667 | 1 | 5 | null | 2021-06-29T01:27:40 | 2021-01-03T15:44:16 |
Python
|
UTF-8
|
Python
| false | false | 848 |
py
|
# 참고 : https://pacific-ocean.tistory.com/152
# https://claude-u.tistory.com/204
# dp[i]의 최댓값을 구하는 것은 세 가지 방법에 의해 결정된다.
# 1) OXOO: 연속 두 개
# 2) OXO: 하나 띄고 한 개
# 3) X: i 번째를 마시지 않는 경우
from sys import stdin
input = stdin.readline
# Bottom-up DP over the glasses (1-based).  dp[i] = best total using
# the first i glasses; the recurrence below matches the three cases
# described in the header comments (drink i after skipping i-1, drink
# i-1 and i after skipping i-2, or skip i entirely).
n = int(input())
a = [0] + [int(input()) for _ in range(n)]
dp = [0, a[1]]
if n > 1:
    # With two glasses, drinking both is always optimal.
    dp.append(a[1] + a[2])
    for i in range(3, n+1):
        dp.append(max(dp[i-1],
                      dp[i-3]+a[i-1]+a[i],
                      dp[i-2]+a[i]))
# print(n, a, dp)
print(dp[n])
# 위와 같은 방법  (same approach, alternative formulation)
# wine = [0] + [int(input()) for _ in range(n)]
# dp = [0] * (n+1)
# dp[1] = wine[1]
# if n > 1:
#     dp[2] = wine[1] + wine[2]
# for i in range(3, n+1):
#     dp[i] = max(dp[i-3]+wine[i-1]+wine[i], dp[i-2]+wine[i], dp[i-1])
#
# print(dp[n])
|
[
"[email protected]"
] | |
0324dd3dc62f88495cb95ea7424deef660c43536
|
e2e39726195c7bc075b9bd56e757acd136527d5c
|
/typings/vtkmodules/vtkIOXML/__init__.pyi
|
5a9a78845d01dba6a9f4391c9438656d0f13da23
|
[
"BSD-3-Clause"
] |
permissive
|
gen4438/vtk-python-stubs
|
a652272183d2d1ee48d4639e86bcffc1ac454af0
|
c9abd76362adf387af64ce5ddbd04c5d3bebe9da
|
refs/heads/main
| 2023-04-04T02:13:15.459241 | 2021-04-15T10:47:28 | 2021-04-15T10:53:59 | 358,224,363 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,682 |
pyi
|
"""
This type stub file was generated by pyright.
"""
import vtkmodules.vtkCommonCore as __vtkmodules_vtkCommonCore
import vtkmodules.vtkCommonExecutionModel as __vtkmodules_vtkCommonExecutionModel
import vtkmodules.vtkIOXMLParser as __vtkmodules_vtkIOXMLParser
from .vtkXMLReader import vtkXMLReader
from .vtkXMLDataReader import vtkXMLDataReader
from .vtkXMLUnstructuredDataReader import vtkXMLUnstructuredDataReader
from .vtkXMLPolyDataReader import vtkXMLPolyDataReader
from .vtkRTXMLPolyDataReader import vtkRTXMLPolyDataReader
from .vtkXMLCompositeDataReader import vtkXMLCompositeDataReader
from .vtkXMLWriter import vtkXMLWriter
from .vtkXMLCompositeDataWriter import vtkXMLCompositeDataWriter
from .vtkXMLDataObjectWriter import vtkXMLDataObjectWriter
from .vtkXMLDataSetWriter import vtkXMLDataSetWriter
from .vtkXMLFileReadTester import vtkXMLFileReadTester
from .vtkXMLGenericDataObjectReader import vtkXMLGenericDataObjectReader
from .vtkXMLHierarchicalBoxDataFileConverter import vtkXMLHierarchicalBoxDataFileConverter
from .vtkXMLUniformGridAMRReader import vtkXMLUniformGridAMRReader
from .vtkXMLHierarchicalBoxDataReader import vtkXMLHierarchicalBoxDataReader
from .vtkXMLUniformGridAMRWriter import vtkXMLUniformGridAMRWriter
from .vtkXMLHierarchicalBoxDataWriter import vtkXMLHierarchicalBoxDataWriter
from .vtkXMLMultiBlockDataReader import vtkXMLMultiBlockDataReader
from .vtkXMLMultiGroupDataReader import vtkXMLMultiGroupDataReader
from .vtkXMLHierarchicalDataReader import vtkXMLHierarchicalDataReader
from .vtkXMLHyperTreeGridReader import vtkXMLHyperTreeGridReader
from .vtkXMLHyperTreeGridWriter import vtkXMLHyperTreeGridWriter
from .vtkXMLStructuredDataReader import vtkXMLStructuredDataReader
from .vtkXMLImageDataReader import vtkXMLImageDataReader
from .vtkXMLStructuredDataWriter import vtkXMLStructuredDataWriter
from .vtkXMLImageDataWriter import vtkXMLImageDataWriter
from .vtkXMLMultiBlockDataWriter import vtkXMLMultiBlockDataWriter
from .vtkXMLPartitionedDataSetCollectionReader import vtkXMLPartitionedDataSetCollectionReader
from .vtkXMLPartitionedDataSetCollectionWriter import vtkXMLPartitionedDataSetCollectionWriter
from .vtkXMLPartitionedDataSetReader import vtkXMLPartitionedDataSetReader
from .vtkXMLPartitionedDataSetWriter import vtkXMLPartitionedDataSetWriter
from .vtkXMLPDataObjectReader import vtkXMLPDataObjectReader
from .vtkXMLPDataReader import vtkXMLPDataReader
from .vtkXMLPHyperTreeGridReader import vtkXMLPHyperTreeGridReader
from .vtkXMLPStructuredDataReader import vtkXMLPStructuredDataReader
from .vtkXMLPImageDataReader import vtkXMLPImageDataReader
from .vtkXMLUnstructuredDataWriter import vtkXMLUnstructuredDataWriter
from .vtkXMLPolyDataWriter import vtkXMLPolyDataWriter
from .vtkXMLPUnstructuredDataReader import vtkXMLPUnstructuredDataReader
from .vtkXMLPPolyDataReader import vtkXMLPPolyDataReader
from .vtkXMLPRectilinearGridReader import vtkXMLPRectilinearGridReader
from .vtkXMLPStructuredGridReader import vtkXMLPStructuredGridReader
from .vtkXMLPTableReader import vtkXMLPTableReader
from .vtkXMLPUnstructuredGridReader import vtkXMLPUnstructuredGridReader
from .vtkXMLRectilinearGridReader import vtkXMLRectilinearGridReader
from .vtkXMLRectilinearGridWriter import vtkXMLRectilinearGridWriter
from .vtkXMLStructuredGridReader import vtkXMLStructuredGridReader
from .vtkXMLStructuredGridWriter import vtkXMLStructuredGridWriter
from .vtkXMLTableReader import vtkXMLTableReader
from .vtkXMLTableWriter import vtkXMLTableWriter
from .vtkXMLUnstructuredGridReader import vtkXMLUnstructuredGridReader
from .vtkXMLUnstructuredGridWriter import vtkXMLUnstructuredGridWriter
__loader__ = ...
__spec__ = ...
|
[
"[email protected]"
] | |
ee9cee9c908ac3278c8545a66f4d96149faae702
|
7ce05272d21c903abc85ebc74544009aacd80c82
|
/Advance_Python/Python_Database_Programming/Other/add_user_in_bank.py
|
5c2f984a2d9a435280b32ffcf34ffcf45b74ed87
|
[] |
no_license
|
sachinyadav3496/PythonInternBatch2018
|
8899a866f60a39b4c7eff4f5bc79ec2586833403
|
8e2610ad80c39ea747e8a6547ebe540e7b019a79
|
refs/heads/master
| 2021-06-26T09:18:58.178457 | 2020-10-03T09:49:32 | 2020-10-03T09:49:32 | 136,880,809 | 18 | 34 | null | 2020-10-03T09:49:33 | 2018-06-11T05:56:26 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 548 |
py
|
# One-shot loader: read accounts from bank_data.csv and insert them into the
# `bank` table of the bank_app database.
import pymysql as sql
db = sql.connect(host='localhost', port=3306, user='bank_app', password='redhat', database='bank_app')
c = db.cursor()
# Each CSV line is "name,password,balance"; [:-1] strips the trailing newline
# character before the balance is parsed.
data = []
with open('bank_data.csv') as f:
    for line in f:
        d = line.split(',')
        d[2] = float(d[2][:-1])
        data.append(d)
for name, password, bal in data:
    # Parameterized query: never interpolate values into the SQL text — the
    # original str.format version was open to SQL injection via CSV contents.
    c.execute("insert into bank(user,password,bal) values(%s,%s,%s)", (name, password, bal))
db.commit()
print("Added data to bank sucessfully")
c.close()
db.close()
|
[
"[email protected]"
] | |
073b6b152e0805dbc16dce1d402482e505bd9770
|
9d5723c09148cc353e5339a706ba582a162dceec
|
/hunkim/lab12-5.py
|
822c653469deeadaddde45a16a92e53b9bc3eaab
|
[] |
no_license
|
SilverQ/dl_study
|
424bce279c059c290a4c766e87fadb150fff82da
|
663b432abc5afd0eed278368a5fea19ece6a383c
|
refs/heads/master
| 2022-11-14T08:27:10.937535 | 2020-07-02T10:05:04 | 2020-07-02T10:05:04 | 82,505,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,120 |
py
|
'''
The original script shows how to predict the next day's closing stock prices using a basic RNN
https://github.com/hunkim/DeepLearningZeroToAll/blob/master/lab-12-5-rnn_stock_prediction.py
At first, let's understand the original code and prior arts completely
'''
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
tf.set_random_seed(777)  # reproducibility
np.set_printoptions(precision=2)
if "DISPLAY" not in os.environ:
    # Headless environment (e.g. Travis CI): fall back to the non-GUI Agg backend.
    matplotlib.use('Agg')
def MinMaxScaler(data):
    """Column-wise min-max normalization.

    Args:
        data: numpy.ndarray of shape [batch, dimension].

    Returns:
        A list ``[scaled, col_min, col_max]``, where ``scaled`` maps every
        column into [0, 1] and ``col_min``/``col_max`` are the per-column
        extrema (returned so the original scale can be restored later).

    References:
        http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
    """
    col_min = np.min(data, 0)
    col_max = np.max(data, 0)
    # The small epsilon guards against division by zero on constant columns.
    scaled = (data - col_min) / (col_max - col_min + 1e-7)
    return [scaled, col_min, col_max]
# train Parameters
seq_length = 7       # look-back window: 7 trading days per sample
data_dim = 5         # features per day: Open, High, Low, Volume, Close
hidden_dim = 10
output_dim = 1       # predicted next-day closing price
learning_rate = 0.01
iterations = 500
# Open, High, Low, Volume, Close
xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
xy_rev = xy[::-1]  # reverse so rows are chronologically ascending (oldest first)
'''
print('xy: ', xy[-3:])
xy: [[ 566.89 567. 556.93 10800. 556.97]
[ 561.2 566.43 558.67 41200. 559.99]
[ 568. 568. 552.92 13100. 558.46]]
print('xy_rev: ', xy_rev[:3])
xy: [[ 568. 568. 552.92 13100. 558.46]
[ 561.2 566.43 558.67 41200. 559.99]
[ 566.89 567. 556.93 10800. 556.97]]
'''
# split data to train_set/test_set and Scaling
train_size = int(len(xy_rev) * 0.7)
train_set = xy_rev[0:train_size]
test_set = xy_rev[train_size - seq_length:] # Index from [train_size - seq_length] to utilize past sequence
# NOTE(review): `min`/`max` shadow the builtins from here on, and the test set
# is scaled with its own extrema rather than the training extrema — confirm
# this is intended before reusing this script.
[train_set, min, max] = MinMaxScaler(train_set)
[test_set, min, max] = MinMaxScaler(test_set)
'''
print('train_set: ', train_set[:3])
print('min: ', min) # 컬럼별로 min-max 연산은 따로따로 한 것을 알 수 있음.!!!
train_set: [[0.25 0.25 0.23 0. 0.23]
[0.23 0.24 0.25 0. 0.24]
[0.25 0.24 0.25 0. 0.23]]
min: [ 494.65 495.98 487.56 7900. 492.55]
'''
# build datasets. Create batch for 7-days.
def build_dataset(time_series, seq_length):
    """Slice a time series into (window, next-close) training pairs.

    Each sample is ``seq_length`` consecutive rows of ``time_series``; its
    label is the last column (closing price) of the row that immediately
    follows the window.
    """
    windows, labels = [], []
    for start in range(len(time_series) - seq_length):
        window = time_series[start:start + seq_length, :]
        next_close = time_series[start + seq_length, [-1]]
        windows.append(window)
        labels.append(next_close)
    return np.array(windows), np.array(labels)
# Materialize (window, next-day-close) pairs for both splits.
trainX, trainY = build_dataset(train_set, seq_length)
testX, testY = build_dataset(test_set, seq_length)
'''
print('trainX: ', trainX[:4])
print('trainY: ', trainY[:3])
trainX: [[[2.53e-01 2.45e-01 2.34e-01 4.66e-04 2.32e-01]
[2.30e-01 2.40e-01 2.55e-01 2.98e-03 2.37e-01]
[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]]
[[2.30e-01 2.40e-01 2.55e-01 2.98e-03 2.37e-01]
[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]]
[[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]
[1.65e-01 2.01e-01 1.93e-01 2.82e-01 2.20e-01]]
[[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]
[1.65e-01 2.01e-01 1.93e-01 2.82e-01 2.20e-01]
[2.24e-01 2.36e-01 2.34e-01 2.98e-01 2.52e-01]]]
trainY: [[0.16]
[0.22]
[0.25]]
'''
# NOTE: TensorFlow 1.x graph-mode API (tf.placeholder / tf.Session / tf.contrib);
# this does not run under TensorFlow 2.x eager defaults.
# input place holders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, 1])
# build a LSTM network
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
Y_pred = tf.contrib.layers.fully_connected(
    outputs[:, -1], output_dim, activation_fn=None)  # We use the last cell's output
# cost/loss
loss = tf.reduce_sum(tf.square(Y_pred - Y))  # sum of the squares
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
# RMSE (separate placeholders so it can be evaluated on numpy predictions)
targets = tf.placeholder(tf.float32, [None, 1])
predictions = tf.placeholder(tf.float32, [None, 1])
rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    # Training step: full-batch gradient descent on the whole training split.
    for i in range(iterations):
        _, step_loss = sess.run([train, loss], feed_dict={
            X: trainX, Y: trainY})
        if i % 100 ==0:
            print("[step: {}] loss: {}".format(i, step_loss))
    # Test step
    test_predict = sess.run(Y_pred, feed_dict={X: testX})
    rmse_val = sess.run(rmse, feed_dict={
        targets: testY, predictions: test_predict})
    print("RMSE: {}".format(rmse_val))
    # Plot predictions (saved to file; interactive show is disabled)
    plt.plot(testY)
    plt.plot(test_predict)
    plt.xlabel("Time Period")
    plt.ylabel("Stock Price")
    # plt.show()
    plt.savefig('Stock_price.png')
|
[
"[email protected]"
] | |
f5b2ea2f20edbedb90a3351960045e897c52f2c3
|
db98aeb4883d2aa9969970d353b9d6212c7dbde2
|
/lectures/07-python-dictionaries/examples/dna9.py
|
f71e08b870a9f5ce84946e6c88096ad74de04bfa
|
[
"MIT"
] |
permissive
|
qianwenluo/biosys-analytics
|
cec7e84477e01f9aa17e30c1fd8286710deed617
|
f936095931fa8f237de8bdf058b960db86effa49
|
refs/heads/master
| 2020-04-15T20:19:25.669143 | 2019-05-07T17:52:17 | 2019-05-07T17:52:17 | 164,988,099 | 0 | 1 |
MIT
| 2019-01-10T04:12:20 | 2019-01-10T04:12:20 | null |
UTF-8
|
Python
| false | false | 478 |
py
|
#!/usr/bin/env python3
"""Tetra-nucleotide counter"""
import sys
import os
from collections import defaultdict

# Exactly one argument is required: a file of sequence lines or a raw string.
if len(sys.argv) != 2:
    print('Usage: {} DNA'.format(os.path.basename(sys.argv[0])))
    sys.exit(1)

arg = sys.argv[1]
# A readable path means "load the sequence from this file" (lines joined);
# anything else is treated as the sequence itself.
dna = ''.join(open(arg).read().splitlines()) if os.path.isfile(arg) else arg

counts = defaultdict(int)
for base in dna.lower():
    counts[base] += 1

print(' '.join(str(counts[b]) for b in "acgt"))
|
[
"[email protected]"
] | |
908d6b9bdd11c832f27b876675752c230f0dd8e9
|
901bfc797cc369c0bea21167ac471d0311cb93ac
|
/e3/DiffieHellman.py
|
bf11a36e45a5949541a91c675a66430dd0b9b984
|
[
"MIT"
] |
permissive
|
NigrumAquila/subject_cryptographic_protection
|
022216fd1481febc3a010efdfd11ab3398c73d00
|
2b4015b3c1b6d57391e866a70d308e78e5cab719
|
refs/heads/master
| 2021-03-17T15:58:10.590822 | 2020-05-01T06:30:54 | 2020-05-01T06:30:54 | 247,001,657 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 858 |
py
|
import __main__
# Guard: this demo module must be launched through main.py, never directly.
if __main__.__file__ != 'main.py':
    exit('run main.py')
from .DHlib.DHalg import encrypt, decrypt, getSharedSecret, printAllKeys, printParams
from lib.colors import *
from lib.duty import *
# Derive the shared Diffie-Hellman secret once; reuse it for every message.
key = getSharedSecret()
printAllKeys()
while True:
    printParams();
    # NOTE(review): the prompt text says "RSA" although this is the
    # Diffie-Hellman demo — looks copy-pasted; runtime strings left untouched.
    message = typedText('Enter message for RSA encryption: ')
    printTextAndValue('Original message: ', message)
    encrypted_message = encrypt(key, message)
    try:
        printTextAndValue('Encrypted message: ', encrypted_message)
    except UnicodeError:
        # Consoles with a non-UTF-8 encoding cannot render the ciphertext.
        warning('\rYour encoding isn\'t UTF-8')
        end('Please, restart it with "PYTHONIOENCODING=UTF-8 python main.py" or by IDE with utf8 encoding')
    decrypted_message = decrypt(key, encrypted_message)
    printTextAndValue('Decrypted message: ', decrypted_message)
    repeatProcedure()
|
[
"[email protected]"
] | |
2ed5006395d6e55cc012484b9d82f09f074e11cf
|
8fc2ab3d29a30e603e19b30bb9517928de529167
|
/hackerank_whatsnext.py
|
2d44363c21597514612dc972cc035e6441f66752
|
[] |
no_license
|
rushilchugh/Practise
|
35a9861bec6786580dc0a440eb25d78e43cb7bc9
|
98fd593b95dad641bef1d519c6c6ed1daaae630f
|
refs/heads/master
| 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,765 |
py
|
__author__ = 'Rushil'
#SetCount(x) - Number of ones in an binary number x
#Johnny wants to find a binary number, D, that is the smallest binary number >B where setCount(B) = setCount(D)
#He then wants to compress D into an array of integers,C (in the same way that integer array A contains the compressed form of binary string B).
#Values in even represents consecutive 1
#Values in odd represents consecutive 0
from itertools import groupby
import re
#Given input 4 1 3 2 4
def get_bin_rep(num):
    """Expand a run-length string like '4 1 3 2 4' into its binary form.

    After stripping spaces, digits at even positions are runs of '1' and
    digits at odd positions are runs of '0'.
    NOTE(review): single-digit run lengths only (a length > 9 would need a
    real split on whitespace) — behaviour preserved from the original.
    """
    digits = num.replace(' ', '')
    pieces = []
    for pos, digit in enumerate(digits):
        bit = '1' if pos % 2 == 0 else '0'
        pieces.append(bit * int(digit))
    return ''.join(pieces)
def get_other_bin(bin_num):
    """Produce the next binary string under the original swap rules.

    If the string ends in '0': the final position becomes '1' and the last
    '1' becomes '0'.  Otherwise the rightmost '0' swaps with its right
    neighbour ('01' -> '10').  Both rules preserve the number of set bits.
    """
    bits = list(bin_num)
    if bits[-1] == '0':
        last_one = bin_num.rfind('1')
        bits[-1] = '1'
        bits[last_one] = '0'
        return ''.join(bits)
    # Locate the rightmost '0' (defaults to index 0 when there is none,
    # matching the original behaviour).
    rightmost_zero = 0
    for pos, bit in enumerate(bits):
        if bit == '0':
            rightmost_zero = pos
    bits[rightmost_zero] = '1'
    bits[rightmost_zero + 1] = '0'
    return ''.join(bits)
def make_rep(bin_num):
    """Run-length encode a binary string, e.g. '11110111001111' -> '4 1 3 2 4 '.

    Each run's length is emitted followed by a space (note the trailing space,
    kept for compatibility with the original output format).
    """
    return ''.join('{} '.format(len(list(run))) for _, run in groupby(bin_num))
#
#print(get_other_bin('11110111001111'))
#print(make_rep('11110111001111'))
#print(make_rep(get_other_bin(get_bin_rep('4 1 3 2 4'))))
# Read T test cases; each case is a word length line (unused) followed by the
# compressed run-length string.
n = int(input().strip())
m_list = []
for i in range(n):
    w_len = input().strip()
    m_word = input().strip()
    m_list.append(m_word)
for i in m_list:
    f_sol = make_rep(get_other_bin(get_bin_rep(i)))
    # NOTE(review): this prints the character length of the run string
    # (including its trailing space), not the number of runs — confirm
    # against the problem statement.
    print(len(f_sol))
    print(f_sol)
|
[
"[email protected]"
] | |
2ae309ab7516c2e17c6d104bf77aa92bce5dbd7d
|
26e91aead18d0fad6f5ce8fc4adf7d8e05a2f07f
|
/byceps/services/board/models/topic.py
|
ddc5451016c5f326ba92595817d09bd24677a035
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leathe/byceps
|
40c1f8a1aab3521fcac45d88eab6364d448d4e67
|
cd0c618af63fed1cd7006bb67da46eac0ddbb1c7
|
refs/heads/master
| 2020-12-02T09:02:51.087511 | 2019-12-14T17:00:22 | 2019-12-14T17:00:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,529 |
py
|
"""
byceps.services.board.models.topic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from sqlalchemy.ext.associationproxy import association_proxy
from ....blueprints.board.authorization import (
BoardPermission,
BoardTopicPermission,
)
from ....database import BaseQuery, db, generate_uuid
from ....typing import UserID
from ....util.instances import ReprBuilder
from ...authentication.session.models.current_user import CurrentUser
from ...user.models.user import User
from ..transfer.models import CategoryID
from .category import Category
class TopicQuery(BaseQuery):
    """Query helpers for topics: category filtering and visibility rules."""

    def for_category(self, category_id: CategoryID) -> BaseQuery:
        return self.filter_by(category_id=category_id)

    def only_visible_for_user(self, user: CurrentUser) -> BaseQuery:
        """Only return topics the user may see."""
        if not user.has_permission(BoardPermission.view_hidden):
            return self.without_hidden()
        return self

    def without_hidden(self) -> BaseQuery:
        """Only return topics every user may see."""
        # `== False` (not `is False`) is intentional: this builds a SQLAlchemy
        # column expression, not a Python boolean comparison.
        return self.filter(Topic.hidden == False)
class Topic(db.Model):
    """A topic.

    Moderation state (hidden/locked/pinned) is tracked together with the time
    of the change and the acting user.
    """

    __tablename__ = 'board_topics'
    query_class = TopicQuery

    id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
    category_id = db.Column(db.Uuid, db.ForeignKey('board_categories.id'), index=True, nullable=False)
    category = db.relationship(Category)
    created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
    creator_id = db.Column(db.Uuid, db.ForeignKey('users.id'), nullable=False)
    title = db.Column(db.UnicodeText, nullable=False)
    # Number of postings including the topic's initial posting.
    posting_count = db.Column(db.Integer, default=0, nullable=False)
    last_updated_at = db.Column(db.DateTime, default=datetime.utcnow)
    last_updated_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
    last_updated_by = db.relationship(User, foreign_keys=[last_updated_by_id])
    # Moderation flags, each with the acting moderator and timestamp.
    hidden = db.Column(db.Boolean, default=False, nullable=False)
    hidden_at = db.Column(db.DateTime)
    hidden_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
    hidden_by = db.relationship(User, foreign_keys=[hidden_by_id])
    locked = db.Column(db.Boolean, default=False, nullable=False)
    locked_at = db.Column(db.DateTime)
    locked_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
    locked_by = db.relationship(User, foreign_keys=[locked_by_id])
    pinned = db.Column(db.Boolean, default=False, nullable=False)
    pinned_at = db.Column(db.DateTime)
    pinned_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
    pinned_by = db.relationship(User, foreign_keys=[pinned_by_id])
    initial_posting = association_proxy('initial_topic_posting_association', 'posting')
    posting_limited_to_moderators = db.Column(db.Boolean, default=False, nullable=False)

    def __init__(
        self, category_id: CategoryID, creator_id: UserID, title: str
    ) -> None:
        self.category_id = category_id
        self.creator_id = creator_id
        self.title = title

    def may_be_updated_by_user(self, user: CurrentUser) -> bool:
        # The creator may edit an unlocked topic (given the update permission);
        # moderators with `update_of_others` may edit regardless of lock state.
        return (
            (
                not self.locked
                and user.id == self.creator_id
                and user.has_permission(BoardTopicPermission.update)
            )
            or user.has_permission(BoardPermission.update_of_others)
        )

    @property
    def reply_count(self) -> int:
        # The first posting is the topic body itself, so replies = postings - 1.
        return self.posting_count - 1

    def count_pages(self, postings_per_page: int) -> int:
        """Return the number of pages this topic spans."""
        full_page_count, remaining_postings = divmod(
            self.posting_count, postings_per_page
        )
        if remaining_postings > 0:
            return full_page_count + 1
        else:
            return full_page_count

    def __eq__(self, other) -> bool:
        return self.id == other.id

    def __repr__(self) -> str:
        builder = ReprBuilder(self) \
            .add_with_lookup('id') \
            .add('category', self.category.title) \
            .add_with_lookup('title')
        if self.hidden:
            builder.add_custom(f'hidden by {self.hidden_by.screen_name}')
        if self.locked:
            builder.add_custom(f'locked by {self.locked_by.screen_name}')
        if self.pinned:
            builder.add_custom(f'pinned by {self.pinned_by.screen_name}')
        return builder.build()
|
[
"[email protected]"
] | |
a4315be2234838908f42b3d2d0d042647f384c92
|
a80884040ce1c178274a3068d216f440dd541844
|
/tests/regression/test_tee_map_completion.py
|
07635a9b627669bb5320d9242fc4ef3be123bf53
|
[
"MIT"
] |
permissive
|
maki-nage/rxsci
|
a4aae51edc1ef684b55df22e34c11aa1d54ef740
|
915e59ebf593c4b313265bb87cf0e1209ec2ee0f
|
refs/heads/master
| 2023-01-19T14:32:11.638497 | 2023-01-17T08:06:35 | 2023-01-17T08:06:35 | 242,592,973 | 9 | 2 |
MIT
| 2022-11-08T21:54:16 | 2020-02-23T21:23:56 |
Python
|
UTF-8
|
Python
| false | false | 464 |
py
|
import rx
import rx.operators as ops
import rxsci as rs
def test_completion():
    """tee_map must emit the combined tuple and then complete the stream."""
    source = [1, 2, 3]
    received = []
    completions = []

    pipeline = rx.from_(source).pipe(
        rs.ops.tee_map(
            ops.count(),
            rs.math.sum(reduce=True),
        )
    )
    pipeline.subscribe(
        on_next=received.append,
        on_completed=lambda: completions.append(True),
    )

    assert completions == [True]
    assert received == [(3, 6)]
|
[
"[email protected]"
] | |
28d7d37773b000b74a0651e75715b1992064c925
|
5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e
|
/python_basics/python_fundamentals/functionsIntermediate/functionsIntermediate1.py
|
d4c0b59e7de7fe8d6ba8eb493267361224b8c5de
|
[] |
no_license
|
eDiazGtz/pythonLearning
|
06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8
|
57d7b2292cf5d9769cce9adf765962c3c0930d6c
|
refs/heads/master
| 2023-06-18T02:16:09.293375 | 2021-05-03T18:09:52 | 2021-05-03T18:09:52 | 335,090,531 | 0 | 0 | null | 2021-05-03T18:09:53 | 2021-02-01T21:35:24 |
Python
|
UTF-8
|
Python
| false | false | 1,200 |
py
|
import random
# random.random() returns a random floating number between 0.000 and 1.000
# random.random() * 50 returns a random floating number between 0.000 and 50.000
# random.random() * 25 + 10 returns a random floating number between 10.000 and 35.000
# round(num) returns the rounded integer value of num 0.5 round up
#print(randInt()) # should print a random integer between 0 to 100
#print(randInt(max=50)) # should print a random integer between 0 to 50
#print(randInt(min=50)) # should print a random integer between 50 to 100
#print(randInt(min=50, max=500)) # should print a random integer between 50 and 500
def randInt(min=0, max=100):
    """Return a rounded random integer in [min, max].

    Keeps the original contract: when max < min an error *string* is returned
    instead of raising.  (The parameter names shadow the builtins, but they
    are part of the public keyword interface, so they stay.)
    """
    span = max - min
    if span < 0:
        return "Min must be less than Max; Max must be greater than 0"
    return round(random.random() * span + min)
# Demo: exercise each calling convention of randInt.
print(randInt()) # should print a random integer between 0 to 100
print(randInt(max=50)) # should print a random integer between 0 to 50
print(randInt(min=50)) # should print a random integer between 50 to 100
print(randInt(min=50, max=500)) # should print a random integer between 50 and 500
|
[
"[email protected]"
] | |
c8770ff0014c49e8aef32a4df572380d038204df
|
23f3349e8b50f0cb3e461bbd65c1ea8dec792d0b
|
/2_semestr/lec_05.py
|
683d1a2c6fabbc138893e0adaa2d19cb1db944a8
|
[] |
no_license
|
JacobLutin/Python_bmstu
|
d17866dbab0e74f0f9d600c4dbd9d53eb5c5b7be
|
66fd8679de7556978b9cd1e9fd8646a8d7d6daa8
|
refs/heads/master
| 2020-05-29T14:40:09.310602 | 2017-03-27T05:18:58 | 2017-03-27T05:18:58 | 64,742,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
import numpy as np
a = np.arange(12)
a1 = np.copy(a)
print("Исходная матрицы")  # "Original matrix"
a2 = np.reshape(a1, (3, 4))
print(a2, '\n')
a2 = a2.T
print("Транспонированная матрица")  # "Transposed matrix"
print(a2, '\n')
# min, max, sum, sorting
b = np.array([[2, 8, 0], [6, 1, 3], [4, 7, 5]])
print("Новая исходная матрица\n", b, '\n')  # "New source matrix"
dsum = b.sum()
dmin = b.min()
dmax = b.max()
print('Некоторые значения для всей матрицы')  # "Some values for the whole matrix"
print('sum=', dsum, ' min=', dmin, ' max=', dmax, '\n')
mincol = b.min(axis=0)  # column-wise minima
maxrow = b.max(axis=1)  # row-wise maxima
print('Значения min и max для столбцов и строк')  # "Min/max per column and row"
print('min в столбцах = ', mincol, ' max в строках = ', maxrow, '\n')
# Description of ndarray.sort:
# sort(axis=-1, kind='quicksort', order=None)
# axis - the axis along which to sort.
# kind - sorting algorithm: 'quicksort', 'mergesort' or 'heapsort'.
c = b.copy()
c.sort(axis=0, kind='mergesort')
print('Сортировка столбцов\n', c)  # "Column sort"
print()
c = b.copy()
c.sort(axis=1, kind='mergesort')
print('Сортировка строк\n', c)  # "Row sort"
print()
|
[
"[email protected]"
] | |
dcf32321584fe37884e0f4817db5a71e31b2c2c1
|
b0c2f67b2878a312c6e6ffa5fe4158bd55dad69c
|
/chap4/exp4.1.py
|
55fe517c4d14c71d71fb13cf69c53d6a324056ee
|
[] |
no_license
|
loukey/pythonDemo
|
efda51be07beede0be2a8cdaae9b7e153bc790bc
|
9e6f64908ccba64d32ffc58edbb8d6f8ab6bf68d
|
refs/heads/master
| 2021-01-20T04:39:24.054749 | 2017-05-19T05:41:00 | 2017-05-19T05:41:00 | 89,711,134 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
py
|
# -*- coding: utf-8 -*-
# Example 4.1: simply printing numbers.
# Interestingly, Python has no do..while or while..until loop,
# but we can emulate one with `while True` plus `break`.
# NOTE: Python 2 code (print statement; `input()` evaluates the typed text).
while True:
    print 'Please enter a number:'
    number = input()
    print number
    if number==0:
        break
print 'List Ended'
|
[
"[email protected]"
] | |
e8cb6b230d208935d065fedcf70f0c591e8ba666
|
8bdd86dd0ae6b6f7aae17ff0ef2d887afd06d2fa
|
/examples/sharecuts.py
|
e78cac870e69efa96b9030c63a0ef69e72d5fb6a
|
[
"MIT"
] |
permissive
|
shuxiaokai/looter
|
b0504600e4d5730eff2aab27fbe19d2fd5fb1f18
|
2be094576e31fd13123719ca94e42cb31475dffa
|
refs/heads/master
| 2023-04-18T01:19:51.827004 | 2020-05-17T08:11:28 | 2020-05-17T08:11:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 849 |
py
|
"""
捷径社区的捷径排行榜
"""
from pprint import pprint
import requests
import looter as lt
domain = 'https://sharecuts.cn'
total = []
def crawl(url):
    """Fetch the shortcut-ranking JSON at *url* and append one row per
    shortcut to the module-level `total` list (echoing each row as it goes)."""
    items = requests.get(url, headers=lt.DEFAULT_HEADERS).json()
    for item in items:
        row = {
            'name': item['name'],
            'category': item['Category']['name'],
            'note': item['note'],
            'author': item['User']['nickname'],
            'url': item['url'],
            'downloads': item['downloads_count'],
            'votes': item['votes_count'],
            'comments': item['comments_count'],
        }
        pprint(row)
        total.append(row)
if __name__ == '__main__':
    # Single request for the whole hot list (limit set above the total count).
    task = f'{domain}/api/shortcuts/hot?offset=0&limit=1025'
    crawl(task)
    lt.save(total, name='sharecuts.csv', sort_by='votes', order='desc')
|
[
"[email protected]"
] | |
6075bf7ba52d2c689ce7e8d799b2bdfa2bb43e1b
|
4ad06bae18751fd71df145d126e3624ea90e05e6
|
/flat_sharp/interpolation.py
|
b8f5c77d79fd72a53d69363fa13955e41a1408be
|
[] |
no_license
|
daniellengyel/flat_sharp
|
04d82399e44d178e52c56acf1ba2ff3a75e4c27f
|
4a1e3f4abbebc7a5342aaa63080493b77aff5677
|
refs/heads/master
| 2021-04-22T16:14:26.238625 | 2020-07-09T13:03:44 | 2020-07-09T13:03:44 | 249,861,447 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,933 |
py
|
import numpy as np
from utils import *
from data_getters import *
from postprocessing import *
import copy
import torch
def interpolate_models(model1, model2, beta):
    """Convex combination of two models' parameters.

    Returns a deep copy of ``model2`` whose parameters matching ``model1`` by
    name are replaced in place with ``(1 - beta) * p1 + beta * p2``.
    """
    blended = copy.deepcopy(model2)
    blended_params = dict(blended.named_parameters())
    for name, param in model1.named_parameters():
        if name in blended_params:
            target = blended_params[name]
            target.data.copy_((1. - beta) * param.data + beta * target.data)
    return blended
def scale_output_model(model1, alpha):
    """Return a deep copy of `model1` with its final layer scaled by `alpha`.

    Only the output layer's weight and bias are multiplied; all other
    parameters are untouched.
    """
    # NOTE(review): `LeNet` is not defined in this module — presumably it comes
    # from one of the star imports above; confirm which module provides it.
    if isinstance(model1, LeNet):
        last_layer_names = ["fc3.weight", "fc3.bias"]
    else:
        # Non-LeNet models are assumed to name their output layer "fc2" —
        # TODO confirm this holds for every architecture passed in.
        last_layer_names = ["fc2.weight", "fc2.bias"]
    params1 = model1.named_parameters()
    new_model = copy.deepcopy(model1)
    new_params = new_model.named_parameters()
    dict_new_params = dict(new_params)
    for name1, param1 in params1:
        if name1 in last_layer_names:
            dict_new_params[name1].data.copy_(alpha * param1.data)
    return new_model
def T_alpha_models(model, num_inter_models, alpha_range):
    """Build a family of output-layer-rescaled copies of `model`.

    For each of `num_inter_models` alphas evenly spaced over `alpha_range`,
    returns a deep copy of `model` whose final layer is scaled by that alpha
    (via this module's `scale_output_model`).

    Bug fix: the original body referenced undefined names (`beta`,
    `curr_model`), blended the model with itself, and ended with an
    unreachable `return new_model` — it could never run. This implements the
    evident intent using the module's own scaling helper.
    """
    alphas = np.linspace(alpha_range[0], alpha_range[1], num_inter_models)
    return [scale_output_model(model, alpha) for alpha in alphas]
def get_loss_grad(net, criterion, data):
    """One forward/backward pass on a single batch.

    Returns a tuple ``(loss, grad_norm)`` where ``grad_norm`` is the norm of
    the flattened parameter gradients after the backward pass.
    """
    inputs, labels = data
    inputs.requires_grad = True  # also track gradients w.r.t. the inputs
    net.zero_grad()

    predictions = net(inputs)
    loss = criterion(predictions.float(), labels)
    loss.backward(retain_graph=True)

    grad_vec = get_grad_params_vec(net)
    return loss, torch.norm(grad_vec)
def get_model_interpolate_arr(model_a, model_b, num_inter_models, beta_bound=None):
    """Models along the straight line between `model_a` and `model_b`.

    Interpolation coefficients are `num_inter_models` evenly spaced betas over
    `beta_bound`, defaulting to [0, 1].
    """
    lo, hi = beta_bound if beta_bound is not None else (0, 1)
    return [
        interpolate_models(model_a, model_b, beta)
        for beta in np.linspace(lo, hi, num_inter_models)
    ]
def get_model_interpolate_2d(offset, v1, v2, num_inter_models, alpha_bound, beta_bound, func):
    """Evaluate `func` over a 2-D grid of models around `offset`.

    Grid point (x, y) corresponds to the model `offset + x * v1 + y * v2`,
    where v1/v2 are flat parameter vectors turned back into networks via
    `vec_to_net`. Returns a nested list `val_arr[x_index][y_index]` of the
    `func` results.
    """
    X = np.linspace(alpha_bound[0], alpha_bound[1], num_inter_models)
    Y = np.linspace(beta_bound[0], beta_bound[1], num_inter_models)
    # Reshape the direction vectors into networks so parameters can be
    # matched by name below.
    v1_net = vec_to_net(v1, offset)
    v2_net = vec_to_net(v2, offset)
    v1_dict = dict(v1_net.named_parameters())
    v2_dict = dict(v2_net.named_parameters())
    val_arr = []
    for x in X:
        curr_arr = []
        for y in Y:
            curr_model = copy.deepcopy(offset)
            dict_curr_model = dict(curr_model.named_parameters())
            for name1, param1 in offset.named_parameters():
                dict_curr_model[name1].data.copy_(dict_curr_model[name1].data + x * v1_dict[name1].data + y * v2_dict[name1].data)
            to_append = func(curr_model)
            curr_arr.append(to_append)
        val_arr.append(curr_arr)
    return val_arr
def project_onto(net, v1, v2, offset):
    """Coordinates of (net - offset) along unit-normalized directions v1, v2.

    Returns the pair ``(alpha, beta)`` of scalar projections of the flattened
    parameter displacement.
    """
    displacement = get_params_vec(net) - get_params_vec(offset)
    alpha = torch.matmul(v1 / torch.norm(v1), displacement)
    beta = torch.matmul(v2 / torch.norm(v2), displacement)
    return alpha, beta
def take_n_gd_steps(net, optimizer, criterion, data, n=1, get_grad=True, v1=None, v2=None, offset=None):
    """Run `n` gradient-descent steps on one fixed batch.

    Returns ``(net, grads_arr, projections)``: the trained net, the per-step
    flattened gradient vectors (when `get_grad`), and the (alpha, beta)
    projections of the trajectory onto v1/v2 around `offset` (only when both
    directions are supplied; the pre-training point is included first).
    """
    grads_arr = []
    projections = []
    if (v1 is not None) and (v2 is not None):
        projections.append(project_onto(net, v1, v2, offset))
    for _ in range(n):
        inputs, labels = data
        # Compute gradients for input.
        inputs.requires_grad = True
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs.float(), labels)
        loss.backward(retain_graph=True)
        optimizer.step()
        if (_ % 100) == 0:
            # Periodic progress output (step index and current loss).
            print(_)
            print(loss)
            print()
        if get_grad:
            grads_arr.append(get_grad_params_vec(net))
        if (v1 is not None) and (v2 is not None):
            projections.append(project_onto(net, v1, v2, offset))
    return net, grads_arr, projections
def do_the_do(model, optimizer, criterion, data_loader, num_inter_models, num_steps=1, beta_bound=None):
    """Train on one batch, then return models interpolated between the
    pre- and post-training parameters.

    Takes `num_steps` gradient steps on a single batch drawn from
    `data_loader` and returns `num_inter_models` interpolated models.

    Bug fix: `take_n_gd_steps` returns a ``(net, grads, projections)`` tuple;
    the original passed that entire tuple to `get_model_interpolate_arr` as if
    it were a model, which would fail on `named_parameters`. The net is now
    unpacked first.
    """
    data = next(iter(data_loader))
    model_a = copy.deepcopy(model)
    model_b, _grads, _projections = take_n_gd_steps(
        model, optimizer, criterion, data, n=num_steps
    )
    return get_model_interpolate_arr(
        model_a, model_b, num_inter_models, beta_bound=beta_bound
    )
exp_id = "1589992134.56161"  # experiment id used to index the config table
if __name__ == "__main__":
    # NOTE(review): several names used here (`experiment_folder`, `DataLoader`,
    # `exp_dict`, `get_nets`, `get_optimizers`, `plt`) are not defined in this
    # module — presumably supplied by the star imports above; confirm before
    # running.
    # get data
    train_data, test_data = get_postprocessing_data(experiment_folder, vectorized=True)
    train_loader = DataLoader(train_data, batch_size=10000, shuffle=True) # fix the batch size
    test_loader = DataLoader(test_data, batch_size=len(test_data))
    criterion = torch.nn.CrossEntropyLoss()
    cfs_dict = exp_dict["stuff"]["configs"].loc[exp_id].to_dict()
    nets = get_nets(cfs_dict)
    optimizers = get_optimizers(cfs_dict)(nets)
    inter_nets = []
    for nn_idx in range(len(nets)):
        inter_nets.append(do_the_do(nets[nn_idx], optimizers[nn_idx], criterion, train_loader, 20))
    for nn_index in range(len(nets)):
        # NOTE(review): indexing `inter_nets[nn_index][1][:, 1]` treats the
        # result as a 2-D array, but do_the_do returns a list of models —
        # looks stale relative to the current return type; verify.
        y_val = inter_nets[nn_index][1][:, 1]
        plt.plot(list(range(len(y_val))), y_val)
    plt.show()
|
[
"[email protected]"
] | |
3c2914aeeb137940e884af34f7f4ae1b9a1cb124
|
306d2a92fb331aec6ddf0794b538d6e3385a0df9
|
/app/api/news/views.py
|
4821c74644481851dbbc7b49363e6c122d7dddf6
|
[] |
no_license
|
Zarinabonu/ForceApp
|
f343d3a52aee08890230c5425c9e238df99c5a7f
|
13f8e8613999c4850fc6f0bfcec66f897eecbe4a
|
refs/heads/master
| 2020-12-10T08:00:25.072289 | 2020-01-20T13:14:07 | 2020-01-20T13:14:07 | 233,540,795 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
py
|
from rest_framework.generics import ListAPIView
from app.api.news.serializers import NewsSerializer
from app.model import News
class NewsListAPIView(ListAPIView):
    """Read-only list endpoint for news items with optional query-string ordering."""
    serializer_class = NewsSerializer

    def get_queryset(self):
        qs = News.objects.all()
        # NOTE(review): the query parameter is literally named '-views' yet
        # triggers an *ascending* order_by('views') — confirm the intent.
        v = self.request.GET.get('-views')
        l_seen = self.request.GET.get('last_seen')
        if v:
            qs = qs.order_by('views')
        if l_seen:
            # Newest first; this replaces (does not chain with) the views
            # ordering when both parameters are present.
            qs = qs.order_by('-created')
        return qs
|
[
"[email protected]"
] | |
fa4627fc540dc0fe3e22751b5e32ea7167da0399
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc029/B/4327177.py
|
e0009a3dbc5483db0e2de0f6c57fab864e879af1
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
from collections import Counter

# AGC 029 B: maximize pairs whose sum is a power of two.
# Greedy: scan values in descending order; each still-unused value v can only
# be completed to the next power of two by the partner 2**v.bit_length() - v.
input()  # first line (N) is implied by the list length
values = sorted(map(int, input().split()), reverse=True)
remaining = Counter(values)
pairs = 0
for v in values:
    if remaining[v] == 0:
        continue  # already consumed as someone else's partner
    remaining[v] -= 1
    partner = 2 ** v.bit_length() - v
    if remaining[partner]:
        remaining[partner] -= 1
        pairs += 1
print(pairs)
|
[
"[email protected]"
] | |
955d81f88ec11f246f6d9dcdd6f9b8a4d2744fe8
|
c8dc80a0082b2d59de0e1df24b25483f55510c57
|
/0x0F-python-object_relational_mapping/11-model_state_insert.py
|
55472dd9df2fb8c159784fb95664ea3259b05a3a
|
[] |
no_license
|
Daransoto/holbertonschool-higher_level_programming
|
51d81fac1dc3a6bd0799283332a3bcf5e2480330
|
4fa5f95b462f0e22b1e87189d162f0cb8c5625b6
|
refs/heads/master
| 2020-07-22T23:03:01.184032 | 2020-02-13T21:36:18 | 2020-02-13T21:36:18 | 207,358,507 | 0 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
#!/usr/bin/python3
""" Inserts Louisiana state. """
import sys
from sqlalchemy import create_engine
from model_state import Base, State
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
    # argv: 1 = MySQL username, 2 = password, 3 = database name
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
                           .format(sys.argv[1], sys.argv[2], sys.argv[3]),
                           pool_pre_ping=True)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    New = State(name="Louisiana")
    session.add(New)
    session.commit()
    # The primary key is only assigned after the commit.
    print(New.id)
    session.close()
|
[
"[email protected]"
] | |
d5b2d0a9e571234c680d803851735c7c32986bee
|
62912bea20c56093f27fb2826e0f5f4a26a3ed0b
|
/symphony/cli/pyinventory/api/user.py
|
26b7cb869b430045ee70020b452c8fdb9a7edcd2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
sijad/magma
|
5c2b6520e207f05c29f29248627b90629f1f4088
|
78b5f16432d7070a84da74b90d4f1e3f8348fa37
|
refs/heads/master
| 2021-04-09T06:34:15.295104 | 2020-03-20T19:28:42 | 2020-03-20T19:31:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,129 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from typing import List
from ..client import SymphonyClient
from ..consts import Entity, User
from ..exceptions import EntityNotFoundError
from ..graphql.edit_user_input import EditUserInput
from ..graphql.edit_user_mutation import EditUserMutation
from ..graphql.user_query import UserQuery
from ..graphql.user_status_enum import UserStatus
from ..graphql.users_query import UsersQuery
USER_ROLE = 1
def get_user(client: SymphonyClient, email: str) -> User:
    """Look up a user by the email address they registered with.

    Args:
        email: the email address the user registered with

    Returns:
        pyinventory.consts.User object

    Raises:
        EntityNotFoundError: no user exists for `email`
        FailedOperationException: internal inventory error

    Example:
        ```
        user = client.get_user("[email protected]")
        ```
    """
    fetched = UserQuery.execute(client, email).user
    if fetched is None:
        raise EntityNotFoundError(entity=Entity.User, entity_name=email)
    return User(
        id=fetched.id,
        auth_id=fetched.authID,
        email=fetched.email,
        status=fetched.status,
        role=fetched.role,
    )
def add_user(client: SymphonyClient, email: str, password: str) -> User:
    """Add a new user to inventory with its email and password.

    Args:
        email: the email address of the user
        password: password the user would connect with

    Returns:
        pyinventory.consts.User object

    Raises:
        EntityNotFoundError: the user was not created properly
        FailedOperationException: internal inventory error
        AssertionError: the server reported an explicit creation error
        HTTPError: the creation request failed without an error payload

    Example:
        ```
        user = client.add_user("[email protected]", "P0ssW!rd0f43")
        ```
    """
    resp = client.post(
        "/user/async/",
        {"email": email, "password": password, "role": USER_ROLE, "networkIDs": []},
    )
    if not resp.ok:
        error_message = resp.json().get("error", None)
        if error_message is not None:
            raise AssertionError(error_message)
        # BUG FIX: a bare `raise` with no active exception raised an opaque
        # RuntimeError("No active exception to re-raise"). Surface the HTTP
        # failure instead (HTTPError, as documented above).
        resp.raise_for_status()
    return get_user(client, email)
def deactivate_user(client: SymphonyClient, user: User) -> None:
    """Deactivate *user*, preventing further logins to symphony.

    Users in symphony are never deleted, only de-activated.

    Args:
        user: user to deactivate

    Raises:
        FailedOperationException: internal inventory error

    Example:
        ```
        user = client.get_user("[email protected]")
        client.deactivate_user(user)
        ```
    """
    edit_input = EditUserInput(id=user.id, status=UserStatus.DEACTIVATED)
    EditUserMutation.execute(client, input=edit_input)
def activate_user(client: SymphonyClient, user: User) -> None:
    """Re-activate *user*, allowing logins to symphony again.

    Args:
        user: user to activate

    Raises:
        FailedOperationException: internal inventory error

    Example:
        ```
        user = client.get_user("[email protected]")
        client.activate_user(user)
        ```
    """
    edit_input = EditUserInput(id=user.id, status=UserStatus.ACTIVE)
    EditUserMutation.execute(client, input=edit_input)
def get_users(client: SymphonyClient) -> List[User]:
    """List every user in the system, active and deactivated alike.

    Returns:
        list of `pyinventory.consts.User` objects

    Raises:
        FailedOperationException: internal inventory error

    Example:
        ```
        users = client.get_users()
        for user in users:
            print(user.email)
        ```
    """
    connection = UsersQuery.execute(client).users
    if connection is None:
        return []
    return [
        User(
            id=node.id,
            auth_id=node.authID,
            email=node.email,
            status=node.status,
            role=node.role,
        )
        for node in (edge.node for edge in connection.edges)
        if node is not None
    ]
def get_active_users(client: SymphonyClient) -> List[User]:
    """List only the active users in the system.

    Returns:
        list of `pyinventory.consts.User` objects

    Raises:
        FailedOperationException: internal inventory error

    Example:
        ```
        users = client.get_active_users()
        for user in users:
            print(user.email)
        ```
    """
    return [u for u in get_users(client) if u.status == UserStatus.ACTIVE]
|
[
"[email protected]"
] | |
fda194aff772871c7c4b2ea781497dc72cf05c8a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_cryings.py
|
62b48a889fcdd5f58f940b8aca110dd0c8ff2b83
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
from xai.brain.wordbase.verbs._cry import _CRY
#calss header
class _CRYINGS(_CRY, ):
def __init__(self,):
_CRY.__init__(self)
self.name = "CRYINGS"
self.specie = 'verbs'
self.basic = "cry"
self.jsondata = {}
|
[
"[email protected]"
] | |
ca010878792d0bc73fec72213f7db9f251dfd0e5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_bandaged.py
|
9ef009f331d8a4fe83aa934179c391e624957156
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 245 |
py
|
from xai.brain.wordbase.nouns._bandage import _BANDAGE
#calss header
class _BANDAGED(_BANDAGE, ):
def __init__(self,):
_BANDAGE.__init__(self)
self.name = "BANDAGED"
self.specie = 'nouns'
self.basic = "bandage"
self.jsondata = {}
|
[
"[email protected]"
] | |
cd846f89d90d6f2f5ce61fa895e49409d4e39009
|
604ffaf79c5f9c816bb1a2151ae33fbf29bca52b
|
/cloudstoragetui/keypress.py
|
6eaf37d5320d02c08c43e2c2b52b735c33eabb6f
|
[
"MIT"
] |
permissive
|
joeyism/cloud-storage-tui
|
1069092b51f1d11daa033ea5896b625e42e55691
|
8fda9bc8551756e88db706944489f1bbcc95a52c
|
refs/heads/master
| 2023-05-31T00:33:07.979555 | 2021-06-07T17:19:26 | 2021-06-07T17:19:26 | 352,346,198 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,773 |
py
|
import curses
from typing import List
from cloudstoragetui.constants import KEY_QUIT, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_ENTER, ESC, UP, DOWN, LEFT, RIGHT
from cloudstoragetui.draw import DrawnBox
from cloudstoragetui.cursor_state import CursorState
from cloudstoragetui.debug import log
def _extract_min_max(box):
min_y = box.top_left_y + 1
min_x = box.top_left_x + 1
max_y = box.length_y + box.top_left_y - 2
max_x = (box.index + 1) * box.length_x - 1
return (min_y, min_x, max_y, max_x)
def _eval_keypress(screen, key, boxes, cursor_state):
    """Handle one keypress: move the cursor within/between boxes and return
    the action taken (ESC/UP/DOWN/LEFT/RIGHT) or None for unhandled keys.
    """
    # Current physical cursor position and the bounds of the active box.
    curs_y, curs_x = curses.getsyx()
    box = boxes[cursor_state.column]
    min_y, min_x, max_y, max_x = _extract_min_max(box)
    action = None
    if key in KEY_QUIT:
        action = ESC
    elif key in KEY_UP:
        # Move up one row, clamped to the top of the box.
        cursor_state.move_row_up(min_y)
        screen.move(max(curs_y - 1, min_y), curs_x)
        action = UP
    elif key in KEY_DOWN:
        # Move down one row, clamped to the bottom of the box.
        cursor_state.move_row_down(max_y)
        screen.move(min(curs_y + 1, max_y), curs_x)
        action = DOWN
    elif key in KEY_LEFT:
        if curs_x == min_x:
            # Already at the box's left edge: jump to the previous column's box.
            cursor_state.move_column_left()
            box = boxes[cursor_state.column]
            min_y, min_x, max_y, max_x = _extract_min_max(box)
            screen.move(min_y, min_x)
        else:
            screen.move(curs_y, max(curs_x - 1, min_x))
        action = LEFT
    elif key in KEY_RIGHT + KEY_ENTER:
        # Right or Enter both descend into the next column's box.
        cursor_state.move_column_right()
        box = boxes[cursor_state.column]
        screen.move(box.top_left_y + 1, box.top_left_x + 1)
        action = RIGHT
    screen.refresh()
    return action
def eval_keypress(screen, key: int, boxes: List[DrawnBox], cursor_state: CursorState):
    """Public entry point: delegate to _eval_keypress and return the resulting action."""
    return _eval_keypress(screen, key, boxes, cursor_state)
|
[
"[email protected]"
] | |
8020760bc5cd0a1d148739c5991cea3a09beb85f
|
5aadc1f06bdb68a73bb003b23cc85af528d61bf4
|
/detection_network/src/rl/ppo.py
|
b71a185fbb72e8e16bc734d17634f6bdea14b165
|
[] |
no_license
|
zideajang/zi_full_self_drive_system
|
81dca2ca0541dfab7c021c6e3a0e58701bbf1693
|
fee2e4057619a19a585fbd8b9622f69c25946be1
|
refs/heads/master
| 2023-09-03T02:41:35.720600 | 2021-09-27T02:37:41 | 2021-09-27T02:37:41 | 358,083,188 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,307 |
py
|
import torch
import torch.optim as optim
class RolloutStorage:
    """Buffer for per-step rollout data (actions, log-probs, rewards).

    NOTE(review): this is a stub — PPO.update expects `insert`, `rewards`
    and `generator` members that are not implemented here yet.
    """
    def __init__(self,num_steps,num_processes,action_size):
        # Arguments are currently ignored by this placeholder implementation.
        pass
class PPO(object):
def __init__(
self,
controller,
clip_param,
lr,
baseline_decay,
action_size = 18,
ppo_epoch=1,
num_mini_batch=100,
max_grad_norm=2.0,
entropy_coef=0,
num_steps=100,
num_processes=1
):
self.ppo_epoch = ppo_epoch
self.controller = controller
self.optimizer = optim.Adam(controller.parameters(),lr=lr)
self.num_mini_batch = num_mini_batch
self.clip_param = clip_param
self.max_grad_norm = max_grad_norm
self.entropy_coef = entropy_coef
self.rollouts = RolloutStorage(num_steps,num_processes,action_size)
self.baseline = None
self.decay = baseline_decay
    def state_dict(self):
        """Snapshot of trainer state for checkpointing.

        NOTE(review): the "rollouts" entry actually stores the *controller*'s
        state_dict, and the "optimizer:" key contains a stray trailing colon —
        confirm against whatever consumes this dict before relying on the keys.
        """
        return {
            "baseline":self.baseline,
            "rollouts":self.controller.state_dict(),
            "optimizer:":self.optimizer.state_dict()
        }
    def load_state_dict(self,states):
        """Restore trainer state from *states*; not implemented yet (stub)."""
        pass
    def update(self, sample, is_train=True):
        """Record one (reward, action, log_prob) sample and, when training,
        run PPO epochs over the stored rollouts.

        NOTE(review): the method body appears truncated below (adv_targ_th is
        computed but never used and no loss/return follows) — the original
        source likely continues past this excerpt.
        """
        reward, action, log_prob = sample
        # Exponential moving-average baseline of the reward.
        if self.baseline is None:
            self.baseline = reward
        else:
            self.baseline = self.decay * self.baseline + (1 - self.decay) * reward
        self.rollouts.insert(action, log_prob, reward)
        if not is_train:
            return -1,-1
        # Advantage relative to the running baseline.
        advantages = self.rollouts.rewards - self.baseline
        loss_epoch = 0
        entropy_epoch = 0
        for _ in range(self.ppo_epoch):
            data_generator = self.rollouts.generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                (
                    actions_batch,
                    reward_batch,
                    old_actions_log_probs_batch,
                    adv_targ,
                ) = sample
                action_log_probs, entropy = self.controller.evaluate_actions(
                    actions_batch
                )
                # Importance-sampling ratio between new and old policies.
                ratio = torch.exp(
                    action_log_probs - torch.from_numpy(adv_targ)
                )
                adv_targ_th = torch.from_numpy(adv_targ).float()
|
[
"[email protected]"
] | |
1fe3fb6fa971710011542bc58df695cb0c6d7730
|
c3082eb2adc43b311dd3c9ff16fd3ed9df85f266
|
/python/examples/fastapi/dynamic-response/main.py
|
e9ecf608f59322153963fae72ce85a28b0f05e1f
|
[] |
no_license
|
szabgab/slides
|
78818c7138331b3ba9e221c81da3678a46efe9b3
|
63bba06678554db737602f2fbcd6510c36037e8a
|
refs/heads/main
| 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 |
Python
|
UTF-8
|
Python
| false | false | 164 |
py
|
from fastapi import FastAPI
import datetime
# Application object picked up by the ASGI server.
app = FastAPI()


@app.get("/")
async def root():
    """Greet the caller, embedding the server's current wall-clock time."""
    now = datetime.datetime.now()
    return {"message": "Hello World at {}".format(now)}
|
[
"[email protected]"
] | |
a9582fe1ff3a16c1aa108f54b5ff1ae3984f5ccb
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_171/ch65_2019_06_07_01_24_18_525767.py
|
79aa0ff8a8f6334244edcac2ba434f9ec46d556f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 206 |
py
|
def acha_bigramas(string):
    """Return the distinct bigrams (2-character substrings) of *string*,
    in order of first appearance.

    BUG FIX: the original guard was `len(string[i:i+2]) > 3`, which is
    impossible for a 2-character slice, so the function always returned [].
    The intended check is that the slice is a complete bigram (length 2),
    i.e. skip the dangling 1-character tail.
    """
    lista = []
    i = 0
    while i < len(string):
        par = string[i:i + 2]
        if par not in lista and len(par) == 2:
            lista.append(par)
        i += 1
    return lista
|
[
"[email protected]"
] | |
c5248a3cae6dcafab9c6ad505abc712db1980a72
|
74ec860957869ea48af8535bf32f9fd87cc81011
|
/dna-methylation/scripts/develop/illumina450k/residuals/plot/scatter_comparison.py
|
22a26b4bf6f95cc4078b5e99f2a407784322a5a3
|
[] |
no_license
|
GillianGrayson/dna-methylation
|
f1a0878f4aa8c917bee9e5230387d6145826fb3a
|
e602ba91f3d275d92aadf0f874ac6f189adf547b
|
refs/heads/master
| 2022-02-08T03:31:22.423781 | 2022-02-01T16:50:37 | 2022-02-01T16:50:37 | 164,105,085 | 0 | 1 | null | 2020-03-20T18:08:24 | 2019-01-04T12:30:29 |
Python
|
UTF-8
|
Python
| false | false | 2,721 |
py
|
import pydnameth as pdm
import pandas as pd
import os.path
from scripts.develop.routines import *
max_rows = 10
fn = 'scatter_comparison_rows.xlsx'
rows_dict = {}
if os.path.isfile(fn):
df = pd.read_excel(fn)
tmp_dict = df.to_dict()
for key in tmp_dict:
curr_dict = tmp_dict[key]
rows_dict[key] = list(curr_dict.values())
fn = 'scatter_comparison_cols.xlsx'
cols_dict = {}
if os.path.isfile(fn):
df = pd.read_excel(fn)
tmp_dict = df.to_dict()
for key in tmp_dict:
curr_dict = tmp_dict[key]
cols_dict[key] = list(curr_dict.values())
# Build one parallel entry per database: the data handle plus the
# annotation/attribute configuration used for its scatter panel.
data_bases = cols_dict['data_bases']
data_list = []
annotations_list = []
attributes_list = []
observables_list = []
data_params_list = []
for data_base in data_bases:
    data = pdm.Data(
        path='',
        base=data_base
    )
    data_list.append(data)
    # 450k annotation set, autosomes only ('-X', '-Y'), bad probes excluded.
    annotations = pdm.Annotations(
        name='annotations',
        type='450k',
        exclude='bad_cpgs',
        select_dict={
            'CHR': ['-X', '-Y']
        }
    )
    annotations_list.append(annotations)
    observables = pdm.Observables(
        name='observables',
        types={}
    )
    cells = pdm.Cells(
        name='cells',
        types='any'
    )
    # NOTE(review): `target` is computed but Attributes below hard-codes
    # target='age' — confirm whether get_target() was meant to be used.
    target = get_target(data.base)
    obs = get_observables_list(data.base)
    data_params = get_data_params(data.base)
    # Restrict to blood cell-type fractions and the gender observable.
    data_params['cells'] = ['Bcell', 'CD4T', 'CD8T', 'Gran', 'NK']
    data_params['observables'] = ['gender']
    attributes = pdm.Attributes(
        target='age',
        observables=observables,
        cells=cells
    )
    attributes_list.append(attributes)
    observables_list.append(obs)
    data_params_list.append(data_params)
# Plot the rows in chunks of at most `max_rows` per figure.
for start in range(0, len(rows_dict['items']), max_rows):
    stop = min(start + max_rows, len(rows_dict['items']))
    # Slice every column to this chunk, reversed so rows plot top-to-bottom.
    chunk_rows = {key: values[start:stop][::-1] for key, values in rows_dict.items()}
    pdm.residuals_plot_scatter_comparison(
        data_list=data_list,
        annotations_list=annotations_list,
        attributes_list=attributes_list,
        observables_list=observables_list,
        data_params_list=data_params_list,
        rows_dict=chunk_rows,
        cols_dict=cols_dict,
        method_params={
            'line': 'no',
            'fit': 'yes',
            'semi_window': 4,
            'box_b': 'Q1',
            'box_t': 'Q99',
            'legend_size': 1,
            'add': 'none'
        }
    )
|
[
"[email protected]"
] | |
8df0a5de30770486d65f5750ddf7332158529917
|
385ce240ae264a1449079c21bd0c4cbe7c0fe3b8
|
/myowntests/ifelseladder.py
|
6be8802a946907017b902d6c6c70418b5968deb2
|
[] |
no_license
|
Maxcousin123/Python-workspace
|
3ed60ae80d790b5c055bf47872ff0fdd39f4ec58
|
326b023190a12e082dcb35ae5ab8ef644c32159b
|
refs/heads/master
| 2022-11-24T11:05:08.707003 | 2020-07-29T06:32:08 | 2020-07-29T06:32:08 | 283,415,557 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
# Read the three subject marks from the user.
maths = int(input('fill your math grade'))
physics = int(input('fill your physics grade'))
chemistry = int(input('fill your chemistry grade'))
av = (maths + physics + chemistry) / 3

# Per-subject pass/fail (pass mark is 35).
if maths < 35:
    print('Exam Failed')
else:
    print('Exam passed')
if physics < 35:
    print('Exam failed')
else:
    print('Exam passed')
# BUG FIX: the third check tested `physics` a second time and chemistry
# was never checked; test chemistry here.
if chemistry < 35:
    print('Exam failed')
else:
    print('Exam passed')

# Overall grade from the average mark.
# BUG FIX: `maths and physics and chemistry < 35` only compared chemistry
# (maths/physics were mere truthiness tests); compare each subject.
if maths < 35 and physics < 35 and chemistry < 35:
    print('Exams failed')
elif av <= 59:
    print('your grade is c')
# BUG FIX: `59 > av <= 69` meant av < 59 (unreachable after the elif above);
# the intended band is 59 < av <= 69.
elif 59 < av <= 69:
    print('your grade is b')
else:
    print('your grade is a')
# 69
|
[
"[email protected]"
] | |
c737af5d1ff073a22f5a3aaaf91937cb8797fb95
|
8164fd930d78efbd3885198efbfd9692c585319b
|
/week7/CrawWeb/craw_web.py
|
b837f0305e260bb319dc6622a4866529aa9c6f96
|
[] |
no_license
|
kobso1245/Hack_BG
|
7a7b7524b20fada3d9856a583e02c6959d442e66
|
7ffdb8ccefd67aeca5a49c9a9354e65c77149ad4
|
refs/heads/master
| 2020-05-31T17:43:10.316633 | 2015-10-05T21:43:20 | 2015-10-05T21:43:20 | 30,460,888 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
from Crawler import *
from Histogram import *
from Plotter import *
if __name__ == '__main__':
    # Crawl the directory page and record link counts into "histogram2".
    craw("http://register.start.bg/", "histogram2")
    # NOTE(review): "path_to_database_file" looks like a placeholder —
    # replace with the real path to websites.db before running.
    plot("path_to_database_file/websites.db")
|
[
"[email protected]"
] | |
414d29786eb51284f28473d7090b7778c546c6c3
|
dd860973103347b382d8a04ef68a9376561725ea
|
/wazimap_ng/profile/serializers/highlights_serializer.py
|
003ee147dc77b5daa5f28a8c58f772775589eb56
|
[
"Apache-2.0"
] |
permissive
|
mauricemojito/wazimap-ng
|
7a7da6c9fc653054c376d77c22df120ed0abb653
|
e03748cb1258cbafb43faba441bbc37dd0556a2a
|
refs/heads/master
| 2023-03-12T01:45:53.173039 | 2020-09-30T13:55:29 | 2020-09-30T13:55:29 | 342,342,503 | 0 | 0 |
Apache-2.0
| 2021-02-25T18:55:09 | 2021-02-25T18:34:39 |
Python
|
UTF-8
|
Python
| false | false | 2,545 |
py
|
from wazimap_ng.datasets.models import IndicatorData
from wazimap_ng.utils import mergedict
def get_subindicator(highlight):
    """Resolve the subindicator label selected by *highlight*.

    Falls back to the first subindicator when no index is set.
    """
    index = 0 if highlight.subindicator is None else highlight.subindicator
    return highlight.indicator.subindicators[index]
def sibling(highlight, geography):
    """Share of the highlight's subindicator value held by *geography*
    among itself and its sibling geographies.

    Returns None when *geography* has no datum or the siblings' total is 0.
    """
    siblings = geography.get_siblings()
    indicator_data = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography__in=siblings)
    subindicator = get_subindicator(highlight)
    numerator = None
    denominator = 0
    for datum in indicator_data:
        # BUG FIX: the denominator previously used direct indexing
        # (data["subindicators"][subindicator]) and could raise KeyError for
        # siblings missing the key, while the numerator already used .get().
        value = datum.data["subindicators"].get(subindicator, 0)
        if datum.geography == geography:
            numerator = value
        denominator += value
    if denominator > 0 and numerator is not None:
        return numerator / denominator
    return None
def absolute_value(highlight, geography):
    """Raw value of the highlight's subindicator for *geography*,
    or None when no indicator data exists for it.
    """
    queryset = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography=geography)
    if queryset.count() == 0:
        return None
    # TODO what to do with multiple results
    datum = queryset.first().data
    return datum["subindicators"].get(get_subindicator(highlight), 0)
def subindicator(highlight, geography):
    """Fraction of *geography*'s own subindicator totals represented by the
    highlight's selected subindicator, or None when unavailable.
    """
    queryset = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography=geography)
    if queryset.count() == 0:
        return None
    # Fix this need to cater for multiple results
    counts = queryset.first().data["subindicators"]
    numerator = counts.get(get_subindicator(highlight), 0)
    denominator = sum(counts.values())
    if denominator > 0 and numerator is not None:
        return numerator / denominator
    return None
# Dispatch table mapping a highlight's `denominator` field to the function
# that computes its value; absolute_value is the fallback (see
# HighlightsSerializer below).
algorithms = {
    "absolute_value": absolute_value,
    "sibling": sibling,
    "subindicators": subindicator
}
def HighlightsSerializer(profile, geography):
    """Serialize *profile*'s highlights for *geography* as a list of
    {"label", "value", "method"} dicts, skipping highlights with no value.
    """
    serialized = []
    for highlight in profile.profilehighlight_set.all().order_by("order"):
        method_name = highlight.denominator
        compute = algorithms.get(method_name, absolute_value)
        value = compute(highlight, geography)
        if value is None:
            continue
        serialized.append({"label": highlight.label, "value": value, "method": method_name})
    return serialized
|
[
"[email protected]"
] | |
a6269498158572202304da939470fc4fdd2e3b1f
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_github/tests/test_releases.py
|
afd754d1f6b234e0108ff323b9fb987cfd825a66
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 |
MIT
| 2023-03-29T20:40:31 | 2017-08-25T14:07:33 |
Python
|
UTF-8
|
Python
| false | false | 4,464 |
py
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from .common_config import github_config, TS
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_github"
FUNCTION_NAME = "github_create_release"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_function(circuits, function_name, function_params, timeout=5):
    """Fire *function_name* through circuits and return its FunctionResult value.

    Re-raises any exception the FunctionComponent raised while handling
    the call.
    """
    event = SubmitTestFunction(function_name, function_params)
    circuits.manager.fire(event)
    # circuits fires an "exception" event if the FunctionComponent raised;
    # surface that exception to the caller.
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        raise exception_event.args[1]
    # Otherwise wait for the function's result event and unwrap it.
    result_event = circuits.watcher.wait(f"{function_name}_result", parent=event, timeout=timeout)
    assert result_event
    assert isinstance(result_event.kwargs["result"], FunctionResult)
    pytest.wait_for(result_event, "complete", True)
    return result_event.kwargs["result"].value
def call_github_create_release_function(circuits, function_params, timeout=5):
    """Invoke the "github_create_release" function and return its result value.

    The body was a byte-for-byte duplicate of call_function specialised to
    one function name; delegate instead of repeating the logic.
    """
    return call_function(circuits, "github_create_release", function_params, timeout)
class TestGithubCreateRelease:
    """Tests for the github release functions (create/get/list/latest)."""
    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None
    @pytest.mark.livetest
    def test_create_release(self, circuits_app):
        """Create a release whose name/tag are timestamped to stay unique per run."""
        create_release_setup = github_config('create_release')
        create_release_setup['github_release_name'] = f"{create_release_setup['github_release_name']}_{TS.strftime('%Y%m%d_%H%M%S')}"
        create_release_setup['github_release_tag'] = f"{create_release_setup['github_release_tag']}_{TS.strftime('%Y%m%d_%H%M%S')}"
        results = call_function(circuits_app, "github_create_release", create_release_setup)
        assert(results['success'])
    @pytest.mark.livetest
    def test_get_release(self, circuits_app):
        """Fetch the release created above via its timestamped tag."""
        get_release_setup = github_config('get_release')
        get_release_setup['github_release_tag'] = f"{get_release_setup['github_release_tag']}_{TS.strftime('%Y%m%d_%H%M%S')}"
        results = call_function(circuits_app, "github_get_release", get_release_setup)
        assert(results['success'])
        assert(results['content'])
    @pytest.mark.livetest
    def test_get_releases(self, circuits_app):
        # Listing all releases should succeed and return content.
        get_releases_setup = github_config('get_releases')
        results = call_function(circuits_app, "github_get_releases", get_releases_setup)
        assert(results['success'])
        assert(results['content'])
    @pytest.mark.livetest
    def test_get_latest_release(self, circuits_app):
        # The latest-release endpoint should also succeed and return content.
        get_releases_setup = github_config('get_latest_release')
        results = call_function(circuits_app, "github_get_latest_release", get_releases_setup)
        assert(results['success'])
        assert(results['content'])
|
[
"[email protected]"
] | |
8ebabb8929c847e3c9edcd7a71bcd0940adfa0c2
|
d44bfb67b8b19f3773558870a71a42e0cd3ec002
|
/telemetry-library/telemetry/telemetry_mqtt.py
|
cc84776095c2500d7a5842bdd8449b8635c5956f
|
[
"Apache-2.0"
] |
permissive
|
Abstract-Horizon/pyros-telemetry
|
764cdbb8cc98b7d72b1b2a04490c4989c003cbd2
|
7ecb5deaf266689555cbf0721f9c156e4dfe28d7
|
refs/heads/master
| 2023-08-17T19:52:24.684594 | 2021-10-11T17:46:40 | 2021-10-11T17:46:40 | 272,370,230 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,580 |
py
|
################################################################################
# Copyright (C) 2016-2020 Abstract Horizon
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Daniel Sendula - initial API and implementation
#
#################################################################################
import paho.mqtt.client as mqtt
import random
import re
import sys
import threading
import time
import traceback
from telemetry.telemetry_logger import TelemetryLogger, LocalPipeTelemetryLoggerDestination, PubSubTelemetryLoggerClient
from telemetry.telemetry_client import PubSubTelemetryClient
class MQTTLocalPipeTelemetryLogger(TelemetryLogger):
    """Telemetry logger that registers its stream over MQTT pub/sub and
    delivers records through a local pipe destination."""
    def __init__(self, stream_name, host="localhost", port=1883, topic='telemetry'):
        self.mqtt = MQTTWrapper(host, port)
        super(MQTTLocalPipeTelemetryLogger, self).__init__(stream_name,
                                                           destination=LocalPipeTelemetryLoggerDestination(),
                                                           telemetry_client=PubSubTelemetryLoggerClient(topic, self.mqtt.publish, self.mqtt.subscribe))
    def init(self):
        """Block until the broker connection is up, register the stream, then
        block until registration completes or fails."""
        # Pump the MQTT loop until the wrapper reports a live connection.
        while not self.mqtt.is_connected():
            self.mqtt.loop(0.02)
        super(MQTTLocalPipeTelemetryLogger, self).init()
        # Keep servicing MQTT until the stream is ready or registration errors.
        while not self.stream_ready and self.registration_error == 0:
            self.mqtt.loop(0.02)
class MQTTTelemetryClient(PubSubTelemetryClient):
    """Telemetry client whose pub/sub transport is an MQTT broker."""
    def __init__(self, host="localhost", port=1883, topic='telemetry'):
        self.mqtt = MQTTWrapper(host, port)
        super(MQTTTelemetryClient, self).__init__(topic, self.mqtt.publish, self.mqtt.subscribe)
class MQTTWrapper:
    """Convenience wrapper around a paho-mqtt client.

    Handles background (re)connection, wildcard topic subscriptions
    dispatched through compiled regexes, and cooperative loop/sleep/forever
    helpers that keep servicing the MQTT socket while waiting.
    """

    def __init__(self, host="localhost", port=1883, auto_init=True):
        self.client = None
        self.host = host
        self.port = port
        # Randomised client id so several wrappers can share one broker.
        self.name = "telemetry-server-" + str(random.randint(10000, 99999))
        self._subscribers = []    # raw topic patterns, re-subscribed on every connect
        self._regexToLambda = {}  # compiled topic regex -> callback(topic, payload)
        self._received = False    # armed by _on_message; polled by loop()/forever()
        self.connected = False
        if auto_init:
            self.init()

    def init(self, wait_to_connect=True):
        """Create the MQTT client, attach callbacks and start connecting.

        When *wait_to_connect* is true, blocks until the broker accepts us.
        """
        self.client = mqtt.Client(self.name)
        self.client.on_disconnect = self._on_disconnect
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message
        if self.host is not None:
            self._connect()
            if wait_to_connect:
                print("    " + self.name + " waiting to connect to broker...")
                while not self.connected:
                    self.loop(0.02)
                print("    " + self.name + " connected to broker.")

    def _connect(self):
        """Start an asynchronous (re)connection attempt in a daemon thread."""
        self.connected = False
        if self.client is not None:
            try:
                self.client.disconnect()
            except Exception:
                pass
            self.client.connect_async(self.host, self.port, 60)
            thread = threading.Thread(target=self._reconnect)
            thread.daemon = True
            thread.start()

    def _on_disconnect(self, _mqtt_client, _data, _rc):
        # Any disconnect triggers a fresh background reconnect.
        self._connect()

    def _on_connect(self, mqtt_client, _data, _flags, rc):
        if rc == 0:
            self.connected = True
            # Re-establish every subscription on each (re)connect.
            for subscriber in self._subscribers:
                mqtt_client.subscribe(subscriber, 0)
        else:
            print("ERROR: Connection returned error result: " + str(rc))
            sys.exit(rc)

    def _on_message(self, _mqtt_client, _data, msg):
        # BUG FIX: this used `global _received`, so the *instance* flag polled
        # by loop()/forever() was never set; arm the instance flag instead.
        self._received = True
        topic = msg.topic
        try:
            # Dispatch to the first registered pattern that matches the topic.
            for regex in self._regexToLambda:
                matching = regex.match(topic)
                if matching:
                    method = self._regexToLambda[regex]
                    method(topic, msg.payload)
                    return
        except Exception as ex:
            print("ERROR: Got exception in on message processing; " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))

    def _reconnect(self):
        # Best-effort reconnect; failures are retried via _on_disconnect.
        try:
            self.client.reconnect()
        except Exception:
            pass

    def publish(self, topic, message):
        """Publish *message* to *topic*; silently dropped while disconnected."""
        if self.connected:
            self.client.publish(topic, message)

    def subscribe(self, topic, method):
        """Register *method*(topic, payload) for messages matching *topic*.

        MQTT wildcards are supported: '+' matches one level, '#' the rest.
        """
        self._subscribers.append(topic)
        regex_string = "^" + topic.replace("+", "([^/]+)").replace("#", "(.*)") + "$"
        regex = re.compile(regex_string)
        self._regexToLambda[regex] = method
        if self.connected:
            self.client.subscribe(topic, 0)

    def is_connected(self):
        """True once the broker has acknowledged the connection."""
        return self.connected

    def sleep(self, delta_time):
        # BUG FIX: this called self.loop(self, delta_time), passing the wrapper
        # itself as the delay and the delay as _inner; forward correctly.
        self.loop(delta_time)

    def loop(self, delta_time, _inner=None):
        """Service the MQTT socket for *delta_time* seconds.

        Polls eagerly while messages keep arriving, otherwise naps in 2 ms
        slices between socket polls.
        """
        current_time = time.time()
        self._received = False
        self.client.loop(0.0005)  # service the socket for 0.5 ms
        until = current_time + delta_time
        while current_time < until:
            if self._received:
                # A message just arrived - poll again straight away.
                self._received = False
                self.client.loop(0.0005)
                current_time = time.time()
            else:
                time.sleep(0.002)  # idle nap of 2 ms
                current_time = time.time()
                if current_time + 0.0005 < until:
                    self.client.loop(0.0005)
                    current_time = time.time()

    def forever(self, delta_time, outer=None, inner=None):
        """Run *outer* every *delta_time* seconds forever, servicing MQTT in between."""
        current_time = time.time()
        next_time = current_time
        while True:
            next_time = next_time + delta_time
            try:
                if outer is not None:
                    outer()
            except BaseException as ex:
                print("ERROR: Got exception in main loop; " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
            current_time = time.time()
            sleep_time = next_time - current_time
            if sleep_time < 0.002:
                # We're behind schedule: just drain pending messages briefly.
                next_time = current_time
                self._received = False
                self.client.loop(0.0005)
                count = 10  # drain at most 10 pending messages before continuing
                while count > 0 and self._received:
                    # BUG FIX: the flag was re-set to True here, forcing all 10
                    # iterations regardless of traffic; clear it and let
                    # _on_message re-arm it when another message arrives.
                    self._received = False
                    count -= 1
                    self.client.loop(0.0005)
            else:
                self.loop(sleep_time, inner=inner)
|
[
"[email protected]"
] | |
09da46de08db8efd21ef86e80c0bd1b0bfa4641f
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/fbs_0140+360/sdB_FBS_0140+360_lc.py
|
757218cb78912bdc16ba760e680f6cef9c974b74
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
from gPhoton.gAperture import gAperture
def main():
    """Build a 30 s-binned NUV light curve for sdB FBS 0140+360 with gPhoton.

    NOTE(review): the csvfile path contains a space before the final slash
    ("...sdB_FBS_0140+360 /sdB...") — likely a copy/generation artifact;
    confirm the directory name before running.
    """
    gAperture(band="NUV", skypos=[25.926708,36.25925], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_FBS_0140+360 /sdB_FBS_0140+360_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
0f923332d74ab03086681ff9097adf5ed4fd7603
|
70ec704fdd3e30c5df97323cd4c9532ebfd544ea
|
/ml_wiki/ch2/selenium-login.py
|
d53a91bf593dc64f47bd9b445885c8954f53b454
|
[] |
no_license
|
smart1004/learn_src
|
e02c13c82bae65b7de2a572e4a1ae58e2ea11588
|
353f92f7657a6f676a271d8d7f00d7c20e39d234
|
refs/heads/master
| 2021-01-25T11:49:49.906805 | 2018-04-08T13:26:18 | 2018-04-08T13:26:18 | 123,435,997 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,073 |
py
|
from selenium import webdriver
# Login credentials (placeholders — fill in before running).
USER = "<아이디>"
PASS = "<비밀번호>"
# Obtain the PhantomJS driver --- (1)
browser = webdriver.PhantomJS()
browser.implicitly_wait(3)
# Open the login page --- (2)
url_login = "https://nid.naver.com/nidlogin.login"
browser.get(url_login)
print("로그인 페이지에 접근합니다.")
# Fill the id and password text boxes --- (3)
e = browser.find_element_by_id("id")
e.clear()
e.send_keys(USER)
e = browser.find_element_by_id("pw")
e.clear()
e.send_keys(PASS)
# Submit the form to log in --- (4)
form = browser.find_element_by_css_selector("input.btn_global[type=submit]")
form.submit()
print("로그인 버튼을 클릭합니다.")
# Fetch the shopping-order page --- (5)
browser.get("https://order.pay.naver.com/home?tabMenu=SHOPPING")
# Print the shopping list --- (6)
products = browser.find_elements_by_css_selector(".p_info span")
print(products)
for product in products:
    print("-", product.text)
|
[
"[email protected]"
] | |
38459d585c7f1861e8774c7571859a85236be08b
|
6515a47190986c4f3b6beececfabab42e3d34e34
|
/Models/GPT2_Model/Model/GPT2LMHead.py
|
7f0304b3b13ce6e44e8c037aec7823bb34427b7e
|
[] |
no_license
|
jk96491/Advanced_Models
|
f4140936f5004ed9a9464ad745b33e52d63157fa
|
cde49356fec3c53296446a54f4be497a89dd08cd
|
refs/heads/master
| 2023-06-14T02:26:43.869417 | 2021-06-30T13:07:31 | 2021-06-30T13:07:31 | 143,489,382 | 60 | 16 | null | null | null | null |
UTF-8
|
Python
| false | false | 782 |
py
|
import torch.nn as nn
class GPT2LMHead(nn.Module):
    """Language-modeling head projecting hidden states onto the vocabulary.

    The projection matrix is tied to the token-embedding weights, so the
    head introduces no parameters of its own.
    """

    def __init__(self, model_embeddings_weights, config):
        super(GPT2LMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights):
        """(Re)bind the decoder to *model_embeddings_weights* (weight tying)."""
        embed_shape = model_embeddings_weights.shape
        decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        decoder.weight = model_embeddings_weights  # shared with the embedding table
        self.decoder = decoder

    def forward(self, hidden_state):
        """Map *hidden_state* of size (..., n_embd) to vocabulary logits."""
        return self.decoder(hidden_state)
|
[
"[email protected]"
] | |
1e236e30d75c559339f1261b732a9a70d9df7122
|
35053a371d85c2d45a4f52239d8a70b38194ef48
|
/Can Place Flowers.py
|
86bf8aeeb427181d1fe805cbf5b1d1bcb364a643
|
[] |
no_license
|
Kuehar/LeetCode
|
51d169c81a2e572ea854399fc78e1130220388f9
|
4555c20455f181f9dd7b3aba2a8779dea795edfb
|
refs/heads/master
| 2023-04-16T10:13:03.584541 | 2023-04-06T11:47:21 | 2023-04-06T11:47:21 | 243,361,421 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 930 |
py
|
class Solution:
    def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
        """Return True if *n* more flowers fit in *flowerbed* (list of 0/1
        plots) without any two flowers ever being adjacent.
        """
        if n == 0: return True
        if len(flowerbed) == 0: return False
        if len(flowerbed) == 1:
            # Bugfix: a single empty plot holds at most ONE flower; the
            # original returned True here for any n >= 1.
            return flowerbed[0] == 0 and n == 1
        # Work on a copy so the caller's list is not mutated as a side effect.
        flowerbed = flowerbed[:]
        # Left edge: plot 0 only needs plot 1 empty.
        pre,cur = flowerbed[0],flowerbed[1]
        if pre + cur == 0:
            flowerbed[0] = 1
            n -= 1
        # Right edge: last plot only needs its left neighbour empty.
        cur,nex = flowerbed[-1],flowerbed[-2]
        if cur + nex == 0:
            flowerbed[-1] = 1
            n -= 1
        # Greedily plant wherever a plot and both neighbours are empty.
        for i in range(2,len(flowerbed)-2):
            pre = flowerbed[i-1]
            cur = flowerbed[i]
            nex = flowerbed[i+1]
            if (pre + cur + nex) == 0:
                flowerbed[i] = 1
                n -= 1
        return n <= 0
# Runtime: 164 ms, faster than 58.48% of Python3 online submissions for Can Place Flowers.
# Memory Usage: 14.5 MB, less than 89.00% of Python3 online submissions for Can Place Flowers.
|
[
"[email protected]"
] | |
2e79679ebdc6ebb91b85f95ac5bccc7866b865ab
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/openstackclient/tests/functional/image/v2/test_image.py
|
6faff94a3295cd738875bfb8fda2baf1ac162efb
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 |
Python
|
UTF-8
|
Python
| false | false | 3,865 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from openstackclient.tests.functional import base
class ImageTests(base.TestCase):
    """Functional tests for image. """
    # Random names keep concurrent runs from colliding on the image service;
    # tearDownClass renames NAME to OTHER_NAME before deleting it.
    NAME = uuid.uuid4().hex
    OTHER_NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']
    @classmethod
    def setUpClass(cls):
        # Pin the Image API version and create the shared fixture image once.
        os.environ['OS_IMAGE_API_VERSION'] = '2'
        opts = cls.get_opts(cls.FIELDS)
        raw_output = cls.openstack('image create ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)
    @classmethod
    def tearDownClass(cls):
        # Rename test
        raw_output = cls.openstack('image set --name ' + cls.OTHER_NAME + ' '
                                   + cls.NAME)
        cls.assertOutput('', raw_output)
        # Delete test
        raw_output = cls.openstack('image delete ' + cls.OTHER_NAME)
        cls.assertOutput('', raw_output)
    def test_image_list(self):
        # The fixture image must appear in the plain listing.
        opts = self.get_opts(self.HEADERS)
        raw_output = self.openstack('image list' + opts)
        self.assertIn(self.NAME, raw_output)
    def test_image_show(self):
        opts = self.get_opts(self.FIELDS)
        raw_output = self.openstack('image show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
    def test_image_set(self):
        # 'image set' prints nothing; verify the change via 'image show'.
        opts = self.get_opts([
            "disk_format", "visibility", "min_disk", "min_ram", "name"])
        self.openstack('image set --min-disk 4 --min-ram 5 ' +
                       '--public ' + self.NAME)
        raw_output = self.openstack('image show ' + self.NAME + opts)
        self.assertEqual("raw\n4\n5\n" + self.NAME + '\npublic\n', raw_output)
    def test_image_metadata(self):
        opts = self.get_opts(["name", "properties"])
        self.openstack('image set --property a=b --property c=d ' + self.NAME)
        raw_output = self.openstack('image show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\na='b', c='d'\n", raw_output)
    def test_image_unset(self):
        # NOTE(review): relies on test_image_metadata having run first to set
        # properties "a" and "c" -- these tests are order-dependent.
        opts = self.get_opts(["name", "tags", "properties"])
        self.openstack('image set --tag 01 ' + self.NAME)
        self.openstack('image unset --tag 01 ' + self.NAME)
        # test_image_metadata has set image properties "a" and "c"
        self.openstack('image unset --property a --property c ' + self.NAME)
        raw_output = self.openstack('image show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n\n", raw_output)
    def test_image_members(self):
        # Membership workflow: add this project as a member, accept the
        # share, verify visibility, reject it, then remove the member.
        opts = self.get_opts(['project_id'])
        my_project_id = self.openstack('token issue' + opts).strip()
        self.openstack(
            'image add project {} {}'.format(self.NAME, my_project_id))
        self.openstack(
            'image set --accept ' + self.NAME)
        shared_img_list = self.parse_listing(
            self.openstack('image list --shared', self.get_opts(['name']))
        )
        self.assertIn(self.NAME, [img['Name'] for img in shared_img_list])
        self.openstack(
            'image set --reject ' + self.NAME)
        shared_img_list = self.parse_listing(
            self.openstack('image list --shared', self.get_opts(['name']))
        )
        self.openstack(
            'image remove project {} {}'.format(self.NAME, my_project_id))
|
[
"[email protected]"
] | |
4fe46a3e69863bca6e98c1cb6ab5c17fd36f8261
|
5c531de5e4759c904e608b4fc653b2b041f79a0e
|
/779. K-th Symbol in Grammar.py
|
06e46cf683b3f090a9b595db7b7a9fd6675029aa
|
[] |
no_license
|
jianhui-ben/leetcode_python
|
133c7e6e5c7316d00607ba2e327239e002de28b2
|
fcc16124cc24a5993e27f5d97e78d8f290e68230
|
refs/heads/master
| 2022-06-05T22:32:18.034581 | 2022-05-17T02:27:11 | 2022-05-17T02:27:11 | 250,683,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 728 |
py
|
#779. K-th Symbol in Grammar
#On the first row, we write a 0. Now in every subsequent row, we look at the previous row and replace each occurrence of 0 with 01, and each occurrence of 1 with 10.
#Given row N and index K, return the K-th indexed symbol in row N. (The values of K are 1-indexed.) (1 indexed).
#Examples:
#Input: N = 1, K = 1
#Output: 0
#Input: N = 2, K = 1
#Output: 0
#Input: N = 2, K = 2
#Output: 1
#Input: N = 4, K = 5
#Output: 1
class Solution:
    def kthGrammar(self, N: int, K: int) -> int:
        """Return the K-th (1-indexed) symbol in row N of the 0->01 / 1->10 grammar.

        Iterative form of the parent walk: moving up one row at a time, the
        symbol flips whenever K is a second child (even position).
        """
        flips = 0
        while N > 1:
            if K % 2 == 0:
                flips = 1 - flips
            K = (K + 1) // 2
            N -= 1
        return flips
|
[
"[email protected]"
] | |
600d648aef968fa6d9aaf3ddd8d410059382df4b
|
65f856bb3c782fe2fec794192260d5b7aa997ef3
|
/wsc_django/wsc_django/apps/shop/services.py
|
0a53f3c8e183bdcaeeefad41252f7a5440069671
|
[
"MIT"
] |
permissive
|
hzh595395786/wsc_django
|
0c8faf0cac1d8db8d9e3fa22f6914b6b64bf788b
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
refs/heads/main
| 2023-06-06T07:26:17.979944 | 2021-06-24T13:14:53 | 2021-06-24T13:14:53 | 336,303,377 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,909 |
py
|
from uuid import uuid4
from django.db.models import Count
from product.constant import ProductStatus
from shop.models import Shop, HistoryRealName, ShopRejectReason, PayChannel
from shop.utils import get_shop_mini_program_qcode, put_qcode_file_to_tencent_cos
from user.models import User
from shop.constant import (
ShopStatus,
)
def create_shop(shop_info: dict, user: User):
    """Create a shop owned by *user*.

    *shop_info* carries the shop fields (name, image, region codes, address,
    description, contact phones, ...); shop_code, shop_phone and
    super_admin_id are filled in here before saving.

    :param shop_info: dict of shop attributes
    :param user: the user creating (and owning) the shop
    :return: the saved Shop instance
    """
    # Draw random 9-character codes until one is free of collisions.
    while True:
        shop_code = str(uuid4())[-9:]
        if not Shop.objects.filter(shop_code=shop_code):
            break
    shop_info["shop_code"] = shop_code
    shop_info["shop_phone"] = user.phone
    shop_info["super_admin_id"] = user.id
    new_shop = Shop(**shop_info)
    new_shop.save()
    return new_shop
def create_pay_channel(pay_channel_info: dict, shop_id: int):
    """Create and persist a payment channel for the given shop.

    :param pay_channel_info: channel attributes
    :param shop_id: owning shop's primary key
    :return: the saved PayChannel instance
    """
    channel = PayChannel(shop_id=shop_id, **pay_channel_info)
    channel.save()
    return channel
def create_shop_reject_reason_by_shop_id(shop_id: int, reject_reason: str):
    """Record the reason a shop application was rejected.

    :param shop_id: the rejected shop's id (used as the record id)
    :param reject_reason: human-readable rejection reason
    :return: the saved ShopRejectReason instance
    """
    record = ShopRejectReason(id=shop_id, reject_reason=reject_reason)
    record.save()
    return record
def create_shop_creator_history_realname(shop_id: int, history_realname: str):
    """Store the shop creator's real name at creation time, keyed by shop id.

    :param shop_id: the shop's id (used as the record id)
    :param history_realname: the creator's real name to archive
    :return: the saved HistoryRealName instance
    """
    record = HistoryRealName(id=shop_id, realname=history_realname)
    record.save()
    return record
def create_shop_mini_program_qcode(shop_code: str):
    """Generate a Mini Program QR code for the shop and upload it to Tencent COS.

    :param shop_code: the shop's unique code
    :return: (success flag, uploaded file URL)
    """
    qcode_file = get_shop_mini_program_qcode(shop_code)
    success, url = put_qcode_file_to_tencent_cos(qcode_file, shop_code)
    return success, url
def update_shop_data(shop: Shop, args: dict):
    """Apply the given field updates to *shop* and persist it.

    :param shop: the Shop instance to modify
    :param args: mapping of field name -> new value
    :return: the updated Shop instance
    """
    for field, value in args.items():
        setattr(shop, field, value)
    shop.save()
    return shop
def get_shop_by_shop_code(shop_code: str, only_normal: bool = True):
    """Look up a Shop by its shop_code.

    :param shop_code: the shop's unique code
    :param only_normal: restrict the lookup to shops in NORMAL status
    :return: the matching Shop instance, or None
    """
    queryset = Shop.objects.filter(shop_code=shop_code)
    if queryset and only_normal:
        queryset = queryset.filter(status=ShopStatus.NORMAL)
    return queryset.first()
def get_shop_by_shop_id(shop_id: int, filter_close: bool = True):
    """Fetch a Shop by primary key.

    :param shop_id: shop primary key
    :param filter_close: when True, closed shops are excluded
    :return: the matching Shop instance, or None
    """
    queryset = Shop.objects.filter(id=shop_id)
    if queryset and filter_close:
        queryset = queryset.exclude(status=ShopStatus.CLOSED)
    return queryset.first()
def list_shop_by_shop_ids(shop_ids: list, filter_close: bool = True, role: int = 1):
    """List shops by a set of ids.

    :param shop_ids: ids to look up
    :param filter_close: when True, closed shops are excluded
    :param role: 1 = regular user (only NORMAL/approved shops are visible),
                 2 = admin user
    :return: the matching shops
    """
    queryset = Shop.objects.filter(id__in=shop_ids)
    if queryset and filter_close:
        queryset = queryset.exclude(status=ShopStatus.CLOSED)
        # Regular users may only see approved shops.
        if role == 1:
            queryset = queryset.filter(status=ShopStatus.NORMAL)
    return queryset.all()
def list_shop_by_shop_status(shop_status: int):
    """Return every shop with the given status, oldest update first.

    :param shop_status: status value to filter on
    :return: shops ordered by update_at ascending
    """
    return Shop.objects.filter(status=shop_status).order_by('update_at').all()
def list_shop_creator_history_realname(shop_ids: list):
    """Return the archived creator real names for the given shops.

    :param shop_ids: shop ids (HistoryRealName records share the shop id)
    :return: the matching HistoryRealName records
    """
    return HistoryRealName.objects.filter(id__in=shop_ids).all()
def list_shop_reject_reason(shop_ids: list):
    """Return all rejection reasons recorded for the given shops."""
    return ShopRejectReason.objects.filter(id__in=shop_ids).all()
|
[
"[email protected]"
] | |
a4988105b8f44db42f20393940d9d3a3ae4e6178
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/191/bmi.py
|
0ee130d805eb92fd958498062113b022207001d6
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,056 |
py
|
# data """Luke Skywalker,172,77
# C-3PO,167,75
# R2-D2,96,32
# Darth Vader,202,136
# Leia Organa,150,49
# Owen Lars,178,120
# Beru Whitesun lars,165,75
# R5-D4,97,32
# Biggs Darklighter,183,84
# Obi-Wan Kenobi,182,77
# Anakin Skywalker,188,84
# Chewbacca,228,112
# Han Solo,180,80
# Greedo,173,74
# Jek Tono Porkins,180,110
# Yoda,66,17
# Palpatine,170,75
# Boba Fett,183,78.2
# IG-88,200,140
# Bossk,190,113
# """
#
#
# ___ person_max_bmi data_?
# """Return (name, BMI float) of the character in data that
# has the highest BMI (rounded on 2 decimals)"""
# bmi # dict
# data_list ?.s.. "\n"
#
# ___ row __ ?
# current ?.s...s.. ","
# __ l.. ? > 1
# ? ? 0 f__ c.. 2 / i.. ? 1 / 100) ** 2
#
# name_max_bmi m.. b.. key b__.g..
# r.. ? r.. b.. ? 2
#
# # if __name__ == "__main__":
# # print(person_max_bmi())
|
[
"[email protected]"
] | |
94171e19440d59601861aee4f580b056a82ba31e
|
104085f6878411a137521b17c06612e5f648ef33
|
/service_pro/service_pro/doctype/agent_payment_request/agent_payment_request_dashboard.py
|
bf5bad55f86a6116faab28263273e7b0828fce28
|
[
"MIT"
] |
permissive
|
ksbbalean/service-pro
|
d39f0d12977dd66627b9f7c0336c605d7be4c388
|
c89b39a8e9967dada50dc0db4b08460ed45843bf
|
refs/heads/master
| 2023-04-13T05:35:19.842021 | 2021-04-22T11:05:18 | 2021-04-22T11:05:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
from frappe import _
def get_data():
    """Dashboard configuration linking Journal Entries to Agent Payment Request."""
    linked_forms = {
        'label': _('Linked Forms'),
        'items': ["Journal Entry"],
    }
    return {
        'fieldname': 'agent_payment_request',
        'transactions': [linked_forms],
    }
|
[
"[email protected]"
] | |
9efe79a16c6f27bddfc4536d573389398935b830
|
3b5d1a53af8d2f4094005f342403eabc7af9c980
|
/moderation_module/storage/logging_data.py
|
5f1eb3107e2c0a2e75844b5cbdd60700cde60414
|
[
"MIT"
] |
permissive
|
alentoghostflame/StupidAlentoBot
|
daa828be3d47b24d3e13d500155a6a0d2019f724
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
refs/heads/master
| 2021-06-30T17:50:14.997416 | 2021-06-08T03:54:24 | 2021-06-08T03:54:24 | 237,541,303 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
from alento_bot import guild_data_transformer
import logging
import typing
logger = logging.getLogger("main_bot")
@guild_data_transformer(name="guild_logging_config")
class GuildLoggingConfig:
    """Per-guild configuration for the logging feature."""
    def __init__(self):
        # Master on/off switch for logging in this guild.
        self.toggled_on: bool = False
        # Channel ID that log output is sent to (0 = unset).
        self.log_channel_id: int = 0
        # Channel IDs whose activity is never logged.
        self.exempt_channels: typing.Set[int] = set()
        # Whether activity from bot accounts is logged as well.
        self.log_bots: bool = False
|
[
"[email protected]"
] | |
6f361c7d8b2af01f6ee96c8df06630eaf5cef7f8
|
1929a989d1e2a5c5caabad32aa8baf4444250574
|
/h2o-py/tests/testdir_munging/pyunit_upload_large.py
|
3d4d69107d8603c202a6d6e94a6ae18df88df391
|
[
"Apache-2.0"
] |
permissive
|
codelibs/h2o-3
|
9c417c0c6ee4ae9a6eaffe5a0373c0d78c37527e
|
cf96fb28da4732870a0d65c24f0d99f422d140d1
|
refs/heads/master
| 2023-05-27T10:04:14.408620 | 2023-04-28T18:16:48 | 2023-04-28T18:16:48 | 253,197,280 | 0 | 0 |
Apache-2.0
| 2020-04-05T09:22:41 | 2020-04-05T09:22:40 | null |
UTF-8
|
Python
| false | false | 772 |
py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
import os
import tempfile
from tests import pyunit_utils
def generate_large_file(path, size):
    """Create a sparse file of exactly *size* bytes at *path*.

    Seeks to the last byte and writes a single NUL, so the file occupies
    little disk space while reporting the requested size.
    """
    with open(path, "wb") as handle:
        handle.seek(size - 1)
        handle.write(b"\0")
    assert size == os.stat(path).st_size
def upload_large_file():
    """Upload a file just over 2 GB via POST /3/PostFile and verify the byte count."""
    target = os.path.join(tempfile.mkdtemp(), "large.bin")
    byte_size = 2 * 1024 * 1024 * 1024 + 1  # 2GB + 1 byte
    generate_large_file(target, byte_size)
    raw_data = h2o.api("POST /3/PostFile", filename=target)
    print(raw_data)
    assert raw_data["total_bytes"] == byte_size
    # Clean up the frame created by the upload.
    h2o.remove(raw_data["destination_frame"])
if __name__ == "__main__":
    pyunit_utils.standalone_test(upload_large_file)
else:
    # Also executed when the module is imported (not run as a script).
    upload_large_file()
|
[
"[email protected]"
] | |
6f927f95ffc8e9ede4b6ba26df040a784d1f5146
|
8f5cb19e9c6a0670100b4a4fbdbb892d94ccd4a8
|
/deployment/georegistry.py
|
5c1e240d6a6b964f6e177dd39a8f7f9b1dc6a607
|
[] |
no_license
|
invisibleroads/georegistry
|
84438e680e56ac716f60d23784f05469c4888841
|
df56cc17b01a794bfbd53f354bb5fa9abeb420cc
|
refs/heads/master
| 2023-08-24T03:34:50.554375 | 2011-05-05T16:36:19 | 2011-05-05T16:36:19 | 966,680 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,359 |
py
|
'GeoRegistry API Python wrapper'
# Import system modules
import urllib
import urllib2
import simplejson
# Core
baseURL = 'http://georegistry.invisibleroads.com'
def updateFeatures(key, srid, featureCollection, tags, public=False):
    'Update features using the GeoRegistry web service'
    response = call(baseURL + '/features', {
        'key': key,
        'srid': srid,
        'featureCollection': featureCollection,
        'tags': '\n'.join(tags),
        'public': 1 if public else 0,
    }, 'POST')
    # The service answers with one feature ID per line.
    return [int(line) for line in response.splitlines()]
def deleteFeatures(key, featureIDs):
    'Delete features using the GeoRegistry web service'
    call(baseURL + '/features', {
        'key': key,
        'featureIDs': '\n'.join(str(featureID) for featureID in featureIDs),
    }, 'DELETE')
def getTags(key):
    'Get tags with visible features using the GeoRegistry web service'
    responseData = call(baseURL + '/tags' + '.json', {
        'key': key,
    }, 'GET')
    # One tag per line in the response body.
    return responseData.splitlines()
def viewMaps(key, srid, tags, simplified=True, bboxFormat='yxyx', bbox=None):
    'Assemble a map using the GeoRegistry web service'
    parameters = {
        'key': key,
        'srid': srid,
        'tags': '\n'.join(tags),
        'bboxFormat': bboxFormat,
        'bbox': bbox if bbox else '',
        'simplified': 1 if simplified else 0,
    }
    return call(baseURL + '/maps' + '.json', parameters, 'GET')
# Helpers
def call(url, valueByName, method):
    'Call a method in the GeoRegistry web service'
    # Form-encode the parameters; POST sends them in the request body,
    # every other verb appends them to the query string. (Python 2 code.)
    requestData = urllib.urlencode(valueByName.items())
    request = Request(method, url, requestData) if method.upper() == 'POST' else Request(method, url + '?' + requestData)
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError, error:
        # Surface the server's error body as a GeoRegistryError.
        raise GeoRegistryError(error.read())
    return response.read()
class Request(urllib2.Request):
    # urllib2.Request chooses GET/POST from the presence of data; this
    # subclass pins an explicit HTTP verb (e.g. DELETE, PUT) instead.
    def __init__(self, method, *args, **kwargs):
        self._method = method
        urllib2.Request.__init__(self, *args, **kwargs)
    def get_method(self):
        return self._method
# Error
class GeoRegistryError(Exception):
    'Raised when the GeoRegistry web service returns an HTTP error.'
    pass
|
[
"[email protected]"
] | |
8071db56a1faa459eccd4c3bfbd0c735f51f2c1e
|
6ace7e15e3191d1b8228ad7922a8552ca84f84e7
|
/.history/image_detector_20200614203341.py
|
2465a36001cd934f7bd739e37f170e75e719b85c
|
[] |
no_license
|
mehmetaliarican/Similar-Image-Finder
|
f72e95be50c51aa03fc64954a03124b199ca64b1
|
a9e0015c443b4a73394099cccf60329cfc4c7cef
|
refs/heads/master
| 2022-10-27T00:57:43.173993 | 2020-06-14T18:02:16 | 2020-06-14T18:02:16 | 272,256,295 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,485 |
py
|
from skimage.metrics import structural_similarity as ssim
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import os
import argparse
ap = argparse.ArgumentParser()
# Minimum SSIM score for a pair of images to be reported as similar.
ap.add_argument("-t", "--threshold", type=float, default=0.9,
                help="threshold")
ap.add_argument("-d", "--dataset", required=True,
                help="path to input dataset")
args = vars(ap.parse_args())
class Utility:
    """Compares image pairs and displays the ones whose structural
    similarity (SSIM) reaches the --threshold argument."""

    # Counters / state shared across comparisons.
    totalFound = 0
    totalSearch = 0
    searching = False

    def mse(self, imageA, imageB):
        """Return the Mean Squared Error between two equally-sized images.

        The lower the error, the more "similar" the two images are.
        """
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1])
        return err

    def compare_images(self, im1, im2, imageA, imageB):
        """Compare two images; show them side by side when SSIM >= threshold.

        im1/im2 are metadata dicts with 'comp' and 'path' keys; imageA/imageB
        are the corresponding grayscale arrays.
        """
        s = ssim(imageA, imageB)
        tres = args['threshold']
        # Bugfix: the original line was `totalSearch++`, a C-style increment
        # that is a SyntaxError in Python and crashed the script on import.
        self.totalSearch += 1
        # (The unused `m = self.mse(...)` call was removed; mse() is pure.)
        if s >= tres:
            print("Image[{c1}] '{p1}' compared to Image[{c2}] '{p2}' Simility:{sim}".format(c1=im1['comp'], c2=im2['comp'],p1=im1['path'], p2=im2['path'], sim=str(s)))
            twin = np.hstack([imageA, imageB])
            cv2.imshow('', twin)
            cv2.waitKey(0)
            self.searching = False
        elif self.searching is False:
            print('Searching...')
            self.searching = True
imagePaths = list(paths.list_images(args['dataset']))
companies = ['dhl', 'paypal', 'wellsfargo']
all_data = []
# Tag every image path with the company name that appears in it.
for path in imagePaths:
    company = ''
    for c in companies:
        if c in path:
            company = c
            all_data.append({'comp': c, 'path': path})
print(all_data)
u = Utility()
# Compare every image against every other image (O(n^2) pairs); images are
# normalized to 300x300 grayscale before comparison.
for image in all_data:
    try:
        p1 = cv2.imread(image['path'])
        p1 = cv2.resize(p1, (300, 300))
        p1 = cv2.cvtColor(p1, cv2.COLOR_BGR2GRAY)
        for i in all_data:
            if i['path'] != image['path']:
                p2 = cv2.imread(i['path'])
                p2 = cv2.resize(p2, (300, 300))
                p2 = cv2.cvtColor(p2, cv2.COLOR_BGR2GRAY)
                u.compare_images(image, i, p1, p2)
    except Exception as e:
        # Best-effort: log the failure and keep comparing the rest.
        print(str(e))
|
[
"[email protected]"
] | |
5268cff948f9c48f0fd6138032a6afd729243dd6
|
2a6412a9359a1df5f8f12e319e73b9e4e46fd64c
|
/specializedSubjects/AlgorithmII/dijkstra_ON2.py
|
f7130f780bcfbbcb7e2864c816bf76de51c44942
|
[] |
no_license
|
danganhvu1998/myINIAD
|
504d1147a02f12e593f30e369daf82f85aa01bfd
|
01547673dd3065efb6c7cc8db77ec93a5a4f5d98
|
refs/heads/master
| 2022-03-17T12:58:34.647229 | 2022-02-08T06:34:19 | 2022-02-08T06:34:19 | 143,675,719 | 1 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 770 |
py
|
import networkx as nx
import matplotlib.pyplot as plt
def get_min(D, X):
    """Return the index in X whose distance in D is smallest, or -1.

    Indices whose distance is infinity are never selected.
    """
    best_index = -1
    best_value = float('inf')
    for index, value in enumerate(D):
        if value < best_value and index in X:
            best_index = index
            best_value = value
    return best_index
def dijkstra(src, G):
    """Return shortest-path distances from *src* to every node of G.

    O(N^2) variant: the closest unvisited node is found by a linear scan
    (get_min) instead of a priority queue.
    """
    dist = [float('inf')] * nx.number_of_nodes(G)
    dist[src] = 0.0
    unvisited = set(G.nodes)
    while unvisited:
        node = get_min(dist, unvisited)
        unvisited.remove(node)
        for neighbor in G.neighbors(node):
            if neighbor not in unvisited:
                continue
            candidate = dist[node] + G.edges[node, neighbor]['weight']
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
    return dist
# Demo: load a weighted edge list from disk, print distances from node 0,
# then draw the graph.
G= nx.read_weighted_edgelist('dij.edgelist', nodetype=int)
print(dijkstra(0, G))
nx.draw_networkx(G)
plt.show()
|
[
"[email protected]"
] | |
62f8f6e45e2c8fa0b96b0ee822ef9e2ee1a0d83b
|
44a7b4879c1da661cc2e8aa51c7fcc24cfb0fd3b
|
/src/scs_core/osio/manager/user_manager.py
|
f14e3e70118f2019ef5dd083551e6ca93ec113de
|
[
"MIT"
] |
permissive
|
seoss/scs_core
|
21cd235c9630c68f651b9a8c88120ab98fe5f513
|
a813f85f86b6973fa77722a7d61cc93762ceba09
|
refs/heads/master
| 2021-08-08T08:09:56.905078 | 2020-04-16T19:46:52 | 2020-04-16T19:46:52 | 156,239,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,315 |
py
|
"""
Created on 21 Mar 2017
@author: Bruno Beloff ([email protected])
"""
from scs_core.osio.client.rest_client import RESTClient
from scs_core.osio.data.user import User
from scs_core.osio.data.user_metadata import UserMetadata
# --------------------------------------------------------------------------------------------------------------------
class UserManager(object):
    """
    Finds and updates OSIO user records via the REST API.
    """

    # ----------------------------------------------------------------------------------------------------------------
    def __init__(self, http_client, api_key):
        """
        Constructor
        """
        self.__rest_client = RESTClient(http_client, api_key)

    # ----------------------------------------------------------------------------------------------------------------
    def __get_jdict(self, request_path):
        # Shared GET helper (find/find_public previously duplicated this
        # connect/get/close sequence): returns the response dict, or None
        # when the request fails with a RuntimeError.
        self.__rest_client.connect()
        try:
            response_jdict = self.__rest_client.get(request_path)
        except RuntimeError:
            response_jdict = None
        self.__rest_client.close()
        return response_jdict

    def find(self, user_id):
        # Full (authenticated) user record.
        response_jdict = self.__get_jdict('/v1/users/' + user_id)
        return User.construct_from_jdict(response_jdict)

    def find_public(self, user_id):
        # Public metadata only.
        response_jdict = self.__get_jdict('/v1/public/users/' + user_id)
        return UserMetadata.construct_from_jdict(response_jdict)

    def find_members_of_org(self, org_id):
        # Not implemented yet.
        pass

    # ----------------------------------------------------------------------------------------------------------------
    def update(self, user_id, user):
        """PUT the user's JSON representation; always closes the client."""
        request_path = '/v1/users/' + user_id
        # request...
        self.__rest_client.connect()
        try:
            self.__rest_client.put(request_path, user.as_json())
        finally:
            self.__rest_client.close()

    # ----------------------------------------------------------------------------------------------------------------
    def __str__(self, *args, **kwargs):
        return "UserManager:{rest_client:%s}" % self.__rest_client
|
[
"[email protected]"
] | |
2db16db5c0570084ec0dbb9abc064697f824fa90
|
f51aff57f826aeea1be21e2d0c03cce0adaadefc
|
/exp/utils/rand.py
|
70cd3809130978b9f18a56c77772c3f8afb2594d
|
[
"MIT"
] |
permissive
|
zv5dmjq5/vivit
|
0a26f8b61e6f00da75fce7a9bbc75b0185ffea76
|
a05f448d1badb2db42e724c80676ce7e309194d2
|
refs/heads/master
| 2023-07-12T06:36:10.627912 | 2021-08-26T12:02:59 | 2021-08-26T12:02:59 | 370,409,161 | 1 | 0 |
MIT
| 2021-08-10T12:58:27 | 2021-05-24T16:00:04 |
Python
|
UTF-8
|
Python
| false | false | 1,198 |
py
|
"""Utility functions to control random seeds."""
import torch
class temporary_seed:
    """Run a block of code under a fixed PyTorch seed, then restore the RNG.

    On entry the global RNG state is saved and the seed is set to the given
    value; on exit the saved state is restored, so code inside the block
    leaves the outer loop's random sequence untouched.
    """

    def __init__(self, temp_seed):
        self._seed = temp_seed

    def __enter__(self):
        """Save the outer RNG state and switch to the temporary seed."""
        self._saved_state = torch.get_rng_state()
        torch.manual_seed(self._seed)

    def __exit__(self, exc_type, exc_value, traceback):
        """Restore the saved RNG state (runs even if the body raised)."""
        torch.set_rng_state(self._saved_state)
def test_temporary_seed():
    """Check that temporary_seed leaves the outer RNG sequence intact."""
    torch.manual_seed(3)
    outer_first = torch.rand(1)
    with temporary_seed(2):
        inner = torch.rand(1)
    outer_second = torch.rand(1)
    # Replay the outer sequence without the context manager.
    torch.manual_seed(3)
    replay_first = torch.rand(1)
    replay_second = torch.rand(1)
    # Replay the inner draw directly under its seed.
    torch.manual_seed(2)
    replay_inner = torch.rand(1)
    assert torch.allclose(outer_first, replay_first)
    assert torch.allclose(outer_second, replay_second)
    assert torch.allclose(inner, replay_inner)
# Allow running this module directly as a quick self-test.
if __name__ == "__main__":
    test_temporary_seed()
|
[
"Anonymous"
] |
Anonymous
|
a645386e0c34474857014299151a5d45a2ce0535
|
b8115bc7503581bf3bb44bfa87b270793aff4381
|
/ddsp/training/data_preparation/ddsp_prepare_tfrecord.py
|
f7a4c0f3576f5ff085e50c33af5f4a887ed246f2
|
[
"Apache-2.0"
] |
permissive
|
pollinations/ddsp
|
7a5cfd18efcd8a77729d26231d294a4c03c2d286
|
4bbb3b1b0aa9e9a4c1f77e8758f409cbd1ec03f7
|
refs/heads/main
| 2023-06-21T18:21:37.230721 | 2021-07-22T09:45:54 | 2021-07-22T09:45:54 | 388,399,770 | 0 | 0 |
Apache-2.0
| 2021-07-22T09:14:11 | 2021-07-22T09:14:10 | null |
UTF-8
|
Python
| false | false | 3,340 |
py
|
# Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Create a TFRecord dataset from audio files.
Usage:
====================
ddsp_prepare_tfrecord \
--input_audio_filepatterns=/path/to/wavs/*wav,/path/to/mp3s/*mp3 \
--output_tfrecord_path=/path/to/output.tfrecord \
--num_shards=10 \
--alsologtostderr
"""
from absl import app
from absl import flags
from ddsp.training.data_preparation.prepare_tfrecord_lib import prepare_tfrecord
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
# Pipeline configuration flags; see the module docstring for example usage.
flags.DEFINE_list('input_audio_filepatterns', [],
                  'List of filepatterns to glob for input audio files.')
flags.DEFINE_string(
    'output_tfrecord_path', None,
    'The prefix path to the output TFRecord. Shard numbers will be added to '
    'actual path(s).')
flags.DEFINE_integer(
    'num_shards', None,
    'The number of shards to use for the TFRecord. If None, this number will '
    'be determined automatically.')
flags.DEFINE_integer('sample_rate', 16000,
                     'The sample rate to use for the audio.')
flags.DEFINE_integer(
    'frame_rate', 250,
    'The frame rate to use for f0 and loudness features. If set to 0, '
    'these features will not be computed.')
flags.DEFINE_float(
    'example_secs', 4,
    'The length of each example in seconds. Input audio will be split to this '
    'length using a sliding window. If 0, each full piece of audio will be '
    'used as an example.')
flags.DEFINE_float(
    'sliding_window_hop_secs', 1,
    'The hop size in seconds to use when splitting audio into constant-length '
    'examples.')
flags.DEFINE_float(
    'eval_split_fraction', 0.0,
    'Fraction of the dataset to reserve for eval split. If set to 0, no eval '
    'split is created.'
)
flags.DEFINE_float(
    'coarse_chunk_secs', 20.0,
    'Chunk size in seconds used to split the input audio files.')
flags.DEFINE_list(
    'pipeline_options', '--runner=DirectRunner',
    'A comma-separated list of command line arguments to be used as options '
    'for the Beam Pipeline.')
def run():
    """Expand the input file patterns and launch the TFRecord preparation pipeline."""
    audio_paths = []
    for pattern in FLAGS.input_audio_filepatterns:
        audio_paths.extend(tf.io.gfile.glob(pattern))
    prepare_tfrecord(
        audio_paths,
        FLAGS.output_tfrecord_path,
        num_shards=FLAGS.num_shards,
        sample_rate=FLAGS.sample_rate,
        frame_rate=FLAGS.frame_rate,
        window_secs=FLAGS.example_secs,
        hop_secs=FLAGS.sliding_window_hop_secs,
        eval_split_fraction=FLAGS.eval_split_fraction,
        coarse_chunk_secs=FLAGS.coarse_chunk_secs,
        pipeline_options=FLAGS.pipeline_options)
def main(unused_argv):
    """From command line."""
    # absl passes the remaining argv; this entry point ignores it.
    run()
def console_entry_point():
    """From pip installed script."""
    # app.run parses flags, then invokes main().
    app.run(main)
# Support running the module directly as well as via the console script.
if __name__ == '__main__':
    console_entry_point()
|
[
"[email protected]"
] | |
8e1fb53a12cc8169be23e1cdcdc37884cdf551ec
|
a6cbc03780b5c390f4f8ce9063bd8a7f6d75e8aa
|
/mail2.py
|
1c0dfcd906c6beffc8d5efcafb1c822ea969e238
|
[] |
no_license
|
KimaruThagna/Email_and_Regex
|
5f825554bd17e56ff091a79187c5ab7a758960d9
|
c250e37d6e09f1a9c35fb6af873ff1c77707a8fd
|
refs/heads/master
| 2021-09-06T11:59:04.784389 | 2018-02-06T12:16:58 | 2018-02-06T12:16:58 | 110,789,925 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,992 |
py
|
#This example still uses gmail but this time includes an attachment
import os,smtplib
from email.mime.text import MIMEText
from email.encoders import encode_base64
from email.mime.multipart import MIMEMultipart
from tkinter.filedialog import askopenfilename
from email.mime.base import MIMEBase
# function that sends the email. Feed it with relevant parameters
def sendMail(sender,pwd,subject,body,receiver,q):
    """Send an email through Gmail's SMTP server, optionally with one attachment.

    :param sender: sender address, also used for the SMTP login
    :param pwd: sender password
    :param subject: message subject line
    :param body: plain-text message body
    :param receiver: recipient address
    :param q: 'y' to prompt (via a file dialog) for a single attachment
    """
    message=MIMEMultipart() # define the whole message as a mimemultipart and add relevant metadata
    message['Subject']=subject
    message['From']=sender
    message['To']=receiver
    text=MIMEText(body)
    message.attach(text)# attach the body or actual message to the message object
    if q=='y':
        file=askopenfilename()# window that lets the user browse for a file
        # Bugfix: the original `open(file,'rb').read()` leaked the file
        # handle; 'with' guarantees it is closed.
        with open(file,'rb') as attachment:
            data=attachment.read() # read file in binary mode
        part=MIMEBase('application','octet-stream')
        part.set_payload(data) # set the payload as the file read in binary mode
        encode_base64(part) # encode the attachment to base64
        part.add_header('Content-disposition','attachment; filename='+os.path.basename(file))
        message.attach(part)
    print('Connecting ...')
    server=smtplib.SMTP('smtp.gmail.com',587) # setup email server
    server.ehlo() # identify yourself to gmail client
    server.starttls() # start transport layer security
    server.ehlo() # re-identify yourself after encryption
    server.login(sender,pwd) # login to sender account
    print('Connected')
    server.sendmail(sender,receiver,message.as_string()) # perform actual sending of mail
    print('Mail Sent.')
    server.quit()
#prompts
# Collect everything interactively from the console.
sender=input('Input Your email ')
receiver=input('Provide Recepient ')
pwd=input('Provide password ' )  # NOTE(review): echoed in the clear; getpass would be safer
subject=input('Mail Subject ')
body=input('Type your message ')
con=input('Do you want to send an attachment? Enter y for YES ')
#call method
sendMail(sender,pwd,subject,body,receiver,con)
|
[
"[email protected]"
] | |
d5ab6e2c2301fa7c2de21056b961275cd20e463d
|
840b98f14f181f7dbd693f2ee4b3c46e5be59305
|
/demos/demo_pycloudmessenger/POM1/NeuralNetworks/pom1_NN_worker_pycloudmessenger.py
|
3bb1b70be16f54c7404843da2a380711222b695e
|
[
"Apache-2.0"
] |
permissive
|
Musketeer-H2020/MMLL-Robust
|
4ef6b2ff5dff18d4d2b2a403a89d9455ba861e2b
|
ccc0a7674a04ae0d00bedc38893b33184c5f68c6
|
refs/heads/main
| 2023-09-01T18:47:46.065297 | 2021-09-28T15:34:12 | 2021-09-28T15:34:12 | 386,264,004 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,786 |
py
|
# -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
November 2020
Example of use: python pom1_NN_worker_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --id <id>
Parameters:
- user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
- password: String with the password
- task_name: String with the name of the task. If the task already exists, an error will be displayed
- id: Integer representing the partition of data to be used by the worker. Each worker should use a different partition, possible values are 0 to 4.
'''
# Import general modules
import argparse
import logging
import json
import numpy as np
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disables tensorflow warnings
import tensorflow as tf
import onnxruntime
from sklearn.metrics import accuracy_score
# Add higher directory to python modules path.
sys.path.append("../../../../")
# To be imported from MMLL (pip installed)
from MMLL.nodes.WorkerNode import WorkerNode
from MMLL.comms.comms_pycloudmessenger import Comms_worker as Comms
# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders
# Set up logger
# Root logging config: third-party noise is limited to ERROR by basicConfig,
# then our own root logger is opened up to DEBUG.
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
if __name__ == "__main__":
    # ---- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default=None, help='User')
    parser.add_argument('--password', type=str, default=None, help='Password')
    parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
    parser.add_argument('--id', type=int, default=None, choices=[0, 1, 2, 3, 4], help='The address of the worker')
    FLAGS, unparsed = parser.parse_known_args()
    user_name = FLAGS.user
    user_password = FLAGS.password
    task_name = FLAGS.task_name
    data_partition_id = FLAGS.id # This integer identifies the data partition used for the worker
    # Set basic configuration
    dataset_name = 'mnist'
    verbose = False
    pom = 1
    model_type = 'NN'
    # Create the directories for storing relevant outputs if they do not exist
    create_folders("./results/")
    # Setting up the logger
    logger = Logger('./results/logs/Worker_' + str(user_name) + '.log')
    # Load the credentials for pycloudmessenger
    display('===========================================', logger, verbose)
    display('Creating Worker...', logger, verbose)
    # Note: this part creates the worker (participant) and it joins the task. This code is
    # intended to be used only at the demos, in Musketeer this part must be done in the client.
    credentials_filename = '../../musketeer.json'
    try:
        with open(credentials_filename, 'r') as f:
            credentials = json.load(f)
    except:
        display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","../../")) + '"', logger, verbose)
        sys.exit()
    # Create user and join task
    tm = Task_Manager(credentials_filename)
    participant = tm.create_worker_and_join_task(user_name, user_password, task_name, display, logger)
    display("Worker %s has joined task %s" %(user_name, task_name), logger, verbose)
    # Creating the comms object
    display('Creating WorkerNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
    comms = Comms(participant, user_name)
    # Creating Workernode
    wn = WorkerNode(pom, comms, logger, verbose)
    display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, verbose)
    # Load data
    # Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
    # connectors must be provided
    data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
    try:
        dc = DC(data_file)
    except:
        display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("","../../../../input_data/")) + '"', logger, verbose)
        sys.exit()
    # Get train/test data and set training data
    [Xtr, ytr, _, _, Xtst, ytst] = dc.get_all_data_Worker(int(data_partition_id))
    wn.set_training_data(dataset_name, Xtr, ytr)
    display('WorkerNode loaded %d patterns for training' % wn.NPtr, logger, verbose)
    # Creating a ML model and start training procedure
    wn.create_model_worker(model_type)
    display('MMLL model %s is ready for training!' %model_type, logger, verbose)
    display('Worker_' + model_type + ' %s is running...' %user_name, logger, verbose)
    wn.run()
    display('Worker_' + model_type + ' %s: EXIT' %user_name, logger, verbose)
    # Retrieving and saving the trained model
    display('Retrieving the trained model from WorkerNode', logger, verbose)
    model = wn.get_model()
    # Warning: this save_model utility is only for demo purposes
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model'
    model.save(output_filename_model)
    # Making predictions on test data
    display('------------- Obtaining predictions------------------------------------\n', logger, verbose)
    preprocessors = wn.get_preprocessors()
    if preprocessors is not None:
        for prep_model in preprocessors: # Apply stored preprocessor sequentially (in the same order received)
            Xtst = prep_model.transform(Xtst)
            display('Test data transformed using %s' %prep_model.name, logger, verbose)
    preds_tst = model.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
    y = np.argmax(ytst, axis=-1) # Convert to labels
    classes = np.arange(ytst.shape[1]) # 0 to 9
    # Evaluating the results
    display('------------- Evaluating --------------------------------------------\n', logger, verbose)
    # Warning, these evaluation methods are not part of the MMLL library, they are only intended
    # to be used for the demos. Use them at your own risk.
    output_filename = 'Worker_' + str(user_name) + '_NN_confusion_matrix_' + dataset_name + '.png'
    title = 'NN confusion matrix in test set worker'
    plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)
    # Load Tf SavedModel and check results
    model_loaded = tf.keras.models.load_model(output_filename_model)
    preds_tst = model_loaded.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
    # Model export to ONNX
    # NOTE(review): this relies on the MMLL model's save() recognising the
    # .onnx extension and exporting ONNX rather than a SavedModel — confirm.
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model.onnx'
    model.save(output_filename_model)
    # Compute the prediction with ONNX Runtime
    onnx_session = onnxruntime.InferenceSession(output_filename_model)
    onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
    onnx_output = onnx_session.run(None, onnx_inputs)[0]
    onnx_output = np.argmax(onnx_output, axis=-1) # Convert to labels
    err_onnx = accuracy_score(y,onnx_output)
    display('Test accuracy in ONNX model is %f' %err_onnx, logger, verbose)
|
[
"[email protected]"
] | |
95986ae73d179770f7292b38dbaaf00b540d68bb
|
67ecf1aca10c6b3504027edc131d3f295a66ae08
|
/00-deleteintreeview.py
|
d3e2d07a7e5d08d4338fd394ee4a32966af7637f
|
[
"MIT"
] |
permissive
|
UncleEngineer/TkinterTrick
|
5efa58dee8612d48d18040debe7868c6b5815e3c
|
471a5f4906ddad195731410e9df1a2b35f466fcb
|
refs/heads/master
| 2020-03-16T09:57:28.696335 | 2018-06-22T10:23:39 | 2018-06-22T10:23:39 | 132,626,504 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,029 |
py
|
from tkinter import *
from tkinter import ttk
root = Tk()
tree = ttk.Treeview(root)
tree["columns"]=("one","two")
tree.column("one", width=100 )
tree.column("two", width=100)
tree.heading("one", text="coulmn A")
tree.heading("two", text="column B")
tree.insert("" , 0, text="Line 1", values=("1A","1b"))
id2 = tree.insert("", 1, "dir2", text="Dir 2")
tree.insert(id2, "end", "dir 2", text="sub dir 2", values=("2A","2B"))
##alternatively:
tree.insert("", 3, "dir3", text="Dir 3")
tree.insert("dir3", 3, text=" sub dir 3",values=("3A"," 3B"))
def edit():
x = tree.get_children()
for item in x: ## Changing all children from root item
tree.item(item, text="blub", values=("foo", "bar"))
def delete():
selected_item = tree.selection()[0] ## get selected item
tree.delete(selected_item)
tree.pack()
button_del = Button(root, text="del", command=delete)
button_del.pack()
button_del = Button(root, text="edit", command=edit)
button_del.pack()
root.mainloop()
|
[
"[email protected]"
] | |
d2af03e3a4906a1fa23e9f3a1ce18e723be2b7dd
|
a5b4d77e760c6131ba1c5f040265a3b08d3c0478
|
/enemy_bot/enemy_bot_level5/burger_detect/scripts/image_save.py
|
e695863b746ba773cd8cf5ea415ec5f9c57f2dab
|
[
"BSD-3-Clause"
] |
permissive
|
kenjirotorii/burger_war_kit
|
700b511739299a9d90d23c70262ecf4856d234b7
|
d9b1b443f220980a4118c13cdf22174696c3db9c
|
refs/heads/main
| 2023-03-21T23:32:24.415502 | 2021-03-11T15:59:12 | 2021-03-11T15:59:12 | 337,704,943 | 0 | 1 |
BSD-3-Clause
| 2021-03-11T15:59:13 | 2021-02-10T11:36:22 |
Python
|
UTF-8
|
Python
| false | false | 1,046 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import random
#
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import sys
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
import os
dir="Image/test/"
num=10*1000
class ImageGet():
def __init__(self):
rospy.Subscriber('/image_raw', Image, self.Image_save)
self.bridge = CvBridge()
self.count=0
def Image_save(self,data):
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
cv2.imshow("sample.jpg",cv_image)
cv2.waitKey(5)
#cv2.imwrite(dir+"sample"+repr(self.count)+".jpg",cv_image)
print("save done.")
#self.count+=1
def get_image(self):
r = rospy.Rate(1) # change speed 1fps
while not rospy.is_shutdown():
r.sleep()
if self.count>num:
break
if __name__ == '__main__':
if not os.path.exists(dir):
os.mkdir(dir)
rospy.init_node('get_image')
bot = ImageGet()
bot.get_image()
|
[
"[email protected]"
] | |
327f5bed18063bc5103443d55e4856bea69453da
|
009c5522fe7fd1b6ffad167097535e592818c9d7
|
/app/inheritance/abstract/migrations/0003_auto_20191223_0545.py
|
02758dcc786385764c2036954dc49dd6a0eb3c57
|
[] |
no_license
|
moorekwon/django-document
|
d891d3d329bc697598517c0918e912da89cf5f6a
|
983de2babdabd106e17467af27bac4efced170b8
|
refs/heads/master
| 2021-09-29T00:37:04.647977 | 2019-12-26T09:07:55 | 2019-12-26T09:07:55 | 228,784,209 | 0 | 0 | null | 2021-09-22T18:10:05 | 2019-12-18T07:35:12 |
Python
|
UTF-8
|
Python
| false | false | 740 |
py
|
# Generated by Django 3.0 on 2019-12-23 05:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 3.0); do not hand-edit the
    # operations. Adds explicit related_name values to two FK fields —
    # presumably to disambiguate the reverse accessors on Student.

    dependencies = [
        ('abstract', '0002_childa_childb'),
    ]

    operations = [
        migrations.AlterField(
            model_name='childa',
            name='m2m',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childa_set', to='abstract.Student'),
        ),
        migrations.AlterField(
            model_name='childb',
            name='m2m',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childb_set', to='abstract.Student'),
        ),
    ]
|
[
"[email protected]"
] | |
37cda467832d9959605d1668f2ef07cc8c293df9
|
ece6f45409ee2bcbff1be64fa1ac98e7805e0e18
|
/API:Data_Visualization/population_json.py
|
78c7c29a84dd29b055d74f7b1bbb767d4b2871b3
|
[] |
no_license
|
PickertJoe/python_exercises
|
5b9ac3334eec32e35a477d126c911d4ca07a4343
|
77955427db9c3342c9a51618a0cd9cf6f884fbee
|
refs/heads/master
| 2022-12-12T11:57:08.267814 | 2019-12-08T21:52:31 | 2019-12-08T21:52:31 | 184,834,676 | 1 | 0 | null | 2022-12-08T05:14:43 | 2019-05-04T00:16:12 |
Python
|
UTF-8
|
Python
| false | false | 1,293 |
py
|
# A program to read and analyze the data in population_data.json
import json
from comma import comma
from country_code import code_search
from pygal.maps.world import World
from pygal.style import RotateStyle
# Importing the data in the json file into a list
filename = "chapter_16/population_data.json"
with open(filename) as f:
    population_data = json.load(f)
# Building a dictionary of the population data, keyed by 2-letter country code
cc_populations = {}
for country in population_data:
    # Only the 2010 figures are plotted; values arrive as strings like "1234.0"
    if country['Year'] == '2010':
        country_name = country["Country Name"]
        population = int(float(country["Value"]))
        code = code_search(country_name)
        if code:  # skip entries whose name can't be mapped to a country code
            cc_populations[code] = population
# Creating three separate categories for different population ranges,
# so the map gets one colour band per bucket.
cc_pop1, cc_pop2, cc_pop3 = {}, {}, {}
for code, population in cc_populations.items():
    if population > 1000000000:
        cc_pop1[code] = population
    elif population > 10000000:
        cc_pop2[code] = population
    else:
        cc_pop3[code] = population
wm_style = RotateStyle('#336699')  # blue-ish rotating palette
wm = World(style=wm_style)
wm.title = "World Population in 2010, by Select Countries"
wm.add('1bn+', cc_pop1)
wm.add('10m - 1bn', cc_pop2)
wm.add('0-10m', cc_pop3)
wm.render_to_file('country_populations_category.svg')
|
[
"[email protected]"
] | |
65c4da75fb004f1520cb29a69802bcce620518d9
|
40c4b0c31a5870a9201d3d42a63c5547092e5912
|
/frappe/recorder.py
|
8cbcaa01bb980c8cbdc9a77613591f7700643486
|
[
"MIT"
] |
permissive
|
ektai/frappe3
|
fab138cdbe15bab8214cf623d9eb461e9b9fb1cd
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
refs/heads/master
| 2022-12-25T15:48:36.926197 | 2020-10-07T09:19:20 | 2020-10-07T09:19:20 | 301,951,677 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,212 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals

from collections import Counter
import datetime
import functools
import inspect
import json
import re
import time
import traceback

import frappe
import sqlparse
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

from frappe import _
# Cache keys used by the recorder.
RECORDER_INTERCEPT_FLAG = "recorder-intercept"  # set => record incoming requests
RECORDER_REQUEST_SPARSE_HASH = "recorder-requests-sparse"  # per-request summaries
RECORDER_REQUEST_HASH = "recorder-requests"  # full captures, including SQL calls
def sql(*args, **kwargs):
    """Recording wrapper installed over frappe.db.sql by _patch().

    Executes the query via the original handler, then registers the formatted
    query, its EXPLAIN output, the app-level stack and timing with the
    request's Recorder. Returns the original result unchanged.
    """
    start_time = time.time()
    result = frappe.db._sql(*args, **kwargs)
    end_time = time.time()
    stack = list(get_current_stack_frames())
    # The executed SQL text lives on a driver-specific cursor attribute.
    if frappe.conf.db_type == 'postgres':
        query = frappe.db._cursor.query
    else:
        query = frappe.db._cursor._executed
    query = sqlparse.format(query.strip(), keyword_case="upper", reindent=True)
    # Collect EXPLAIN for executed query
    if query.lower().strip().split()[0] in ("select", "update", "delete"):
        # Only SELECT/UPDATE/DELETE queries can be "EXPLAIN"ed
        explain_result = frappe.db._sql("EXPLAIN {}".format(query), as_dict=True)
    else:
        explain_result = []
    data = {
        "query": query,
        "stack": stack,
        "explain_result": explain_result,
        "time": start_time,
        "duration": float("{:.3f}".format((end_time - start_time) * 1000)),  # ms
    }
    frappe.local._recorder.register(data)
    return result
def get_current_stack_frames():
    """Yield app-level stack frames, oldest first, as JSON-friendly dicts.

    Only frames whose file lives under an ``/apps/`` directory are kept;
    the two innermost frames (this helper and its direct caller) are dropped.
    """
    outer = inspect.getouterframes(inspect.currentframe(), context=10)
    # Reverse to oldest-first, then discard the two innermost entries.
    for frame, filename, lineno, function, context, index in list(reversed(outer))[:-2]:
        if "/apps/" not in filename:
            continue
        yield {
            "filename": re.sub(".*/apps/", "", filename),
            "lineno": lineno,
            "function": function,
            "context": "".join(context),
            "index": index,
            "locals": json.dumps(frame.f_locals, skipkeys=True, default=str),
        }
def record():
    """Attach a Recorder to the current request when interception is enabled."""
    if __debug__ and frappe.cache().get_value(RECORDER_INTERCEPT_FLAG):
        frappe.local._recorder = Recorder()
def dump():
    """Persist the current request's recording, if one is active."""
    if __debug__ and hasattr(frappe.local, "_recorder"):
        frappe.local._recorder.dump()
class Recorder():
    """Captures all SQL issued while serving one HTTP request.

    Created per-request by record(); __init__ patches frappe.db.sql so every
    query is register()ed, and dump() writes the capture to the cache.
    """

    def __init__(self):
        self.uuid = frappe.generate_hash(length=10)  # capture identifier
        self.time = datetime.datetime.now()          # request start time
        self.calls = []                              # registered SQL calls
        self.path = frappe.request.path
        self.cmd = frappe.local.form_dict.cmd or ""
        self.method = frappe.request.method
        self.headers = dict(frappe.local.request.headers)
        self.form_dict = frappe.local.form_dict
        _patch()  # start intercepting frappe.db.sql

    def register(self, data):
        """Append one recorded SQL call (dict built by sql() above)."""
        self.calls.append(data)

    def dump(self):
        """Write a summary plus the full capture to the cache and notify clients."""
        request_data = {
            "uuid": self.uuid,
            "path": self.path,
            "cmd": self.cmd,
            "time": self.time,
            "queries": len(self.calls),
            "time_queries": float("{:0.3f}".format(sum(call["duration"] for call in self.calls))),
            "duration": float("{:0.3f}".format((datetime.datetime.now() - self.time).total_seconds() * 1000)),
            "method": self.method,
        }
        # Summary first (listing view), then realtime notification, then the
        # heavyweight full capture.
        frappe.cache().hset(RECORDER_REQUEST_SPARSE_HASH, self.uuid, request_data)
        frappe.publish_realtime(event="recorder-dump-event", message=json.dumps(request_data, default=str))
        self.mark_duplicates()
        request_data["calls"] = self.calls
        request_data["headers"] = self.headers
        request_data["form_dict"] = self.form_dict
        frappe.cache().hset(RECORDER_REQUEST_HASH, self.uuid, request_data)

    def mark_duplicates(self):
        """Annotate each call with its index and how many exact copies ran."""
        counts = Counter([call["query"] for call in self.calls])
        for index, call in enumerate(self.calls):
            call["index"] = index
            call["exact_copies"] = counts[call["query"]]
def _patch():
    # Keep a handle to the real executor on db._sql, then route all SQL
    # through the recording wrapper `sql` defined above.
    frappe.db._sql = frappe.db.sql
    frappe.db.sql = sql
def do_not_record(function):
    """Decorator: drop any active recorder session before calling *function*.

    Used on the recorder's own RPC endpoints so administrative calls
    (status/start/stop/get/delete) are not themselves captured.
    """
    @functools.wraps(function)  # preserve __name__/__doc__ for frappe's dispatcher
    def wrapper(*args, **kwargs):
        if hasattr(frappe.local, "_recorder"):
            # Detach the recorder and restore the un-instrumented executor.
            del frappe.local._recorder
            frappe.db.sql = frappe.db._sql
        return function(*args, **kwargs)
    return wrapper
def administrator_only(function):
    """Decorator: allow only the Administrator user to call *function*."""
    @functools.wraps(function)  # preserve __name__/__doc__ for frappe's dispatcher
    def wrapper(*args, **kwargs):
        if frappe.session.user != "Administrator":
            frappe.throw(_("Only Administrator is allowed to use Recorder"))
        return function(*args, **kwargs)
    return wrapper
@frappe.whitelist()
@do_not_record
@administrator_only
def status(*args, **kwargs):
    """Return True when request recording is currently enabled."""
    return bool(frappe.cache().get_value(RECORDER_INTERCEPT_FLAG))
@frappe.whitelist()
@do_not_record
@administrator_only
def start(*args, **kwargs):
    """Enable recording of subsequent requests."""
    frappe.cache().set_value(RECORDER_INTERCEPT_FLAG, 1)
@frappe.whitelist()
@do_not_record
@administrator_only
def stop(*args, **kwargs):
    """Disable recording of subsequent requests (captures are kept)."""
    frappe.cache().delete_value(RECORDER_INTERCEPT_FLAG)
@frappe.whitelist()
@do_not_record
@administrator_only
def get(uuid=None, *args, **kwargs):
    """Return one full capture (stacks syntax-highlighted) or, without a
    uuid, the list of all request summaries."""
    if uuid:
        result = frappe.cache().hget(RECORDER_REQUEST_HASH, uuid)
        lexer = PythonLexer(tabsize=4)
        for call in result["calls"]:
            for stack in call["stack"]:
                # Highlight the captured source context, marking the active line.
                formatter = HtmlFormatter(noclasses=True, hl_lines=[stack["index"] + 1])
                stack["context"] = highlight(stack["context"], lexer, formatter)
    else:
        result = list(frappe.cache().hgetall(RECORDER_REQUEST_SPARSE_HASH).values())
    return result
@frappe.whitelist()
@do_not_record
@administrator_only
def delete(*args, **kwargs):
    """Discard all stored captures (summaries and full records)."""
    frappe.cache().delete_value(RECORDER_REQUEST_SPARSE_HASH)
    frappe.cache().delete_value(RECORDER_REQUEST_HASH)
|
[
"[email protected]"
] | |
871e74c940da56c3387dffad57b313ca22cdc089
|
9d961bd6a590cc96db0c1f9c72d84e3a66636edf
|
/심심풀이땅콩/[백준]4673.py
|
76fb463f5fde7bf3313b00ca4769b70034e63f75
|
[] |
no_license
|
0equal2/Python_Programming
|
bae65338929e8e1a88247b8d23de805caa026702
|
2ac1d0262320220f49cbdb45e787e55e994d0b0f
|
refs/heads/master
| 2023-05-14T22:13:41.583214 | 2021-06-09T03:04:51 | 2021-06-09T03:04:51 | 304,628,012 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 195 |
py
|
###[백준]4673
memo=[0]*10001
for i in range(1,10001):
newnum=i+sum(list(map(int,list(str(i)))))
if newnum<=10000:
memo[newnum]=1
if memo[i]==0:
print(i)
|
[
"[email protected]"
] | |
4b99c672e34294a5f110b6531518b6d7056de15a
|
1be2cbc9fd62cf77cc05a64807acf7d857b84eee
|
/blackopt/config.py
|
a2114330cc418e849f46670ef206413bcc1d54b6
|
[] |
no_license
|
ikamensh/blackopt
|
4fdce2c0147b1a5a85024c9b59925d3d1a35b13f
|
a6ab24ce1be21a5ca9e26d0bb1f59bb50fd007a2
|
refs/heads/master
| 2023-01-23T12:55:42.087216 | 2020-12-05T19:18:30 | 2020-12-05T19:18:30 | 178,232,685 | 0 | 0 | null | 2020-10-18T20:57:29 | 2019-03-28T15:33:53 |
Python
|
UTF-8
|
Python
| false | false | 382 |
py
|
import os
# Name of the workspace directory used when no explicit root is configured.
default_workspace = "_blackopt_workspace"

_rootdir = default_workspace


def set_rootdir(path):
    """Point the workspace root at *path* (with ``~`` expanded)."""
    global _rootdir
    _rootdir = os.path.expanduser(path)


def prepend_rootdir(prefix):
    """Place the default workspace directory under *prefix* (``~`` expanded)."""
    global _rootdir
    _rootdir = os.path.join(os.path.expanduser(prefix), default_workspace)


def get_rootdir():
    """Return the currently configured workspace root."""
    return _rootdir
|
[
"[email protected]"
] | |
3cc86621a38c55c60af190e6064d74da255a9e2b
|
14d8adc86adc14c1d64a5550b1bbd5663e984545
|
/combination_sum_ii.py
|
97ed76da8e3052e379884a175fa1c814d6591641
|
[] |
no_license
|
milllu/leetcode
|
e1b68ef7774cc0c1b49325ec1b87280d27570d94
|
458b3e72cd82a203b10bdca747c4c3ba85708f75
|
refs/heads/master
| 2020-03-30T23:41:46.180308 | 2018-10-11T01:08:31 | 2018-10-11T01:08:31 | 151,709,941 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,375 |
py
|
"""
给定一个数组 candidates 和一个目标数 target ,找出 candidates 中所有可以使数字和为 target 的组合。
candidates 中的每个数字在每个组合中只能使用一次。
说明:
所有数字(包括目标数)都是正整数。
解集不能包含重复的组合。
示例 1:
输入: candidates = [10,1,2,7,6,1,5], target = 8,
所求解集为:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
示例 2:
输入: candidates = [2,5,2,1,2], target = 5,
所求解集为:
[
[1,2,2],
[5]
]
"""
class Solution(object):
    def combinationSum2(self, nums, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]

        Sorted backtracking: sort the candidates, then at each recursion
        depth skip repeated values so every combination is generated exactly
        once. This replaces the original counting-dict DFS, which produced
        duplicate orderings and filtered them with an O(len(result))
        membership test per hit. Each returned combination is in
        non-decreasing order.
        """
        nums = sorted(nums)
        result = []

        def backtrack(start, remaining, path):
            # remaining == 0 means `path` sums exactly to target.
            if remaining == 0:
                result.append(path[:])
                return
            for i in range(start, len(nums)):
                # Skip equal values at the same depth to avoid duplicate combos.
                if i > start and nums[i] == nums[i - 1]:
                    continue
                # nums is sorted: once a value exceeds `remaining`, all later
                # values do too.
                if nums[i] > remaining:
                    break
                path.append(nums[i])
                backtrack(i + 1, remaining - nums[i], path)
                path.pop()

        backtrack(0, target, [])
        return result
|
[
"[email protected]"
] | |
aace7c3ac8ae4dfbaf9a425ce523bb342eaafc68
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_2204+034/sdB_pg_2204+034_coadd.py
|
1d84ed11150bebd234261af2a75e6b07e15f708b
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
from gPhoton.gMap import gMap
def main():
    # Single gPhoton gMap call: build NUV count and coadded count FITS images
    # for sdB target pg_2204+034 at the given sky position, 30 s time steps.
    gMap(band="NUV", skypos=[331.818708,3.705497], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_2204+034/sdB_pg_2204+034_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_2204+034/sdB_pg_2204+034_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
c0bdac944aed5cb00d3ab2541709a23fecbc22e3
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/application_gateway_rewrite_rule_set.py
|
bf4a4ba184e7533ad006b1884070f8b6fb8071ac
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 2,111 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayRewriteRuleSet(SubResource):
    """Rewrite rule set of an application gateway.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param rewrite_rules: Rewrite rules in the rewrite rule set.
    :type rewrite_rules:
     list[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayRewriteRule]
    :ivar provisioning_state: Provisioning state of the rewrite rule set
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param name: Name of the rewrite rule set that is unique within an
     Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource
     is updated.
    :vartype etag: str
    """

    # NOTE: generated by AutoRest — server-populated fields are read-only.
    _validation = {
        'provisioning_state': {'readonly': True},
        'etag': {'readonly': True},
    }

    # Maps Python attribute names to JSON paths in the REST payload.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'rewrite_rules': {'key': 'properties.rewriteRules', 'type': '[ApplicationGatewayRewriteRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRewriteRuleSet, self).__init__(**kwargs)
        self.rewrite_rules = kwargs.get('rewrite_rules', None)
        self.provisioning_state = None  # server-populated
        self.name = kwargs.get('name', None)
        self.etag = None  # server-populated
|
[
"[email protected]"
] | |
c61d44bcc8be1346b3b1a8bb2742d5847838cc8a
|
2b115f9c5929fedd06d4dd6969100ab2df484adf
|
/messenger/urls.py
|
9903187e19c8320adcd0f81ea319f70a102f44a3
|
[] |
no_license
|
sorXCode/Avina-API
|
0b790bfd8ac8b9c84d1db45db0819e0585d954b9
|
7687ba7434b77d6c33944c65fff0409459a9d5ce
|
refs/heads/master
| 2023-04-11T01:17:35.570752 | 2021-04-14T15:01:45 | 2021-04-14T15:01:45 | 342,429,666 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from .views import StartMessaging, Conversation
urlpatterns = [
    path('', StartMessaging.as_view()),  # root of the messenger app
    path('<str:product_uid>', Conversation.as_view()),  # per-product conversation
]
# Allow optional format suffixes (e.g. `.json`) on the routes above.
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"[email protected]"
] | |
a21618f0ce0aa6432175d36b0042e7df8e21bb69
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/prime-big-291.py
|
dac9afb7dc0a8d107113bc4bc06e8af627553a69
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,479 |
py
|
# Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    # Linear search upward from 2, counting hits via is_prime.
    # NOTE(review): generated benchmark code (see $Definition placeholder
    # below); n < 1 never terminates — callers pass n >= 1.
    candidate:int = 2
    found:int = 0
    while True:
        if is_prime(candidate):
            found = found + 1
            if found == n:
                return candidate
        candidate = candidate + 1
    return 0 # Never happens
def is_prime(x:int) -> bool:
    # Trial division over 2..x-1. NOTE(review): generated benchmark code —
    # x <= 1 returns True and div2..div5 are unused padding; kept as-is.
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime2(x:int, x2:int) -> bool:
    # Clone of is_prime with an extra unused parameter (benchmark padding).
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
$Definition
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
    # Clone of is_prime with extra unused parameters (benchmark padding).
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
    # Clone of is_prime with extra unused parameters (benchmark padding).
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
# Input parameter (n2..n5 and i2..i5 are unused benchmark padding)
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch: print the first n primes, one per line
while i <= n:
    print(get_prime(i))
    i = i + 1
|
[
"[email protected]"
] | |
4808cbaedeec5b5afd0caf7172bca3b9c3bb2900
|
557ca4eae50206ecb8b19639cab249cb2d376f30
|
/Chapter04/spiral.py
|
b642ee9c1d01400018b8cff8264cad308b034929
|
[] |
no_license
|
philipdongfei/Think-python-2nd
|
781846f455155245e7e82900ea002f1cf490c43f
|
56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf
|
refs/heads/master
| 2021-01-09T19:57:49.658680 | 2020-03-13T06:32:11 | 2020-03-13T06:32:11 | 242,441,512 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
import turtle
def draw_spiral(t, n, length=3, a=0.1, b=0.0002):
    """Draw a spiral with turtle *t* using *n* straight segments.

    Each segment advances *length* units and then turns left by
    1 / (a + b * total_turn) degrees, where total_turn is the angle turned
    so far — so the turn shrinks and the spiral gradually opens up.
    """
    total_turn = 0.0
    for _ in range(n):
        t.fd(length)
        turn = 1 / (a + b * total_turn)
        t.lt(turn)
        total_turn += turn
def main():
    # Opens a turtle graphics window and draws a 1000-segment spiral.
    bob = turtle.Turtle()
    draw_spiral(bob, n=1000)
    turtle.mainloop()  # keep the window open until the user closes it

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
f94dc3e35df3d080642dc8f8fd2a3ffb9b4675a5
|
0d2c2ffe431b159a87bcd78c97147422dce8d778
|
/GUI学习/01PyQt5快速开发与实战/ch05高级界面控件/11timer2.py
|
c00045f390bd96d04ec0f63ccf8a09b77033800c
|
[] |
no_license
|
YuanXianguo/Python-Project-ITC
|
9e297fc1e1e8ec2b136e6e8b1db0afaaba81c16c
|
afd14cbe501147ec66b4aa0c1c7907b3ae41d148
|
refs/heads/master
| 2020-04-16T13:54:33.727825 | 2019-12-20T02:16:52 | 2019-12-20T02:16:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
import sys
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtCore import Qt, QTimer
def test():
    # QTimer.singleShot callback — just prints a marker to the console.
    print(1)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    label = QLabel('<font color=red size=128><b>'
                   'Hello PyQt,窗口会在3秒后消失!</b></font>')
    # Frameless splash-style window (no title bar or borders)
    label.setWindowFlags(Qt.SplashScreen | Qt.FramelessWindowHint)
    label.show()
    # Fire `test` once after 3000 ms.
    # NOTE(review): the label promises the window disappears after 3 s and the
    # original comment said "quit after 10 s", but this only calls test() and
    # never closes the window — confirm intended behavior.
    QTimer.singleShot(3000, test)
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
a11c216ccd83de27c2498fc31e7adcb24de5c462
|
69f83bcff8a2bd9c8ef082a2141a39a5322c4b2a
|
/pyenv/env/lib/python2.7/site-packages/transport/tester.py
|
a0c92e0c385d219b834498f737ba0f7ed0dcd5a7
|
[] |
no_license
|
md848-cornell/NRF-ROKIX-sensor-mesh
|
ab12f6572a992ed5c468eb08b8c4586b52b411b2
|
b244207af0fb0fce6e2722c384d3c6c25d5ac025
|
refs/heads/master
| 2020-05-21T10:56:15.013174 | 2019-05-16T16:12:11 | 2019-05-16T16:12:11 | 186,021,295 | 0 | 1 | null | 2020-03-07T21:39:41 | 2019-05-10T16:35:25 |
C
|
UTF-8
|
Python
| false | false | 679 |
py
|
"""
Copyright (c) 2017 Nordic Semiconductor ASA
CoAP transport class for tests.
"""
from transport.base import TransportBase
from ipaddress import ip_address
class TesterTransport(TransportBase):
    """In-memory CoAP transport stub for tests.

    Records open/close state and the last sent payload/destination instead of
    performing any real I/O.
    """

    def __init__(self, port=None):
        TransportBase.__init__(self, port)
        self.tester_opened = False   # True between open() and close()
        self.tester_data = None      # payload of the most recent send()
        self.tester_remote = None    # destination of the most recent send()
        self.output_count = 0        # total number of send() calls

    def open(self):
        self.tester_opened = True

    def close(self):
        self.tester_opened = False

    def send(self, data, dest):
        # Capture instead of transmitting, so tests can assert on it.
        self.tester_data = data
        self.tester_remote = dest
        self.output_count += 1
|
[
"Mike DiDomenico"
] |
Mike DiDomenico
|
6f08a86ea414a778c093cdd193e66adf1fa27fb9
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/kuchevn.py
|
a9cc915cd7f15c2358aed743c2373312c26e7f93
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used by the shared parse_item pipeline; empty string means
# the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='page_title']/h1",
    'price' : "//meta[@property='og:price:amount']/@content",
    'category' : "",
    'description' : "//div[@class='tab-container']/div[@class='pd_description_content tab-content clearfix ui-tabs-panel ui-widget-content ui-corner-bottom']",
    'images' : "//meta[@property='og:image'][1]/@content",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : "",
    'in_stock' : "",
    'guarantee' : "",
    'promotion' : ""
}
# Spider identity and crawl entry points.
name = 'kuche.vn'
allowed_domains = ['kuche.vn']
start_urls = ['http://kuche.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
# Link-follow rules: product/listing URLs, including ?page=N pagination.
rules = [
    #Rule(LinkExtractor(), 'parse_item'),
    #Rule(LinkExtractor(), 'parse'),
    # Raw string so "\?" and "\d" are real regex escapes rather than invalid
    # string-literal escapes (DeprecationWarning); the pattern value is identical.
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+($|\?page=\d+$)']), 'parse_item_and_links'),
]
|
[
"[email protected]"
] | |
c0fc94a656f1cee1a7c8ee20e88f8085721c9112
|
c67f449dc7187f154df7093a95ddcc14a3f0a18f
|
/youngseokcoin/test/functional/net.py
|
a4a15da130025b87c9678b54942f92c910989ea7
|
[
"MIT"
] |
permissive
|
youngseokaaa-presentation/A_system_to_ensure_the_integrity_of_Internet_of_things_by_using_Blockchain
|
cee9ba19e9d029759fc2fe4a43235c56fd9abe43
|
b2a47bc63386b5a115fc3ce62997034ebd8d4a1e
|
refs/heads/master
| 2023-02-17T07:58:43.043470 | 2021-01-11T05:40:28 | 2021-01-11T05:40:28 | 295,317,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,225 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Youngseokcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
)
class NetTest(YoungseokcoinTestFramework):
    """Exercise the networking RPCs (rpc/net.cpp) on a two-node network.

    The framework connects node0 <-> node1 in both directions, so each node
    starts with exactly two connections.
    """
    def set_test_params(self):
        # Fresh chain; two nodes are enough to exercise peer-related RPCs.
        self.setup_clean_chain = True
        self.num_nodes = 2
    def run_test(self):
        """Run the individual RPC checks in order on the shared setup."""
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()
        self._test_getpeerinfo()
    def _test_connection_count(self):
        """getconnectioncount reflects the two bidirectional links."""
        # connect_nodes_bi connects each node to the other
        assert_equal(self.nodes[0].getconnectioncount(), 2)
    def _test_getnettotals(self):
        """getnettotals byte counters stay consistent with per-peer stats."""
        # check that getnettotals totalbytesrecv and totalbytessent
        # are consistent with getpeerinfo
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 2)
        net_totals = self.nodes[0].getnettotals()
        assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),
                     net_totals['totalbytesrecv'])
        assert_equal(sum([peer['bytessent'] for peer in peer_info]),
                     net_totals['totalbytessent'])
        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        # NOTE(review): fixed sleep assumes each ping round-trip completes
        # within 0.1s — potentially flaky on a slow machine.
        time.sleep(0.1)
        peer_info_after_ping = self.nodes[0].getpeerinfo()
        net_totals_after_ping = self.nodes[0].getnettotals()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])
            assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])
        # Node-wide totals grow by one 32-byte ping sent and one 32-byte pong
        # received per peer, and node0 has two peers => +64 each way.
        assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])
        assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])
    def _test_getnetworkinginfo(self):
        """setnetworkactive(False) drops all connections; (True) allows reconnect."""
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Poll for up to ~3 seconds for the connection count to reach zero.
        timeout = 3
        while self.nodes[0].getnetworkinfo()['connections'] != 0:
            # Wait a bit for all sockets to close
            assert timeout > 0, 'not all connections closed in time'
            timeout -= 0.1
            time.sleep(0.1)
        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
    def _test_getaddednodeinfo(self):
        """addnode/getaddednodeinfo round-trip, plus the unknown-node error."""
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')
    def _test_getpeerinfo(self):
        """Each side's bound address matches the peer's reported source address."""
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
    NetTest().main()
|
[
"[email protected]"
] | |
84c6051cd1c083c73006b2058485e017d4f6a001
|
4d259f441632f5c45b94e8d816fc31a4f022af3c
|
/eventlet/prod_worker.py
|
51d9d239ff441f414a29933caf1e28379ec9f8d3
|
[] |
no_license
|
xiaoruiguo/lab
|
c37224fd4eb604aa2b39fe18ba64e93b7159a1eb
|
ec99f51b498244c414b025d7dae91fdad2f8ef46
|
refs/heads/master
| 2020-05-25T01:37:42.070770 | 2016-05-16T23:24:26 | 2016-05-16T23:24:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
from eventlet.queue import Queue
import eventlet
# Shared green-thread-safe queue linking the producer and the worker.
q = Queue()
def worker():
    """Consume items forever, burning ~1M loop iterations of CPU per item.

    Python 2 script (xrange, print statement). The CPU loop demonstrates how
    a non-yielding busy loop behaves under eventlet's cooperative scheduling.
    """
    while True:
        q.get()
        a = 0
        for i in xrange(1000000):
            a = a + 1
        print 'get'
def producer():
    """Burn ~1M loop iterations of CPU, then enqueue one item, forever."""
    while True:
        a = 0
        for i in xrange(1000000):
            a = a + 1
        q.put('lol')
        print 'put'
eventlet.spawn(worker)
eventlet.spawn(producer)
# Yield to the two greenlets and let them run for 30 seconds before exiting.
eventlet.sleep(30)
|
[
"[email protected]"
] | |
29ff7530a12d24ef2ff8f6e0744b6cf91faba8cf
|
a5b09349bb10685621788f815acfcef23e93b540
|
/tests/test_set_item.py
|
6e7fa7390740d64e413f0be77568016de3a82fe9
|
[] |
no_license
|
yishuihanhan/slimurl
|
05d95edf3b83a118bc22a4fda4f0e8ca9d4662f7
|
d6ee69b0c825dcc4129bb265bd97e61218b73ccc
|
refs/heads/master
| 2020-04-02T08:34:55.228207 | 2017-01-10T10:09:50 | 2017-01-10T10:09:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
#!/usr/bin/env python
# encoding: utf-8
from slimurl import URL
def check_set(url, args, result):
    """Assign one (key, value) pair onto *url* and assert it now equals *result*.

    *args* is a 2-tuple ``(key, value)``; the assignment goes through
    ``url[key] = value`` so any mapping-like object works.
    """
    field, payload = args
    url[field] = payload
    assert url == result
def test_set():
    """Yield nose-style (callable, *args) cases for URL ``__setitem__``.

    Covers a scalar value and several multi-value tuples, which must expand
    into repeated query parameters.
    """
    base = "http://example.net/"
    expectations = (
        (('foo', 'bar'), "http://example.net/?foo=bar"),
        (('foo', (0, 1)), "http://example.net/?foo=0&foo=1"),
        (('foo', ("0", "1")), "http://example.net/?foo=0&foo=1"),
        (('foo', (0, "1")), "http://example.net/?foo=0&foo=1"),
    )
    for args, result in expectations:
        yield check_set, URL(base), args, URL(result)
|
[
"[email protected]"
] | |
75dbe56cf58aa518de51464a64dfaa8d7f95feea
|
9e929843f73b099456bab9df1b08971288e3b839
|
/tests/integration_tests/vectors_tests/test_lower_than_or_equals.py
|
bedb21d4a788496d3c2bbb9138f82d33ab29733b
|
[
"MIT"
] |
permissive
|
lycantropos/cppstd
|
fd20a37c46bd730d15b6e5c34e795f39907fad75
|
2a44dad540a8cc36e7fac8805cf2f5402be34aee
|
refs/heads/master
| 2023-01-11T01:13:25.821513 | 2020-11-12T23:19:40 | 2020-11-12T23:19:40 | 302,339,499 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
from hypothesis import given
from tests.utils import (BoundPortedVectorsPair,
equivalence)
from . import strategies
@given(strategies.vectors_pairs, strategies.vectors_pairs)
def test_basic(first_pair: BoundPortedVectorsPair,
second_pair: BoundPortedVectorsPair) -> None:
first_bound, first_ported = first_pair
second_bound, second_ported = second_pair
assert equivalence(first_bound <= second_bound,
first_ported <= second_ported)
|
[
"[email protected]"
] | |
1c1917ab1339c7cbb623080afbb9a8125b03933c
|
7c25e479b21b1e3e69a6be140f6511a892901182
|
/python_practice/middle_linked_list.py
|
5379e1fe8bdae8d8f5d08bb398c0fd1504ec458c
|
[] |
no_license
|
ahrav/Python-Django
|
6be3e3b5a39a6eabcf2b97b071232f8b85de64d3
|
8a2a3f706aab557b872f27e780bd7e4ebd274f72
|
refs/heads/master
| 2022-09-09T01:29:31.391833 | 2019-05-23T03:34:15 | 2019-05-23T03:34:15 | 181,137,783 | 0 | 0 | null | 2022-08-23T00:22:08 | 2019-04-13T07:40:44 |
Python
|
UTF-8
|
Python
| false | false | 670 |
py
|
class Node:
    """One element of a singly linked list: a payload plus a successor link."""
    def __init__(self, data):
        # A freshly created node is detached: its successor is None.
        self.data, self.next = data, None
class LinkedList:
    """Singly linked list with O(1) head insertion and a middle-element query."""
    def __init__(self):
        self.head = None

    def push(self, new_data):
        """Insert *new_data* at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def printMiddle(self):
        """Return the data of the middle node, or None if the list is empty.

        For even-length lists this returns the later of the two middle
        elements. Uses the slow/fast two-pointer walk: the fast pointer
        advances two nodes for every one the slow pointer takes, so the slow
        pointer sits at the middle when the fast pointer runs off the end.
        """
        # Bug fix: the original dereferenced slow_ptr.data even when the list
        # was empty (slow_ptr is None), raising AttributeError. Return None.
        if self.head is None:
            return None
        slow_ptr = self.head
        fast_ptr = self.head
        while fast_ptr is not None and fast_ptr.next is not None:
            fast_ptr = fast_ptr.next.next
            slow_ptr = slow_ptr.next
        return slow_ptr.data
|
[
"[email protected]"
] | |
4b30d61e07bfa3a4839fcb6fe9d0d2e52479a80d
|
401f783a202949adbf144b5780bcd87a6daf2299
|
/code/python/Day-69/SnakeGame.py
|
c61b7e9c28275ea314027b26a33f30786ac67215
|
[] |
no_license
|
TalatWaheed/100-days-code
|
1934c8113e6e7be86ca86ea66c518d2f2cedf82a
|
b8fd92d4ddb6adc4089d38ac7ccd2184f9c47919
|
refs/heads/master
| 2021-07-04T14:28:45.363798 | 2019-03-05T13:49:55 | 2019-03-05T13:49:55 | 140,101,486 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,380 |
py
|
# SNAKES GAME
# Use ARROW KEYS to play, SPACE BAR for pausing/resuming and Esc Key for exiting
# Python 2 script: win.timeout relies on integer division (len(snake)/5).
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
from random import randint
# Set up curses: a 20x60 playfield window, keypad escape decoding, no echo,
# hidden cursor, and non-blocking input so the snake moves without keypresses.
curses.initscr()
win = curses.newwin(20, 60, 0, 0)
win.keypad(1)
curses.noecho()
curses.curs_set(0)
win.border(0)
win.nodelay(1)
key = KEY_RIGHT                 # initial movement direction
score = 0
# The snake is a list of [y, x] cells, head first; food is one [y, x] cell.
snake = [[4,10], [4,9], [4,8]]
food = [10,20]
win.addch(food[0], food[1], '*')
while key != 27:                # 27 == Esc quits the game
    win.border(0)
    win.addstr(0, 2, 'Score : ' + str(score) + ' ')
    win.addstr(0, 27, ' SNAKE ')
    # Shorter input timeout (i.e. faster snake) as the snake grows.
    win.timeout(150 - (len(snake)/5 + len(snake)/10)%120)
    prevKey = key
    event = win.getch()
    key = key if event == -1 else event    # -1 means no key was pressed
    if key == ord(' '):                    # space pauses until space again
        key = -1
        while key != ord(' '):
            key = win.getch()
        key = prevKey
        continue
    if key not in [KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN, 27]:
        key = prevKey                      # ignore any non-game key
    # Grow a new head one cell in the current direction.
    snake.insert(0, [snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1), snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1)])
    # Wrap the head around the playfield edges.
    if snake[0][0] == 0: snake[0][0] = 18
    if snake[0][1] == 0: snake[0][1] = 58
    if snake[0][0] == 19: snake[0][0] = 1
    if snake[0][1] == 59: snake[0][1] = 1
    # Exit if snake crosses the boundaries (Uncomment to enable)
    #if snake[0][0] == 0 or snake[0][0] == 19 or snake[0][1] == 0 or snake[0][1] == 59: break
    if snake[0] in snake[1:]: break        # game over: snake hit itself
    if snake[0] == food:
        # Food eaten: keep the new head (the snake grows by one) and respawn
        # the food on a random cell not occupied by the snake.
        food = []
        score += 1
        while food == []:
            food = [randint(1, 18), randint(1, 58)]
            if food in snake: food = []
        win.addch(food[0], food[1], '*')
    else:
        # No food eaten: drop the tail so the snake's length stays constant.
        last = snake.pop()
        win.addch(last[0], last[1], ' ')
    win.addch(snake[0][0], snake[0][1], '#')
curses.endwin()
print("\nScore - " + str(score))
print("http://bitemelater.in\n")
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.