blob_id (string, 40) | directory_id (string, 40) | path (string, 3–288) | content_id (string, 40) | detected_licenses (list, 0–112) | license_type (string, 2 classes) | repo_name (string, 5–115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 – 12.7k) | extension (string, 142 classes) | content (string, 128 – 8.19k) | authors (list, 1) | author_id (string, 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d51f24f75bb3b6b21fb1210c3409e1c3063acde
|
f7778bf3b8173915c97193f51ff8a1ac2260a68a
|
/Section 3 code files/Code/webdirbuster.py
|
8645d7b32c1044ce087596ec1ac46444c4785168
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-for-Automating-Information-Security
|
35f5ab480c430788e881017ec8c919be1524cc30
|
d6d1eaa053c3a5f5b103e17fefe8b4d9b33c0858
|
refs/heads/master
| 2023-05-25T12:34:43.912975 | 2023-01-30T09:16:51 | 2023-01-30T09:16:51 | 245,961,846 | 26 | 24 |
MIT
| 2023-05-22T22:44:20 | 2020-03-09T06:39:43 |
Python
|
UTF-8
|
Python
| false | false | 3,798 |
py
|
import argparse
import json
import re
import requests
from typing import List
from urllib3.exceptions import InsecureRequestWarning
quiet = False
def print_message(message: str):
"""
Print message to STDOUT if the quiet option is set to False (this is the default).
:param message: message to print
:return: None
"""
global quiet
if not quiet:
print(message)
def enumerate(base_url: str, dirs_file: str, recurse=False) -> List:
"""
Enumerate valid directories reachable via HTTP/HTTPS.
:param base_url: base URL to search
:param dirs_file: file containing names of commonly hosted directories
:param recurse: whether or not to recursively enumerate discovered directories
:return: list containing valid, reachable URLs
"""
# suppress insecure HTTPS warning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
valid_urls = []
with open(dirs_file, 'r') as f:
while True:
tmp_dir = f.readline()
if not tmp_dir:
break
tmp_dir = tmp_dir.strip()
if tmp_dir == '':
test_url = base_url
else:
if re.search(r'/$', base_url):
test_url = '{}{}'.format(base_url, tmp_dir)
else:
test_url = '{}/{}'.format(base_url, tmp_dir)
print_message('Checking {}'.format(test_url))
result = requests.get('{}'.format(test_url), verify=False)
if result.status_code == 200:
url = result.url
print_message('Found URL: {}'.format(url))
valid_urls.append(url)
if recurse and tmp_dir != '':
recurse_results = enumerate(url, dirs_file, recurse)
valid_urls.extend(recurse_results)
return valid_urls
def main():
"""
Main logic.
:return: None
"""
global quiet
parser = argparse.ArgumentParser(description='A smart-ish web directory enumeration tool.')
parser.add_argument('url', help='Base URL to search (must start with http:// or https://)')
parser.add_argument('dirs_file', help='File containing directory names to enumerate')
parser.add_argument('-r', '--recurse', help='Recursively enumerate subdirectories of discovered directories',
action='store_true')
parser.add_argument('-o', '--output', help='Output file to write to')
parser.add_argument('-f', '--format', help='Output format (default is json)', default='json',
choices=['json', 'plain'])
parser.add_argument('-q', '--quiet', help='Do not print informative messages', action='store_true')
args = parser.parse_args()
base_url = args.url
if not re.search(r'^https?://', base_url):
print('Error, url parameter must begin with either http:// or https://')
return
dirs_file = args.dirs_file
recurse = args.recurse
output = args.output
output_format = args.format
quiet = args.quiet
print_message('Enumerating web directories.')
valid_urls = list(set(enumerate(base_url, dirs_file, recurse)))
# print results
if output:
print_message('Writing output to {}.'.format(output))
with open(output, 'w') as of:
if output_format == 'json':
json.dump(valid_urls, of, indent=2)
else:
for line in valid_urls:
of.write('{}\n'.format(line))
else:
print_message('Writing output to STDOUT.')
if output_format == 'json':
print(json.dumps(valid_urls, indent=2))
else:
for line in valid_urls:
print(line)
if __name__ == '__main__':
main()
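# Usage sketch (not part of the original file; URL and wordlist name are illustrative):
#   python webdirbuster.py https://example.com dirs.txt -r -f json -o found.json
# dirs.txt holds one candidate directory name per line; an empty line makes
# enumerate() test the base URL itself.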
|
[
"[email protected]"
] | |
34fc5d7be9fdfc130eb473c15b4b7bdb80a10ee2
|
463c053bcf3f4a7337b634890720ea9467f14c87
|
/python/ray/workflow/tests/test_lifetime.py
|
ece91c8445d32ee09fe290f16ece2e35641d73c0
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
pdames/ray
|
e8faddc4440976211a6bcead8f8b6e62c1dcda01
|
918d3601c6519d333f10910dc75eb549cbb82afa
|
refs/heads/master
| 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 |
Apache-2.0
| 2023-01-14T08:02:21 | 2020-03-06T20:59:04 |
Python
|
UTF-8
|
Python
| false | false | 1,596 |
py
|
import os
import ray
import time
import pytest
from ray._private.test_utils import (
run_string_as_driver_nonblocking,
run_string_as_driver,
)
from ray.tests.conftest import * # noqa
from ray import workflow
from unittest.mock import patch
driver_script = """
import time
import ray
from ray import workflow
@ray.remote
def foo(x):
time.sleep(1)
if x < 20:
return workflow.continuation(foo.bind(x + 1))
else:
return 20
if __name__ == "__main__":
ray.init(storage="{}")
output = workflow.create(foo.bind(0)).run_async(workflow_id="driver_terminated")
time.sleep({})
"""
def test_workflow_lifetime_1(workflow_start_cluster):
# Case 1: driver exits normally
address, storage_uri = workflow_start_cluster
with patch.dict(os.environ, {"RAY_ADDRESS": address}):
ray.init(storage=storage_uri)
run_string_as_driver(driver_script.format(storage_uri, 5))
output = workflow.get_output("driver_terminated")
assert ray.get(output) == 20
def test_workflow_lifetime_2(workflow_start_cluster):
# Case 2: driver terminated
address, storage_uri = workflow_start_cluster
with patch.dict(os.environ, {"RAY_ADDRESS": address}):
ray.init(storage=storage_uri)
proc = run_string_as_driver_nonblocking(driver_script.format(storage_uri, 100))
time.sleep(10)
proc.kill()
time.sleep(1)
output = workflow.get_output("driver_terminated")
assert ray.get(output) == 20
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[
"[email protected]"
] | |
56da124f05b01e70233a87435baf0156aca9e476
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/own_year/new_thing.py
|
49f9081472a90473e35618be695869cba50090c3
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
#! /usr/bin/env python
def way_or_great_work(str_arg):
same_way_or_group(str_arg)
print('year')
def same_way_or_group(str_arg):
print(str_arg)
if __name__ == '__main__':
way_or_great_work('few_point_or_different_thing')
|
[
"[email protected]"
] | |
504420d710edfc66b1f1a47c0510b3db3d98bd57
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/remove-invalid-parentheses.py
|
dcc6a2c8e851f973a63f3c1085274a85e39313e6
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 |
C++
|
UTF-8
|
Python
| false | false | 2,367 |
py
|
# Time: O(C(n, c)), trying out all possible strings obtainable with the minimum c deletions.
# Space: O(c), the recursion depth is at most c, and each level costs O(n).
class Solution(object):
def removeInvalidParentheses(self, s):
"""
:type s: str
:rtype: List[str]
"""
# Calculate the minimum left and right parentheses to remove
def findMinRemove(s):
left_removed, right_removed = 0, 0
for c in s:
if c == '(':
left_removed += 1
elif c == ')':
if not left_removed:
right_removed += 1
else:
left_removed -= 1
return (left_removed, right_removed)
# Check whether s is valid or not.
def isValid(s):
sum = 0
for c in s:
if c == '(':
sum += 1
elif c == ')':
sum -= 1
if sum < 0:
return False
return sum == 0
def removeInvalidParenthesesHelper(start, left_removed, right_removed):
if left_removed == 0 and right_removed == 0:
tmp = ""
for i, c in enumerate(s):
if i not in removed:
tmp += c
if isValid(tmp):
res.append(tmp)
return
for i in xrange(start, len(s)):
if right_removed == 0 and left_removed > 0 and s[i] == '(':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed - 1, right_removed)
del removed[i]
elif right_removed > 0 and s[i] == ')':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed, right_removed - 1)
del removed[i]
res, removed = [], {}
(left_removed, right_removed) = findMinRemove(s)
removeInvalidParenthesesHelper(0, left_removed, right_removed)
return res
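# Illustrative checks (not in the original file; result order may differ):
#   Solution().removeInvalidParentheses("()())()")  # -> ["(())()", "()()()"]
#   Solution().removeInvalidParentheses(")(")       # -> [""]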
|
[
"[email protected]"
] | |
2a0c7b9841901436c823d4d5e7c6ff16f4b4e7cc
|
38f765bc213d2c90e46f22922a7425cba28e6f00
|
/fetchr/packages/amplify.py
|
6632a0726d9f8c686a9b5f19a4ddb18a79ad98d3
|
[] |
no_license
|
yejianye/fetchr
|
0d0572dc87beba93c7de3ece625682a4d753626e
|
90f8922617d35fcf24d902f21af398009d80ded4
|
refs/heads/master
| 2021-01-10T02:48:41.737322 | 2013-01-19T09:10:12 | 2013-01-19T09:10:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
from fetchr.packages.base import package, SimplePackage
@package
class Amplify(SimplePackage):
"""A set of tools solve problems of request, store, pub/sub"""
version = '1.1.0'
@property
def cdn_urls(self):
return ['//cdnjs.cloudflare.com/ajax/libs/amplifyjs/$version/amplify.min.js']
|
[
"[email protected]"
] | |
139b52a654e7e288a4b0a4ebaec109926cb274a6
|
2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f
|
/aws.cloudwatch.EventPermission.organization-access-python/__main__.py
|
96a5858ff755253665d97a47249d9fe835f082f2
|
[] |
no_license
|
ehubbard/templates-aws
|
e323b693a18234defe6bd56ffcc64095dc58e3a1
|
2ae2e7a5d05490078017fed6d132dcdde1f21c63
|
refs/heads/master
| 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
import pulumi
import pulumi_aws as aws
organization_access = aws.cloudwatch.EventPermission("organizationAccess",
condition={
"key": "aws:PrincipalOrgID",
"type": "StringEquals",
"value": aws_organizations_organization["example"]["id"],
},
principal="*",
statement_id="OrganizationAccess")
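# Note: aws_organizations_organization is not defined in this snippet; it mirrors
# a Terraform reference. In a self-contained program the value would be fetched,
# e.g. (assuming the standard pulumi_aws data source):
#   org = aws.organizations.get_organization()
#   value = org.id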
|
[
"[email protected]"
] | |
9c2d9a898db1b9765259f287859f7910b04c5de5
|
c2092dbf89e74e1484f0468d21badfda2eafe19d
|
/backend/users/migrations/0002_auto_20201113_0020.py
|
2383bc55cf5cafb959552769c381e49d82d28c70
|
[] |
no_license
|
crowdbotics-apps/jd-searcher-22572
|
1a47422d2c4f393562fc4389422b0906fed594b8
|
a194536595837400bf0e1fe0c1be5bbd262ff6b2
|
refs/heads/master
| 2023-01-06T21:14:55.102493 | 2020-11-13T00:20:57 | 2020-11-13T00:20:57 | 312,428,665 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,275 |
py
|
# Generated by Django 2.2.17 on 2020-11-13 00:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='timestamp_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"[email protected]"
] | |
1a95d366947058c89f9419baffce0086c13280a6
|
36978086cf5f34e16ceac7c2649b49ccb4c5ac90
|
/config/munin/mongodb_replset_lag
|
0c2f3ed4bdbabde170d68abc6b6e9b74d14b19de
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
aragilar/NewsBlur
|
04e754093cd52bc2d9957ea767747d6d604dfbba
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
refs/heads/master
| 2021-08-28T17:39:50.734396 | 2013-06-06T01:52:20 | 2013-06-06T01:52:37 | 10,520,281 | 0 | 0 |
MIT
| 2021-08-13T05:35:33 | 2013-06-06T06:26:24 |
Objective-C
|
UTF-8
|
Python
| false | false | 1,790 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from munin.mongodb import MuninMongoDBPlugin
PRIMARY_STATE = 1
SECONDARY_STATE = 2
class MongoReplicaSetLag(MuninMongoDBPlugin):
vlabel = "seconds"
title = "MongoDB Replica Set Lag"
fields = [("optimeLag", {'label': "Oldest secondary lag"}), ("oplogLength", {"label": "Primary oplog length" })]
def _get_oplog_length(self):
oplog = self.connection['local'].oplog.rs
last_op = oplog.find({}, {'ts': 1}).sort([('$natural', -1)]).limit(1)[0]['ts'].time
first_op = oplog.find({}, {'ts': 1}).sort([('$natural', 1)]).limit(1)[0]['ts'].time
oplog_length = last_op - first_op
return oplog_length
def _get_max_replication_lag(self):
status = self.connection.admin.command('replSetGetStatus')
members = status['members']
primary_optime = None
oldest_secondary_optime = None
for member in members:
member_state = member['state']
optime = member['optime']
if member_state == PRIMARY_STATE:
primary_optime = optime.time
elif member_state == SECONDARY_STATE:
if not oldest_secondary_optime or optime.time < oldest_secondary_optime:  # oldest_secondary_optime already holds a plain timestamp
oldest_secondary_optime = optime.time
if not primary_optime or not oldest_secondary_optime:
raise Exception("Replica set is not healthy")
return primary_optime - oldest_secondary_optime
def execute(self):
oplog_length = self._get_oplog_length()
replication_lag = self._get_max_replication_lag()
return {
"optimeLag": replication_lag,
"oplogLength": oplog_length
}
if __name__ == "__main__":
MongoReplicaSetLag().run()
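# Deployment sketch (assuming a standard munin-node setup): the script is
# symlinked into /etc/munin/plugins/ and can be smoke-tested manually with
#   munin-run mongodb_replset_lag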
|
[
"[email protected]"
] | ||
4680394e14442b9e016dc3834172a4f40eede73b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/239/32114/submittedfiles/swamee.py
|
aac7bfcabaa9fa811d2304d01539530e593c0d46
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
# -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
f = float(input("Digite aqui o valor de f: "))
l = float(input("Digite aqui o valor de l: "))
q = float(input("Digite aqui o valor de q: "))
DH = float(input("Digite aqui o valor de Delta H: "))
v = float(input("Digite aqui o valor de v: "))
g = 9.81
e = 0.000002
pi = 3.14
D = math.sqrt(8*f*l*q**q)/(pi*pi*g*DH)
print("D=%.4f"%D)
Rey = (4*q)/(pi*D*v)
print("Rey=%.4f"%Rey)
k = 0.25/(math.log10((e)/(3.7*D))+(5.74)/((Rey**0.9))**2)
print("k=%.4f"%k)
|
[
"[email protected]"
] | |
dbf52a834f34fa4f3c3318bcab831ea4e23f15a0
|
2748d523c4ced916b61e8f2a0ebd6c7237705f69
|
/core/forms.py
|
8d56e99d1aec9c278839b44ba66ef4cdee9daa37
|
[] |
no_license
|
Titowisk/meubaz
|
52d5101bc107081c7175f27bb538efc6fecf5b24
|
1af586195123ffd13818695cff8cc286018a1c7b
|
refs/heads/master
| 2021-08-23T16:47:54.960522 | 2017-12-05T19:02:09 | 2017-12-05T19:02:09 | 106,558,018 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,337 |
py
|
from django import forms
from django.conf import settings
from django.core.mail import send_mail
class ContactForm(forms.Form):
name = forms.CharField(label="Nome", max_length=100)
email = forms.EmailField(label="E-mail")
message = forms.CharField(label="Mensagem", widget=forms.Textarea)
def send_mail(self):
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
message = "Nome: {0}\nEmail: {1}\nMensagem: {2}".format(name, email, message)
send_mail(
subject='Contato do MeuBaz',
message=message,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[settings.DEFAULT_FROM_EMAIL]
)
"""
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['message'].widget.attrs['class'] = 'form-control'
self.fields['message'].widget.attrs['rows'] = '4'
"""
# self.fields['name'] accesses the field
# <input type="text" name="name" maxlength="100" required="" id="id_name">
# .widget.attrs[''] = '' can add attributes to the field's rendered input
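# Usage sketch (view-layer code, not part of this file; values illustrative):
#   form = ContactForm({'name': 'Ana', 'email': '[email protected]', 'message': 'Oi'})
#   if form.is_valid():  # populates cleaned_data, which send_mail() reads
#       form.send_mail()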
|
[
"[email protected]"
] | |
ed3962679f3569de0efc57197373f7139220afbe
|
be0edc20433a6ad3bf4b8f448f1c457437de4c52
|
/huxley/core/admin/delegate.py
|
6f7e07e1c80d5f269090bfe38f1d8dd13775523a
|
[
"BSD-3-Clause"
] |
permissive
|
ethanlee16/huxley
|
eca8c3c1d4ea543a5875c28d4cb5c81dc4e4eddb
|
5d601e952c711e9b6703170c78fb23fcc2734ead
|
refs/heads/master
| 2021-01-15T09:20:25.310737 | 2014-12-03T14:51:33 | 2014-12-03T14:51:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,306 |
py
|
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import csv
from django.conf.urls import patterns, url
from django.contrib import admin
from django.http import HttpResponse
from huxley.core.models import Delegate
class DelegateAdmin(admin.ModelAdmin):
def roster(self, request):
'''Return a CSV file representing the entire roster of registered
delegates, including their committee, country, and school.'''
roster = HttpResponse(content_type='text/csv')
roster['Content-Disposition'] = 'attachment; filename="roster.csv"'
writer = csv.writer(roster)
ordering = 'assignment__school__name'
for delegate in Delegate.objects.all().order_by(ordering):
writer.writerow([
delegate,
delegate.committee,
delegate.country,
delegate.school
])
return roster
def get_urls(self):
urls = super(DelegateAdmin, self).get_urls()
urls += patterns('',
url(
r'roster',
self.admin_site.admin_view(self.roster),
name='core_delegate_roster',
),
)
return urls
|
[
"[email protected]"
] | |
769c233947bb21e73d616adc9283780a1161b902
|
43277f3962edfd5f16d116a3ed35cc08000a0707
|
/modular/badger_utils/sacred/experiment_config_diff.py
|
5d352aa5b1beb2c014775fb68bf4df78bac4dffd
|
[] |
no_license
|
GoodAI/badger-2020
|
0cbeb60bf5b5fa2959504b1ba4489d5725646474
|
bb3822dbcbb04ed9c153c4deffa25a81011c8ce5
|
refs/heads/master
| 2021-07-15T12:49:44.227988 | 2021-03-02T19:06:06 | 2021-03-02T19:06:06 | 243,016,754 | 7 | 1 | null | 2020-08-10T13:13:51 | 2020-02-25T14:15:24 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,324 |
py
|
from typing import Dict, Any, List, Tuple
import pandas as pd
from badger_utils.view.config_utils import tuple_to_dict
class ExperimentConfigDiff:
_diff: Dict[List[Tuple[str, Any]], List[int]]
_common: Dict[str, Any]
def __init__(self, common: Dict[str, Any], diff: Dict[List[Tuple[str, Any]], List[int]]):
"""
Args:
common: dict of config vars, e.g. {'size': 10, 'epochs': 1000}
diff: dict with keys being list of tuples of ('name', 'value') of config and list of run_ids as value,
e.g. {[('n_experts', 4), ('n_inputs', 3)]: [23, 24], [('n_experts', 4), ('n_inputs', 2)]: [25]}
"""
self._common = common
self._diff = diff
def diff_as_df(self, explode_by_run_id: bool = False) -> pd.DataFrame:
"""
Returns:
DataFrame with columns named by config keys
plus one column "run_ids" where are stored comma separated run_ids
"""
df = pd.DataFrame([{**tuple_to_dict(r), **{'run_ids': v}} for r, v in self._diff.items()])
if explode_by_run_id:
df = df.explode('run_ids').astype({'run_ids': int}).set_index('run_ids')
df.index.name = None
return df
def diff_as_lines(self) -> List[str]:
"""
Returns:
List of one_line string representation for diff. Usable e.g. for a plot legend.
"""
return ExperimentConfigDiff.df_as_lines(self.diff_as_df())
def common_as_text(self, line_delimiter: str = '\n') -> str:
return line_delimiter.join([f'{k}: {v}' for k, v in self._common.items()])
def diff_filtered_run_ids(self, filter_dict: Dict[str, Any]) -> List[int]:
"""
Return list of run_ids for runs that match filter_dict. Only runs matching all filter conditions are selected.
Args:
filter_dict: Dict config_item -> expected_value. E.g. {'n_experts': 4, 'rollout_size': 8}
Returns:
List of run_ids
"""
filtered = self.filter_df(self.diff_as_df(), filter_dict)
return self.flatten(filtered['run_ids'])
@staticmethod
def filter_df(df: pd.DataFrame, filter_dict: Dict[str, Any]) -> pd.DataFrame:
for k, v in filter_dict.items():
df = df.loc[df[k] == v]
return df
@staticmethod
def flatten(l):
return [item for sublist in l for item in sublist]
@staticmethod
def df_as_lines(df: pd.DataFrame) -> List[str]:
"""
Convert DataFrame to list of strings representation
Args:
df: DataFrame to be converted
Returns:
List of one_line string representation for DataFrame. Usable e.g. for a plot legend.
"""
def format_config(r):
return ', '.join([f'{c}: {v}' for c, v in zip(r._fields, r)])
return [format_config(r) for r in df.itertuples(index=False, name='Row')]
@staticmethod
def df_as_description_runids_dict(df: pd.DataFrame) -> Dict[str, List[int]]:
result = {}
for idx, row in df.iterrows():
columns_values = [f'{name}: {row[name]}' for name in row.index if name != 'run_ids']
description = ', '.join(columns_values)
result[description] = row['run_ids']
return result
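# Usage sketch (values illustrative; assumes tuple_to_dict maps the key tuples to
# a dict). Note that dict keys must be hashable, so tuples of tuples are used
# here rather than the lists shown in the __init__ docstring:
#   diff = {(('n_experts', 4), ('n_inputs', 3)): [23, 24],
#           (('n_experts', 4), ('n_inputs', 2)): [25]}
#   ecd = ExperimentConfigDiff({'size': 10, 'epochs': 1000}, diff)
#   ecd.diff_filtered_run_ids({'n_inputs': 3})  # -> [23, 24]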
|
[
"[email protected]"
] | |
66335d806ccf0a4f3148e4dabc2ca3baa18b55b8
|
e1fada3a9846a5593e3d3d2fdc32b23b832e38b4
|
/tests/unit/cli/tools/test_eval.py
|
76eee501f89daf8095a5032255767270ab304ab5
|
[
"Apache-2.0"
] |
permissive
|
GalyaZalesskaya/openvino_training_extensions
|
fd1ebb189900008b16b85568449e5c62d8edbad5
|
6116639caeff100b06a6c10a96c7e7f5951f20c7
|
refs/heads/develop
| 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 |
Apache-2.0
| 2019-10-28T16:16:27 | 2019-08-15T15:41:59 |
Python
|
UTF-8
|
Python
| false | false | 4,079 |
py
|
import argparse
import pytest
from otx.cli.tools import eval as target_package
from otx.cli.tools.eval import get_args, main
from tests.test_suite.e2e_test_system import e2e_pytest_unit
@e2e_pytest_unit
def test_get_args(mocker):
mock_options = {
"--test-data-roots": "test/data/root",
"--load-weights": "weight/path",
"--save-performance": "save/path",
"--work-dir": "work/dir/path",
}
mock_command = ["otx"]
for key, value in mock_options.items():
mock_command.extend([key, value])
mocker.patch("sys.argv", mock_command)
mocker.patch.object(
target_package, "get_parser_and_hprams_data", return_value=[argparse.ArgumentParser(), {"param": "test"}, []]
)
mocker.patch.object(target_package, "add_hyper_parameters_sub_parser", return_value=argparse.ArgumentParser())
parsed_args, _ = get_args()
assert parsed_args.test_data_roots == "test/data/root"
assert parsed_args.load_weights == "weight/path"
assert parsed_args.save_performance == "save/path"
assert parsed_args.work_dir == "work/dir/path"
@pytest.fixture
def mock_args(mocker, tmp_path):
mock_args = mocker.MagicMock()
mock_args.test_data_roots = "fake_test_data_root"
mock_args.load_weights = "fake_load_weights.xml"
mock_args.save_performance = tmp_path / "save/performance.json"
mock_args.work_dir = tmp_path / "work_dir"
def mock_contains(self, val):
return val in self.__dict__
mock_args.__contains__ = mock_contains
mock_get_args = mocker.patch("otx.cli.tools.eval.get_args")
mock_get_args.return_value = [mock_args, []]
return mock_args
@pytest.fixture
def mock_config_manager(mocker):
mock_config_manager = mocker.patch.object(target_package, "ConfigManager")
mock_template = mocker.MagicMock()
mock_template.name = "fake_name"
mock_config_manager.return_value.template = mock_template
mock_config_manager.return_value.check_workspace.return_value = True
mock_config_manager.return_value.get_dataset_config.return_value = {}
mock_config_manager.return_value.get_hyparams_config.return_value = {}
return mock_config_manager
@pytest.fixture
def mock_dataset_adapter(mocker):
mock_dataset_adapter = mocker.patch("otx.cli.tools.eval.get_dataset_adapter")
mock_dataset = mocker.MagicMock()
mock_label_schema = mocker.MagicMock()
mock_dataset_adapter.return_value.get_otx_dataset.return_value = mock_dataset
mock_dataset_adapter.return_value.get_label_schema.return_value = mock_label_schema
return mock_dataset_adapter
@pytest.fixture
def mock_task(mocker):
mock_task_class = mocker.MagicMock()
mock_task = mocker.MagicMock()
mock_task_class.return_value = mock_task
mocker.patch.object(target_package, "get_impl_class", return_value=mock_task_class)
return mock_task
@e2e_pytest_unit
def test_main(
mocker,
mock_args,
mock_config_manager,
mock_dataset_adapter,
):
mocker.patch.object(
target_package,
"read_model",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"get_impl_class",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"get_dataset_adapter",
return_value=mock_dataset_adapter,
)
mocker.patch.object(
target_package,
"ResultSetEntity",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"InferenceParameters",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"Subset",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"TaskEnvironment",
return_value=mocker.MagicMock(),
)
mocker.patch("json.dump")
mocker.patch("builtins.open")
mock_get_args = mocker.patch("otx.cli.tools.eval.get_args")
mock_get_args.return_value = [mock_args, []]
ret = main()
assert ret["retcode"] == 0
|
[
"[email protected]"
] | |
cfe29c23297e0b8167a1f1a3e388e74ad9a83c5c
|
c8cd3dbcb783b6daad866be07be950bbc4cd9fe9
|
/boards/models.py
|
95ad56c05b421ac32e2b4d6d8490dcb0569a8431
|
[] |
no_license
|
pauloendoh/django-boards
|
d6b42b829dee0c96c4bda676da5e2ac1977f0922
|
640c0672b049d190213f5bf318f390b40e697262
|
refs/heads/master
| 2020-03-13T03:43:19.604777 | 2018-04-26T02:22:58 | 2018-04-26T02:22:58 | 130,949,060 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,223 |
py
|
from markdown import markdown
from django.db import models
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
import math
class Board(models.Model):
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_posts_count(self):
return Post.objects.filter(topic__board=self).count()
def get_last_post(self):
return Post.objects.filter(topic__board=self).order_by('-created_at').first()
class Topic(models.Model):
subject = models.CharField(max_length=255)
last_updated = models.DateTimeField(auto_now_add=True)
board = models.ForeignKey(Board, related_name='topics', on_delete=models.CASCADE)
starter = models.ForeignKey(User, related_name='topics', on_delete=models.CASCADE)
views = models.PositiveIntegerField(default=0) # <- here
def __str__(self):
return self.subject
def get_page_count(self):
count = self.posts.count()
pages = count / 20
return math.ceil(pages)
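# e.g. a topic with 45 posts gives math.ceil(45 / 20) = 3 pages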
def has_many_pages(self, count=None):
if count is None:
count = self.get_page_count()
return count > 6
def get_page_range(self):
count = self.get_page_count()
if self.has_many_pages(count):
return range(1, 5)
return range(1, count + 1)
def get_last_ten_posts(self):
return self.posts.order_by('-created_at')[:10]
class Post(models.Model):
message = models.TextField(max_length=4000)
topic = models.ForeignKey(Topic, related_name='posts', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, related_name='posts', on_delete=models.CASCADE)
updated_by = models.ForeignKey(User, null=True, related_name='+', on_delete=models.CASCADE)
def __str__(self):
truncated_message = Truncator(self.message)
return truncated_message.chars(30)
def get_message_as_markdown(self):
return mark_safe(markdown(self.message, safe_mode='escape'))
|
[
"[email protected]"
] | |
a56284f885fa48ed63884b6ce71bc6af019845e8
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/Others/past/past201912-open/g/main.py
|
54e6af8dac7ce0af88307994ac3cc8aa07ba57a3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 |
CC0-1.0
| 2023-09-14T21:59:38 | 2018-02-11T00:32:45 |
Python
|
UTF-8
|
Python
| false | false | 1,159 |
py
|
# -*- coding: utf-8 -*-
def get_pairs(group):
from itertools import combinations
pairs = list()
for p1, p2 in combinations(group, 2):
if p1 > p2:
continue
pairs.append((p1, p2))
return pairs
def main():
import sys
from itertools import product
input = sys.stdin.readline
n = int(input())
a = list()
for i in range(n):
ai = [0] * (i + 1) + list(map(int, input().split()))
a.append(ai)
groups = product(range(3), repeat=n)
ans = -(10**18)
for group in groups:
group_one = list()
group_two = list()
group_three = list()
for i, g in enumerate(group):
if g == 0:
group_one.append(i)
elif g == 1:
group_two.append(i)
else:
group_three.append(i)
pairs = get_pairs(group_one)
pairs += get_pairs(group_two)
pairs += get_pairs(group_three)
candidate = 0
for p1, p2 in pairs:
candidate += a[p1][p2]
ans = max(ans, candidate)
print(ans)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c3e09bab0bfed296d9c0504d22539054f33298af
|
3b60e6f4bbc011003ac4929f01eb7409918deb79
|
/Analysis_v1/Simulation/Pythia/RSG/CP2RSGPythia8/RSGfragments/RSGravitonToGammaGamma_kMpl01_M_4750_TuneCP2_13TeV_pythia8_cfi.py
|
fb6cecf4012af20bf07d74d1f2cf406820e124f7
|
[] |
no_license
|
uzzielperez/Analyses
|
d1a64a4e8730325c94e2bc8461544837be8a179d
|
1d66fa94763d7847011ea551ee872936c4c401be
|
refs/heads/master
| 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 |
C++
|
UTF-8
|
Python
| false | false | 1,157 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP2SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:all = on',
'ExtraDimensionsG*:kappaMG = 0.541643794389',
'5100039:m0 = 4750.0',
'5100039:onMode = off',
'5100039:onIfAny = 22',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP2Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
5f3de75aad1afc4cfe886a3e0fe4d562ec53a65a
|
1a03664e4dd5f5fb12434d32129e612a76bf6d61
|
/core/loaders.py
|
ec96d2b6531d033017d8a3b7ea1fae80ede688e7
|
[] |
no_license
|
SergioAnd95/refactored-adventure
|
6601dabaa0b7125a94f0010157e17862c84c1d32
|
553d1425b6d59f69b9c526eecff07df0f18835b4
|
refs/heads/master
| 2020-03-30T09:05:05.893448 | 2018-10-17T15:10:40 | 2018-10-17T15:10:40 | 151,059,439 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
import importlib
from settings import settings
def autodiscover_app_module(module_name):
for app in settings.INSTALLED_APPS:
importlib.import_module(f'{app}.{module_name}')
def discover_urls():
"""
Find and return all routes
from apps
:return: list
"""
urlpatterns = []
for app in settings.INSTALLED_APPS:
try:
_temp = __import__(f'{app}.urls', globals(), locals(), ['urlpatterns'], 0)
urlpatterns += _temp.urlpatterns
except ModuleNotFoundError:
pass
return urlpatterns
|
[
"[email protected]"
] | |
f975e0a4d12496012b500813cfc94786bb7d9803
|
644b13f90d43e9eb2fae0d2dc580c7484b4c931b
|
/2019 baekjoon/Math/1676_factorial2.py
|
2481524d4c177197c939031d86cfa79bd6a652e5
|
[] |
no_license
|
yeonnseok/ps-algorithm
|
c79a41f132c8016655719f74e9e224c0870a8f75
|
fc9d52b42385916344bdd923a7eb3839a3233f18
|
refs/heads/master
| 2020-07-09T11:53:55.786001 | 2020-01-26T02:27:09 | 2020-01-26T02:27:09 | 203,962,358 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 564 |
py
|
# def factorial(num):
# if num == 1:
# return 1
# return num * factorial(num - 1)
#
#
# def factorial_count(num):
# target = list(str(factorial(num)))
# count = 0
# for i in reversed(range(len(target))):
# if target[i] == '0':
# count += 1
# else:
# return count
#
#
# def main():
# num = int(input())
# print(factorial_count(num))
#
#
# main()
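# Count trailing zeros of num! directly: every trailing zero needs a factor of
# 10 = 2 * 5, and factors of 5 are the scarce ones, so sum floor(num/5) +
# floor(num/25) + ... (Legendre's formula for the prime 5).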
def main():
num = int(input())
i = 5
ans = 0
while i <= num:
ans += int(num/i)
i *= 5
print(ans)
main()
|
[
"[email protected]"
] | |
61a56e8f561435c4a10d86df63ea689a20d4c8df
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/220/users/4266/codes/1693_1879.py
|
55a2e7d2215d87de2d3bec79e75dfecd21f22dde
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 319 |
py
|
# When testing your solution, do not limit yourself to the example case.
extras = float(input("Digite o numero de horas extras: "))
faltou = float(input("Digite o numero de horas que faltou: "))
h = extras - ((1/4)*faltou)
if (h>400):
g = 500.0
else:
g = 100.0
print(extras, " extras e ", faltou, " de falta")
print("R$ ",g)
|
[
"[email protected]"
] | |
056be20ed1e3365c7fdde9a90eaa63c5dcb36b19
|
cce1e235c2c8e58d83af6dbadeb471ca62b710a1
|
/hackerrank/data_structures/linked_lists/print_in_reverse.py
|
2ce2ada1121b872bcca83b376be97f1e9c07e040
|
[] |
no_license
|
SebastianThomas1/coding_challenges
|
6b51ce046b458c44db809687b6809d16d066566f
|
bd3bc6be7a975b6255e4b2198c953d56bd74e75a
|
refs/heads/master
| 2023-03-03T00:18:00.147369 | 2021-02-08T21:52:02 | 2021-02-08T21:52:02 | 336,688,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 750 |
py
|
# Sebastian Thomas (coding at sebastianthomas dot de)
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list-in-reverse
#
# Print in Reverse
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
def reverse(head):
current_node = head
predecessor = None
while current_node:
successor = current_node.next
current_node.next = predecessor
predecessor = current_node
current_node = successor
return predecessor
def reverse_print(head):
head = reverse(head)
current_node = head
while current_node is not None:
print(current_node.data)
current_node = current_node.next
reverse(head)
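# Illustrative driver (values arbitrary): builds 1 -> 2 -> 3 and prints 3, 2, 1.
#   head = SinglyLinkedListNode(1)
#   head.next = SinglyLinkedListNode(2)
#   head.next.next = SinglyLinkedListNode(3)
#   reverse_print(head)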
|
[
"[email protected]"
] | |
828390637851af3ac878569b4c3b034030f07415
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/setting.py
|
0c3511543b950ff7b9b7c9005a2fe6122a67016d
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 9,314 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Setting:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'workers': 'int',
'batch_size': 'int',
'batch_delay_ms': 'int',
'queue_type': 'str',
'queue_check_point_writes': 'int',
'queue_max_bytes_mb': 'int'
}
attribute_map = {
'workers': 'workers',
'batch_size': 'batchSize',
'batch_delay_ms': 'batchDelayMs',
'queue_type': 'queueType',
'queue_check_point_writes': 'queueCheckPointWrites',
'queue_max_bytes_mb': 'queueMaxBytesMb'
}
def __init__(self, workers=None, batch_size=None, batch_delay_ms=None, queue_type=None, queue_check_point_writes=None, queue_max_bytes_mb=None):
"""Setting
The model defined in huaweicloud sdk
:param workers: 并行执行管道的Filters+Outputs阶段的工作线程数,默认值为CPU核数。
:type workers: int
:param batch_size: 单个工作线程在尝试执行其Filters和Outputs之前将从inputs收集的最大事件数,该值较大通常更有效,但会增加内存开销,默认为125。
:type batch_size: int
:param batch_delay_ms: 每个event被pipeline调度等待的最小时间。 单位毫秒。
:type batch_delay_ms: int
:param queue_type: 用于事件缓冲的内部队列模型。memory 为基于内存的传统队列,persisted为基于磁盘的ACKed持久化队列,默认值为memory。
:type queue_type: str
:param queue_check_point_writes: 如果使用持久化队列,则表示强制执行检查点之前写入的最大事件数,默认值为1024。
:type queue_check_point_writes: int
:param queue_max_bytes_mb: 如果使用持久化队列,则表示持久化队列的总容量(以兆字节MB为单位),确保磁盘的容量大于该值,默认值为1024。
:type queue_max_bytes_mb: int
"""
self._workers = None
self._batch_size = None
self._batch_delay_ms = None
self._queue_type = None
self._queue_check_point_writes = None
self._queue_max_bytes_mb = None
self.discriminator = None
if workers is not None:
self.workers = workers
if batch_size is not None:
self.batch_size = batch_size
if batch_delay_ms is not None:
self.batch_delay_ms = batch_delay_ms
self.queue_type = queue_type
if queue_check_point_writes is not None:
self.queue_check_point_writes = queue_check_point_writes
if queue_max_bytes_mb is not None:
self.queue_max_bytes_mb = queue_max_bytes_mb
@property
def workers(self):
"""Gets the workers of this Setting.
Number of worker threads that run the pipeline's Filters+Outputs stages in parallel; defaults to the number of CPU cores.
:return: The workers of this Setting.
:rtype: int
"""
return self._workers
@workers.setter
def workers(self, workers):
"""Sets the workers of this Setting.
Number of worker threads that run the pipeline's Filters+Outputs stages in parallel; defaults to the number of CPU cores.
:param workers: The workers of this Setting.
:type workers: int
"""
self._workers = workers
@property
def batch_size(self):
"""Gets the batch_size of this Setting.
Maximum number of events a single worker thread collects from inputs before attempting to run its Filters and Outputs; larger values are usually more efficient but increase memory overhead. Defaults to 125.
:return: The batch_size of this Setting.
:rtype: int
"""
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size):
"""Sets the batch_size of this Setting.
Maximum number of events a single worker thread collects from inputs before attempting to run its Filters and Outputs; larger values are usually more efficient but increase memory overhead. Defaults to 125.
:param batch_size: The batch_size of this Setting.
:type batch_size: int
"""
self._batch_size = batch_size
@property
def batch_delay_ms(self):
"""Gets the batch_delay_ms of this Setting.
Minimum time, in milliseconds, that each event waits to be scheduled by the pipeline.
:return: The batch_delay_ms of this Setting.
:rtype: int
"""
return self._batch_delay_ms
@batch_delay_ms.setter
def batch_delay_ms(self, batch_delay_ms):
"""Sets the batch_delay_ms of this Setting.
Minimum time, in milliseconds, that each event waits to be scheduled by the pipeline.
:param batch_delay_ms: The batch_delay_ms of this Setting.
:type batch_delay_ms: int
"""
self._batch_delay_ms = batch_delay_ms
@property
def queue_type(self):
"""Gets the queue_type of this Setting.
Internal queue model used for event buffering. "memory" is the traditional in-memory queue; "persisted" is a disk-based ACKed persistent queue. Defaults to "memory".
:return: The queue_type of this Setting.
:rtype: str
"""
return self._queue_type
@queue_type.setter
def queue_type(self, queue_type):
"""Sets the queue_type of this Setting.
Internal queue model used for event buffering. "memory" is the traditional in-memory queue; "persisted" is a disk-based ACKed persistent queue. Defaults to "memory".
:param queue_type: The queue_type of this Setting.
:type queue_type: str
"""
self._queue_type = queue_type
@property
def queue_check_point_writes(self):
"""Gets the queue_check_point_writes of this Setting.
When the persistent queue is used, the maximum number of events written before a checkpoint is forced. Defaults to 1024.
:return: The queue_check_point_writes of this Setting.
:rtype: int
"""
return self._queue_check_point_writes
@queue_check_point_writes.setter
def queue_check_point_writes(self, queue_check_point_writes):
"""Sets the queue_check_point_writes of this Setting.
When the persistent queue is used, the maximum number of events written before a checkpoint is forced. Defaults to 1024.
:param queue_check_point_writes: The queue_check_point_writes of this Setting.
:type queue_check_point_writes: int
"""
self._queue_check_point_writes = queue_check_point_writes
@property
def queue_max_bytes_mb(self):
"""Gets the queue_max_bytes_mb of this Setting.
When the persistent queue is used, the total capacity of the persistent queue in megabytes (MB); make sure the disk capacity is larger than this value. Defaults to 1024.
:return: The queue_max_bytes_mb of this Setting.
:rtype: int
"""
return self._queue_max_bytes_mb
@queue_max_bytes_mb.setter
def queue_max_bytes_mb(self, queue_max_bytes_mb):
"""Sets the queue_max_bytes_mb of this Setting.
When the persistent queue is used, the total capacity of the persistent queue in megabytes (MB); make sure the disk capacity is larger than this value. Defaults to 1024.
:param queue_max_bytes_mb: The queue_max_bytes_mb of this Setting.
:type queue_max_bytes_mb: int
"""
self._queue_max_bytes_mb = queue_max_bytes_mb
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Setting):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
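# Usage sketch (values illustrative; attributes left unset serialize as None):
#   s = Setting(workers=4, batch_size=125, queue_type='memory')
#   s.to_dict()  # -> {'workers': 4, 'batch_size': 125, 'queue_type': 'memory', ...}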
|
[
"[email protected]"
] | |
8585c66c448b22e2add5a38e02bc37cc636d7286
|
395b2e9718eeb5035995130b2377c47b8df05614
|
/tests/attack/test_pgd.py
|
87973fe5b11c7e424ec3918db6a543beef229bab
|
[] |
no_license
|
gatheluck/fourier-attack
|
0a6d773e268bf1e480f04a43dcc72905af804b43
|
1668f0d2eed6182cb69904c49fe223e78cb5d0cc
|
refs/heads/master
| 2023-03-10T05:15:10.897205 | 2021-03-01T08:19:10 | 2021-03-01T08:19:10 | 320,191,916 | 1 | 0 | null | 2021-03-01T08:19:11 | 2020-12-10T07:21:19 |
Python
|
UTF-8
|
Python
| false | false | 2,035 |
py
|
import pathlib
from typing import Final
import torch
import torchvision
import fourier_attack.attack.pgd
from fourier_attack.util import Denormalizer
class TestPgdAttack:
def test__forward(
self, pretrained_cifar10_resnet50, cifar10_stats, normalize_cifar10_loader
):
input_size: Final = 32
num_iteration: Final = 8
eps_max: Final = 16.0
step_size: Final = eps_max / num_iteration
rand_init: Final = True
scale_eps: Final = True
scale_each: Final = True
avoid_target: Final = True
norms = {"linf", "l2"}
devices = set(["cuda"]) if torch.cuda.is_available() else set()
output_root: Final = pathlib.Path("logs/test/")
output_root.mkdir(exist_ok=True, parents=True)
model = pretrained_cifar10_resnet50
criterion_func = torch.nn.functional.cross_entropy
mean, std = cifar10_stats
for norm in norms:
for device in devices:
attacker = fourier_attack.attack.pgd.PgdAttack(
input_size,
mean,
std,
num_iteration,
eps_max,
step_size,
norm,
rand_init,
scale_eps,
scale_each,
avoid_target,
criterion_func,
device,
)
for x, t in normalize_cifar10_loader:
x, t = x.to(device), t.to(device)
batch_size = x.size(0)
x_adv = attacker(model, x, t)
denormalizer = Denormalizer(input_size, mean, std, device, False)
torchvision.utils.save_image(
denormalizer(x_adv), output_root / f"forward-pgd-{norm}.png"
)
assert x_adv.size() == torch.Size([batch_size, 3, 32, 32])
break # test only first batch
|
[
"[email protected]"
] | |
ee47a4b6d23e2e42d141640137a6287efceccf21
|
951f4f4611e5bf2dc3970cc38aa545a54b78690b
|
/google/cloud/billing/budgets_v1/services/budget_service/pagers.py
|
7dd43d59a473e66b6fa0df2247a29735ee27397a
|
[
"Apache-2.0"
] |
permissive
|
renovate-bot/python-billingbudgets
|
e54771361e1e9239697e23255b00f6551a1d18b7
|
2b1e66fa19415f56e33713d57fcc516efca6d03a
|
refs/heads/master
| 2023-06-08T01:23:04.490451 | 2021-08-18T15:42:00 | 2021-08-18T15:42:00 | 227,000,311 | 0 | 0 |
Apache-2.0
| 2019-12-10T01:13:52 | 2019-12-10T01:13:51 | null |
UTF-8
|
Python
| false | false | 5,860 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.billing.budgets_v1.types import budget_model
from google.cloud.billing.budgets_v1.types import budget_service
class ListBudgetsPager:
"""A pager for iterating through ``list_budgets`` requests.
This class thinly wraps an initial
:class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse` object, and
provides an ``__iter__`` method to iterate through its
``budgets`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBudgets`` requests and continue to iterate
through the ``budgets`` field on the
corresponding responses.
All the usual :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., budget_service.ListBudgetsResponse],
request: budget_service.ListBudgetsRequest,
response: budget_service.ListBudgetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.billing.budgets_v1.types.ListBudgetsRequest):
The initial request object.
response (google.cloud.billing.budgets_v1.types.ListBudgetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = budget_service.ListBudgetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[budget_service.ListBudgetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[budget_model.Budget]:
for page in self.pages:
yield from page.budgets
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBudgetsAsyncPager:
"""A pager for iterating through ``list_budgets`` requests.
This class thinly wraps an initial
:class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``budgets`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBudgets`` requests and continue to iterate
through the ``budgets`` field on the
corresponding responses.
All the usual :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[budget_service.ListBudgetsResponse]],
request: budget_service.ListBudgetsRequest,
response: budget_service.ListBudgetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.billing.budgets_v1.types.ListBudgetsRequest):
The initial request object.
response (google.cloud.billing.budgets_v1.types.ListBudgetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = budget_service.ListBudgetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[budget_service.ListBudgetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[budget_model.Budget]:
async def async_generator():
async for page in self.pages:
for response in page.budgets:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
[
"[email protected]"
] | |
c764dffcf73e377fbeab0b1e3fe032ab8004b975
|
5b19ced6bd173baf11c4b5e9d1c08f17ca635773
|
/Python/数字在排序数组中出现的次数.py
|
3f6a48c6ebcbfd2edb992331e21d261b5f5d29a5
|
[] |
no_license
|
zhaoqun05/Coding-Interviews
|
8efe579b6a1a6186107f599a31a9e96389df52f3
|
e05c1e6390b3df49dd02571e13fb8a3822eae649
|
refs/heads/master
| 2022-01-08T13:30:06.542796 | 2019-06-18T14:00:55 | 2019-06-18T14:00:55 | 282,934,693 | 2 | 0 | null | 2020-07-27T15:13:54 | 2020-07-27T15:13:53 | null |
UTF-8
|
Python
| false | false | 1,253 |
py
|
'''
Count how many times a number appears in a sorted array.
'''
# -*- coding:utf-8 -*-
class Solution:
def GetNumberOfK(self, data, k):
# The intuitive idea is a front-to-back linear scan, but algorithm problems almost never make sequential search the point being tested...
def getFirst(nums):
start, end = 0, len(nums) - 1
while start <= end:
mid = (start + end) // 2
if data[mid] >= k: # note: the two binary searches deliberately use different conditions
end = mid - 1
else:
start = mid + 1
# the pointer that can run out of bounds differs between the two helpers; return the one that stays in bounds
return start if start < len(nums) and nums[start] == k else -1
def getLast(nums):
start, end = 0, len(nums) - 1
while start <= end:
mid = (start + end) // 2
if data[mid] <= k:
start = mid + 1
else:
end = mid - 1
return end if end < len(nums) and nums[end] == k else -1
if not data: return 0
first, last = getFirst(data), getLast(data)
return last - first + 1 if first != -1 and last != -1 else 0
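# Illustrative checks:
#   Solution().GetNumberOfK([1, 2, 3, 3, 3, 4], 3)  # -> 3
#   Solution().GetNumberOfK([1, 2, 4], 3)           # -> 0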
|
[
"[email protected]"
] | |
104aa7236b2381b4af5aa9d9053a24f682ac2631
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/39/usersdata/86/15445/submittedfiles/dec2bin.py
|
0fccfb359d06440edc18ae24c0887321b3568f73
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
d = int(input('d:'))
soma=0
cont=0
while d>0:
soma=soma+d%2*10**cont
d=d//2
cont=cont+1
print(soma)
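# Worked example: d = 13 -> digits 13%2=1, 6%2=0, 3%2=1, 1%2=1, so soma ends as
# 1101 (the binary representation of 13, stored as a decimal number).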
|
[
"[email protected]"
] | |
847428dbe3d202faf10a5e562519c1f606de2698
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/third_party/catapult/telemetry/bin/run_snap_it_unittest
|
288474f3a3d87e0d7a4684e5fbb8c7beafe91870
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 |
BSD-3-Clause
| 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null |
UTF-8
|
Python
| false | false | 1,727 |
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
TELEMETRY_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.append(TELEMETRY_DIR)
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.util import wpr_modes
_SNAP_IT_TEST_URL = 'file:///%s' % (os.path.join(
util.GetCatapultThirdPartyDir(), 'snap-it', 'tests', 'tests.html'))
def RunSnapItUnittest(finder_options):
possible_browser = browser_finder.FindBrowser(finder_options)
with possible_browser.BrowserSession(
finder_options.browser_options) as browser:
tab = browser.tabs[0]
tab.Navigate(_SNAP_IT_TEST_URL)
tab.WaitForJavaScriptCondition('window.allTestDone')
num_tests = tab.EvaluateJavaScript('window.total')
failed_tests = tab.EvaluateJavaScript('window.failedTests')
for test in failed_tests:
print "Test '%s' failed" % test
if failed_tests:
print 'Failed %s tests (out of %s tests)' % (len(failed_tests), num_tests)
return 1
else:
print 'All %s tests passed' % num_tests
return 0
def main(args):
binary_manager.InitDependencyManager([])
options = browser_options.BrowserFinderOptions()
options.browser_options.extra_browser_args.add('--headless')
parser = options.CreateParser(usage="Run snap-it's unittests")
parser.parse_args(args)
return RunSnapItUnittest(options)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | ||
4e2d44d096408a816838502d7c6b3b8ddca6a483
|
737a67744a98a536eccf5e2012628271f9120f79
|
/django/integration/apps/coursesApp/urls.py
|
6d242cf48c1525964f3352f9758845e37abff9d8
|
[] |
no_license
|
iota-cohort-dc/Daniel-Perez
|
31d581cf6494d69404925685ca55ec9a9b97611c
|
57b6a69e4f6e02f8b0694787ab195e08ad5dc52b
|
refs/heads/master
| 2021-01-20T16:59:30.730460 | 2017-07-03T04:12:16 | 2017-07-03T04:12:16 | 82,850,732 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^$', views.index, name= "my_index"),
url(r'^addcourse$', views.addcourse),
url(r'^remove/(?P<id>\d+)$', views.remove, name= "remove"),
url(r'^remove/nah$', views.nah),
url(r'^delete/(?P<id>\d+)$', views.delete, name= "delete"),
url(r'^choose$', views.choose, name ="choose"),
url(r'^regUser$', views.regUser, name = 'regUser')
]
|
[
"[email protected]"
] | |
e6f473220e6ba826010f02a19ed5052645008b2f
|
179c9b2983ba3d4a3757f84fd55ac1356850c363
|
/jinete/algorithms/heuristics/local_search/strategies/routes/one_shift.py
|
67ed11e8558cde8355886ac88d94ef2aa23b27ae
|
[
"MIT"
] |
permissive
|
garciparedes/jinete
|
0d2fbf68a88b0ec565b8c1ed5c417f8f7cacceb0
|
6ed5687b2016aa7eb1f6499470c6ea21a9a57b8a
|
refs/heads/master
| 2023-03-29T00:35:34.575828 | 2020-09-19T11:02:05 | 2020-09-19T11:02:05 | 150,865,909 | 9 | 2 |
MIT
| 2023-03-16T21:41:17 | 2018-09-29T13:17:05 |
Python
|
UTF-8
|
Python
| false | false | 1,834 |
py
|
import logging
from ......models import (
Route,
Stop,
)
from ..abc import (
LocalSearchStrategy,
)
logger = logging.getLogger(__name__)
class OneShiftLocalSearchStrategy(LocalSearchStrategy):
def _improve(self) -> None:
logger.info(f'Starting to improve "Result" with "{self.__class__.__name__}"...')
for route in self._routes:
cost = self._objective.optimization_function(route)
for i in range(1, len(route.stops) - 1):
j = i + 1
k = i + 2
first = route.stops[i]
second = route.stops[j]
third = route.stops[k] if k < len(route.stops) else None
if not set(first.pickup_planned_trips).isdisjoint(second.delivery_planned_trips):
continue
self._flip(route, first, second, third)
if not route.feasible or cost == self._objective.best(cost, route):
self._flip(route, second, first, third)
continue
cost = self._objective.optimization_function(route)
logger.info(f'Flipped "{i}"-th and "{j}"-th stops from "{route}".')
def _flip(self, route: Route, previous: Stop, other: Stop, following: Stop = None) -> None:
assert following is None or following.previous == other
assert other.previous == previous
self_index = route.stops.index(other)
other_index = route.stops.index(previous)
route.stops[self_index], route.stops[other_index] = route.stops[other_index], route.stops[self_index]
if following is not None:
following.previous = previous
other.previous = previous.previous
previous.previous = other
for stop in route.stops[self_index:]:
stop.flush()
|
[
"[email protected]"
] | |
8cef7439cfc2680d9b2889fa9559a4c1184b7d58
|
f508da1fd2e65491e1e6b5dd3a64d8bf6039bc6c
|
/eho/openstack/common/jsonutils.py
|
f800779f11987bc90c847efa74d36a2629bee971
|
[
"Apache-2.0"
] |
permissive
|
senhuang/eho
|
b9595739b109829b44e6d538b36348ac84529af8
|
ca4dba1d0e7ab24d748b746e115ca6dc2191997a
|
refs/heads/master
| 2021-01-21T00:36:50.271363 | 2013-03-19T07:55:50 | 2013-03-19T07:55:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,110 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import xmlrpclib
from eho.openstack.common import timeutils
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
for test in nasty:
if test(value):
return unicode(value)
# value of itertools.count doesn't get caught by inspects
# above and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return unicode(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if isinstance(value, (list, tuple)):
return [recursive(v) for v in value]
elif isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
else:
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return unicode(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
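# A minimal usage sketch (the sample value below is assumed, not from the
# original module): dumps() falls back to to_primitive() for objects the
# stock json encoder rejects, such as datetimes.
if __name__ == '__main__':
    sample = {'when': datetime.datetime(2013, 3, 19, 7, 55, 50)}
    print(dumps(sample))  # the datetime is converted via timeutils.strtime()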
|
[
"[email protected]"
] | |
79ce11ae807730b501809588bdbc2b9dec1e9067
|
effce116340b7d937bd285e43b49e1ef83d56156
|
/data_files/662 Maximum Width of Binary Tree.py
|
ee04fcc76e296c46188c5e41ed7b3c9c54194e8b
|
[] |
no_license
|
DL2021Spring/CourseProject
|
a7c7ef57d69bc1b21e3303e737abb27bee3bd585
|
108cdd906e705e9d4d05640af32d34bfc8b124da
|
refs/heads/master
| 2023-04-11T18:52:30.562103 | 2021-05-18T09:59:59 | 2021-05-18T09:59:59 | 365,733,976 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 668 |
py
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        if not root:
            return 0
        ret = 0
        # BFS, tagging each node with a heap-style position index so that the
        # width of a level is the index distance between its two ends.
        q = [(0, root)]
        while q:
            cur_q = []
            left, right = q[0][0], q[-1][0]
            ret = max(ret, right - left + 1)
            for idx, node in q:
                if node.left:
                    cur_q.append((idx * 2, node.left))
                if node.right:
                    cur_q.append((idx * 2 + 1, node.right))
            q = cur_q
        return ret
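# Minimal usage sketch (the tree below is an assumed example, not part of the
# original file):
#       1
#      / \
#     3   2
#    /     \
#   5       9
# The last level spans indices 0..3, so the width is 4.
if __name__ == '__main__':
    root = TreeNode(1)
    root.left, root.right = TreeNode(3), TreeNode(2)
    root.left.left, root.right.right = TreeNode(5), TreeNode(9)
    print(Solution().widthOfBinaryTree(root))  # expected: 4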
|
[
"[email protected]"
] | |
881083827d5bbb4da11a5b9d7edec6b217bc34d4
|
033da72a51c76e5510a06be93229a547a538cf28
|
/Data Engineer with Python Track/21. Cleaning Data in SQL Server Databases/Chapter/04. Combining, splitting, and transforming data/01-Combining cities and states using +.py
|
c42e7743cf2ea24d935eb8842b2408c0e660b1ad
|
[] |
no_license
|
ikhwan1366/Datacamp
|
d5dcd40c1bfeb04248977014260936b1fb1d3065
|
7738614eaebec446842d89177ae2bc30ab0f2551
|
refs/heads/master
| 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,403 |
py
|
'''
Combining cities and states using +
In this lesson, you learned how to combine columns into one.
The clients table has one column, city, to store the cities where the clients live, and another column, state, to store the state of the city.
| client_id | client_name | client_surname | city | state |
|-----------|-------------|----------------|-----------|----------|
| 1 | Miriam | Antona | Las Vegas | Nevada |
| 2 | Astrid | Harper | Chicago | Illinois |
| 3 | David | Madden | Phoenix | Arizona |
| ... | ... | ... | ... | ... |
You need to combine city and state columns into one, to have the following format: 'Las Vegas, Nevada'.
You will use + operator to do it.
Instructions 1/2
50 XP
- Concatenate the names of the cities with the states using the + operator without worrying about NULL values.
'''
SELECT
client_name,
client_surname,
-- Concatenate city with state
city + ', ' + state AS city_state
FROM clients
'''
Instructions 2/2
50 XP
- Wrap city in ISNULL(), and wrap the whole ', ' + state expression in a second ISNULL(), so that a NULL in either column yields an empty string '' instead of turning the entire concatenation NULL; wrapping the comma together with state also avoids a dangling ', ' when state is NULL.
'''
SELECT
client_name,
client_surname,
-- Consider the NULL values
ISNULL(city, '') + ISNULL(', ' + state, '') AS city_state
FROM clients
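-- Worked example (assumed row, not from the lesson): for city = 'Las Vegas'
-- and state = NULL, the first query returns NULL because NULL propagates
-- through +, while the second returns 'Las Vegas' -- the ISNULL() around
-- ', ' + state collapses the whole suffix to ''.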
|
[
"[email protected]"
] | |
65f649c9c12a0a4648e29cef731ddb6c40fbe6ef
|
100193a599cd9961356b2c2ee13c734e467b9713
|
/cc/apps/coder/migrations/0003_auto__add_participant.py
|
e0a754b87b3635d299c77eea42dd6f992853ed21
|
[
"MIT"
] |
permissive
|
mavroskardia/codechallenge
|
bd3678003d933b834eddc1d36dda74e53b5afa52
|
a5fee4ba73be186d90daafca50819a6817ad3d27
|
refs/heads/master
| 2016-09-09T17:16:57.818465 | 2015-01-13T14:45:00 | 2015-01-13T14:45:00 | 15,058,542 | 0 | 0 | null | 2014-03-21T21:25:37 | 2013-12-09T20:42:54 |
Python
|
UTF-8
|
Python
| false | false | 6,721 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Participant'
db.create_table('coder_participant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('coder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['coder.Coder'])),
('challenge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge.Challenge'])),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 3, 10, 0, 0))),
))
db.send_create_signal('coder', ['Participant'])
# Removing M2M table for field challenges on 'Coder'
db.delete_table(db.shorten_name('coder_coder_challenges'))
def backwards(self, orm):
# Deleting model 'Participant'
db.delete_table('coder_participant')
# Adding M2M table for field challenges on 'Coder'
m2m_table_name = db.shorten_name('coder_coder_challenges')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('coder', models.ForeignKey(orm['coder.coder'], null=False)),
('challenge', models.ForeignKey(orm['challenge.challenge'], null=False))
))
db.create_unique(m2m_table_name, ['coder_id', 'challenge_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Group']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenge.challenge': {
'Meta': {'object_name': 'Challenge'},
'duration': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['coder.Coder']"})
},
'coder.coder': {
'Meta': {'object_name': 'Coder'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '256'}),
'tagline': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '1024'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'unique': 'True', 'to': "orm['auth.User']"}),
'xp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'coder.level': {
'Meta': {'object_name': 'Level'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'starting_xp': ('django.db.models.fields.BigIntegerField', [], {})
},
'coder.participant': {
'Meta': {'object_name': 'Participant'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge.Challenge']"}),
'coder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['coder.Coder']"}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 10, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'object_name': 'ContentType', 'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['coder']
|
[
"[email protected]"
] | |
f6de5f780c60294f59c4fd49a6ee574b9a0d8d34
|
1316cd6763e784811c769c1de577235c921af0de
|
/Apps/AlignOnBPMs/SAMPL/sourceCode/SAMPLcore/Components/ComponentBase.py
|
f2ed9848ab5bc4adadc7b8a0aa16629bf0f7f015
|
[] |
no_license
|
VELA-CLARA-software/Software
|
a6fb6b848584e5893fd6939a447d23134ce636cc
|
2e2a88ac0b2b03a495c868d2e11e6481e05097c3
|
refs/heads/master
| 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 |
Mathematica
|
UTF-8
|
Python
| false | false | 565 |
py
|
# SAM to Python Conversion
# DJS August 2017
# Version 0.1
#
from ..SAMPLlab import Beam
class ComponentBase(object):
def __init__(self, length=0, name="", aperture=[]):
#super(ComponentBase, self).__init__(**kwargs)
# device length, in meters
self.length = length
# device name, string
self.name = name
# 1x2 array of elliptical aperture half-axes, in metres
self.aperture = aperture
        # Each component stores the last beam that was tracked (TP added)
self.lastTrackedBeam = Beam.Beam()
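# Minimal subclassing sketch (Drift and its Track() method are assumed
# examples, not part of SAMPL): concrete elements inherit the
# length/name/aperture bookkeeping and the lastTrackedBeam slot.
class Drift(ComponentBase):
    def Track(self, beam):
        # a real component would propagate the beam over self.length here
        self.lastTrackedBeam = beam
        return beam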
|
[
"[email protected]"
] | |
9b2e55341c9f7148c5dfe553e2bec953871d0db2
|
377fc6e13101a2a45826cd118110c790f396a805
|
/utpc2014-a.py
|
9ac43857b565674f15f63bbc23a3181047f353ff
|
[] |
no_license
|
number09/atcoder
|
4076e7223f424b9923754e73992d6442e0bb0de7
|
f521ca1205b254d99744abaf6a7a5bfe69845fe0
|
refs/heads/master
| 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
# Collapse doubled 'not's, scanning the sentence right to left.
li_w = input().split()
li_answer = list()
flag = False  # becomes True once a word other than 'not' has been seen
for w in reversed(li_w):
    if w != 'not':
        flag = True
        li_answer.append(w)
    else:
        if flag == False:
            # 'not's at the very end of the sentence have nothing to negate
            li_answer.append(w)
        else:
            if li_answer[-1] == 'not':
                # an adjacent 'not not' pair cancels out
                li_answer = li_answer[:-1]
            else:
                li_answer.append(w)
print(' '.join(reversed(li_answer)))
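# I/O sketch (assumed sample): feeding
#   this is not not not a pen
# on stdin prints
#   this is not a pen
# because each adjacent 'not not' pair cancels.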
|
[
"[email protected]"
] | |
f98658569da2852dc39597141a14f473e098e230
|
d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3
|
/python/algorithm/leetcode/91.py
|
514aea9145662dfce4819b437de33ec85483955a
|
[] |
no_license
|
yanxurui/keepcoding
|
3e988c76b123d55b32cf7cc35fbffb12c4ccb095
|
d6b9f07e2d1437681fa77fee0687ea9b83cab135
|
refs/heads/master
| 2021-01-24T09:01:41.306597 | 2020-05-21T05:36:04 | 2020-05-21T05:36:04 | 93,400,267 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,072 |
py
|
# https://leetcode.com/problems/decode-ways/discuss/30358/Java-clean-DP-solution-with-explanation
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0:
return 0
table = [0] * (len(s)+1)
table[0] = 1
table[1] = 0 if s[0] == '0' else 1
for i in range(2, len(s)+1):
if int(s[i-1:i]) >= 1 and int(s[i-1:i]) <= 9:
table[i] += table[i-1]
if int(s[i-2:i]) >= 10 and int(s[i-2:i]) <= 26:
table[i] += table[i-2]
return table[len(s)]
if __name__ == '__main__':
from testfunc import test
test_data = [
(
"12",
2
),
(
"226",
3
),
(
'0',
0
),
(
'10',
1
),
(
'00',
0
),
(
'01',
0
)
]
test(Solution().numDecodings, test_data)
|
[
"[email protected]"
] | |
0812527774fef2d427c2e1e56a7966441f10632c
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_005_20180620140617.py
|
2ded808b8af91e0513d070dc92160645c14e005e
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,433 |
py
|
from random import randint
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, 6, 1, 4, 3, 7, 8, " "]
]
sudoku2 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
sudoku3 = [
[9, 8, 7, 4, 3, 2, 5, 6, 1],
[2, 4, 3, 5, 1, 6, 8, 7, 9],
[5, 6, 1, 7, 9, 8, 4, 3, 2],
[3, 9, 5, 6, 4, 7, 2, 1, 8],
[8, 2, 4, 3, 5, 1, 6, 9, 7],
[1, 7, 6, 2, 8, 9, 3, 4, 5],
[7, 1, 2, 8, 6, 3, 9, 5, 4],
[4, 3, 8, 9, 7, 5, 1, 2, 6],
[' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
line = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
i = i + 1
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
listaSudoku = [sudoku1, sudoku2, sudoku3]
sudoku_number = randint(0, 2)
print("dupa", sudoku_number)
sudoku = listaSudoku[sudoku_number]
#print("ktore = ", sudoku)
elif int(choice) == 1:
s = 1
sudoku = sudoku
elif int(choice) == 2:
sudoku = sudoku2
elif int(choice) == 3:
sudoku = sudoku3
while True:  # prints the Sudoku until it is solved
print("Your sudoku to solve:")
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
# sudoku =
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
column1 = 0
column2 = 0
try:
i = 0
list = []
while i < 9:
column = 0
for item in sudoku:
column = column + item[i]
list.append(column)
            # print(list)
            # print("Sum of column", i, "=", column)
i += 1
is45 = 0
for listElement in list:
if listElement == 45:
is45 = is45 + 1
# print("Ile kolumen OK", is45)
i = 0
for item in sudoku:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
break
except TypeError:
print()
|
[
"[email protected]"
] | |
8e4afcc449f15b7d6b73cbcd4fc8e9b213912c94
|
bc441bb06b8948288f110af63feda4e798f30225
|
/container_sdk/api/workload/get_summary_pb2.pyi
|
14f7cdbe1ae990f7ac049e645c1ccfa3bde880a7
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,748 |
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from container_sdk.model.container.pod_detail_pb2 import (
PodDetail as container_sdk___model___container___pod_detail_pb2___PodDetail,
)
from container_sdk.model.container.workload_pb2 import (
Workload as container_sdk___model___container___workload_pb2___Workload,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetSummaryRequest(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
instanceId = ... # type: typing___Text
def __init__(self,
*,
instanceId : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetSummaryRequest: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryRequest: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"instanceId",b"instanceId"]) -> None: ...
class GetSummaryResponse(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def workload(self) -> container_sdk___model___container___workload_pb2___Workload: ...
@property
def pods(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[container_sdk___model___container___pod_detail_pb2___PodDetail]: ...
def __init__(self,
*,
workload : typing___Optional[container_sdk___model___container___workload_pb2___Workload] = None,
pods : typing___Optional[typing___Iterable[container_sdk___model___container___pod_detail_pb2___PodDetail]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetSummaryResponse: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryResponse: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"workload",b"workload"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"pods",b"pods",u"workload",b"workload"]) -> None: ...
class GetSummaryResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> GetSummaryResponse: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[GetSummaryResponse] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetSummaryResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
|
[
"[email protected]"
] | |
fce03c8e1456ccbcd305145c27e222f513c4d844
|
c2f92d75d235ff5ed7b213c02c4a0657545ba02f
|
/oliveapp/home/urls.py
|
ab2bdd20079d2d83da9374cda7ebebccbcb65b68
|
[] |
no_license
|
cash2one/tstpthon
|
fab6112691eb15a8a26bd168af3f179913e0c4e0
|
fc5c42c024065c7b42bea2b9de1e3874a794a30d
|
refs/heads/master
| 2021-01-20T01:52:06.519021 | 2017-04-14T09:50:55 | 2017-04-14T09:50:55 | 89,338,193 | 0 | 1 | null | 2017-04-25T08:46:06 | 2017-04-25T08:46:06 | null |
UTF-8
|
Python
| false | false | 715 |
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from home import views
import os
css_media = os.path.join(
os.path.dirname(__file__),'templates/css/'
)
images_media = os.path.join(
os.path.dirname(__file__),'templates/images/'
)
js_media = os.path.join(
os.path.dirname(__file__),'templates/js/'
)
urlpatterns = patterns('',
url(r'^$', views.index, name='home'),
url(r'^images/(?P<path>.*)$','django.views.static.serve',{'document_root': images_media }),
url(r'^css/(?P<path>.*)$','django.views.static.serve',{'document_root': css_media }),
url(r'^js/(?P<path>.*)$','django.views.static.serve',{'document_root': js_media }),
)
|
[
"[email protected]"
] | |
f9a7d373fd0a22027404c7c536075e139ac3a6b3
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/site-packages/django/contrib/gis/db/models/__init__.py
|
9c0171e697081762f1b1e195bdfdad25bd682f41
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 |
C#
|
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:051de440079f22fe0ed4d92c8950944a1c2548ee0e08da1419a9fa7424462325
size 817
|
[
"[email protected]"
] | |
9bed6ed6c401fac8dd4b07157b505d6d45bf5404
|
0a65d42f4f0e491cb2aada408401b94909f821c2
|
/Attendance_Monitoring/hrg/hrg_hr/migrations/0007_auto_20200625_1027.py
|
f121581d75c52217f396b1f22613076fa26f7155
|
[] |
no_license
|
jmadlansacay/_Office
|
3acde7655784e91c7dcecfc853d4f36cdfeef028
|
7f46449b9f7e8e892e2e0025ba493259197fa592
|
refs/heads/main
| 2023-07-28T10:23:54.680822 | 2021-09-11T02:28:07 | 2021-09-11T02:28:07 | 379,155,026 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 519 |
py
|
# Generated by Django 2.2.5 on 2020-06-25 02:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hrg_hr', '0006_auto_20200625_1006'),
]
operations = [
migrations.AlterField(
model_name='tblmaster',
name='employeestatus',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hrg_hr_ref.employeestatuscode'),
),
]
|
[
"[email protected]"
] | |
a5016bfefccde0f94ae1caf65fdcdc907a1c441f
|
fa7f66e12223a11a17d42c9a672d03c845b604bd
|
/pyvisa/resources/helpers.py
|
4fa00202bba32d4cd5c69c74c7aa0cf9c8952d9e
|
[
"MIT"
] |
permissive
|
caryan/pyvisa
|
5756e65c42810553f6f4b9f14800b5007b9dee0a
|
1529fce2ac42ac8b47cf6f2c8ad1de22c9e88488
|
refs/heads/master
| 2020-12-03T03:31:59.688014 | 2014-08-29T22:44:24 | 2014-08-29T22:44:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,584 |
py
|
# -*- coding: utf-8 -*-
"""
pyvisa.resources.helpers
~~~~~~~~~~~~~~~~~~~~~~~~
Helper functions.
This file is part of PyVISA.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from ..compat import string_types
from .. import constants
def _redoc(attribute_name, doc, extra_doc=''):
if isinstance(attribute_name, string_types):
if doc is None:
doc = ''
if not doc.endswith('\n\n'):
doc += '\n\n'
doc += ':VISA Attribute: %s.' % attribute_name
if extra_doc:
doc += '\n' + extra_doc
attribute_name = getattr(constants, attribute_name)
return attribute_name, doc
def attr(attribute_name, doc=None, ro=False):
attribute_name, doc = _redoc(attribute_name, doc)
def getter(self):
return self.get_visa_attribute(attribute_name)
if ro:
return property(fget=getter, doc=doc)
def setter(self, value):
self.set_visa_attribute(attribute_name, value)
return property(fget=getter, fset=setter, doc=doc)
def enum_attr(attribute_name, enum_type, doc=None, ro=False):
attribute_name, doc = _redoc(attribute_name, doc,
':type: :class:%s.%s' % (enum_type.__module__, enum_type.__name__))
def getter(self):
return enum_type(self.get_visa_attribute(attribute_name))
if ro:
return property(fget=getter, doc=doc)
def setter(self, value):
if value not in enum_type:
            raise ValueError('%r is an invalid value for attribute %s, should be a %r'
                             % (value, attribute_name, enum_type))
self.set_visa_attribute(attribute_name, value)
return property(fget=getter, fset=setter, doc=doc)
def range_attr(attribute_name, min_value, max_value, doc=None, ro=False):
attribute_name, doc = _redoc(attribute_name, doc,
':range: %s <= value <= %s\n' % (min_value, max_value))
def getter(self):
return int(self.get_visa_attribute(attribute_name))
if ro:
return property(fget=getter, doc=doc)
def setter(self, value):
if not min_value <= value <= max_value:
            raise ValueError('%r is an invalid value for attribute %s, should be between %r and %r'
                             % (value, attribute_name, min_value, max_value))
self.set_visa_attribute(attribute_name, value)
return property(fget=getter, fset=setter, doc=doc)
def boolean_attr(attribute_name, doc=None, ro=False):
attribute_name, doc = _redoc(attribute_name, doc,
':type: bool')
def getter(self):
return self.get_visa_attribute(attribute_name) == constants.VI_TRUE
if ro:
return property(fget=getter, doc=doc)
def setter(self, value):
self.set_visa_attribute(attribute_name, constants.VI_TRUE if value else constants.VI_FALSE)
return property(fget=getter, fset=setter, doc=doc)
def char_attr(attribute_name, doc=None, ro=False):
attribute_name, doc = _redoc(attribute_name, doc,
':range: 0 <= x <= 255\n:type: int')
def getter(self):
return chr(self.get_visa_attribute(attribute_name))
if ro:
return property(fget=getter, doc=doc)
def setter(self, value):
self.set_visa_attribute(attribute_name, ord(value))
return property(fget=getter, fset=setter, doc=doc)
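# Minimal usage sketch (MyInstrument and the chosen attribute/range are
# assumed examples, not part of this module): the factories above are meant
# to be used as class-level property definitions on resource classes, e.g.
#
#     class MyInstrument(Resource):
#         timeout = range_attr('VI_ATTR_TMO_VALUE', 0, 4294967294,
#                              doc='I/O timeout in milliseconds.')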
|
[
"[email protected]"
] | |
26e8321387d7acb7136a76bcf11db56c990ad589
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/notifications/client/controls/notificationScrollContainer.py
|
430068214b444982858c2a3b313c78d2ca6a9572
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,742 |
py
|
#Embedded file name: notifications/client/controls\notificationScrollContainer.py
from carbonui.control.scrollContainer import ScrollContainer
from carbonui.primitives.base import ReverseScaleDpi, ScaleDpiF
from carbonui.primitives.container import Container
import carbonui.const as uiconst
from carbonui.primitives.frame import Frame
class NotificationScrollContainer(ScrollContainer):
entryLoadEnabled = True
contentHeight = 0
mainContTopHeight = (0, 0)
def ApplyAttributes(self, attributes):
ScrollContainer.ApplyAttributes(self, attributes)
self.mainCont.Close()
self.mainCont = Container(name='mainCont', parent=self.clipCont, state=uiconst.UI_NORMAL, align=uiconst.TOPLEFT)
self.mainContTopHeight = (0, 0)
self.mainCont._OnResize = self._OnMainContResize
def EnableEntryLoad(self):
self.entryLoadEnabled = True
self.LoadVisibleEntries()
def DisableEntryLoad(self):
self.entryLoadEnabled = False
def _OnMainContResize(self, *args):
newTopHeight = (self.mainCont.top, self.mainCont.height)
if newTopHeight != self.mainContTopHeight:
self.mainContTopHeight = newTopHeight
self.LoadVisibleEntries()
def LoadVisibleEntries(self):
if not self.entryLoadEnabled:
return
for each in self.mainCont.children:
self.LoadEntryIfVisible(each)
def LoadEntryIfVisible(self, entry):
topOffset = self.mainCont.top
visibleHeight = ReverseScaleDpi(self.clipCont.displayHeight)
if topOffset + entry.top + entry.height >= 0 and topOffset + entry.top <= visibleHeight:
entry.UpdateAlignmentAsRoot()
entry.LoadContent()
entry.display = True
else:
entry.display = False
def _OnVerticalScrollBar(self, posFraction):
posFraction = max(0.0, min(posFraction, 1.0))
self.mainCont.top = -posFraction * (self.mainCont.height - ReverseScaleDpi(self.clipCont.displayHeight))
def _InsertChild(self, idx, obj):
self.mainCont.children.insert(idx, obj)
contentWidth = ReverseScaleDpi(self.displayWidth)
minContentHeight = ReverseScaleDpi(self.clipCont.displayHeight)
self.mainCont.width = contentWidth
obj.top = self.contentHeight
obj.width = contentWidth
obj.displayY = ScaleDpiF(self.contentHeight)
obj.displayWidth = ScaleDpiF(contentWidth)
self.contentHeight += obj.height
self.mainCont.height = max(minContentHeight, self.contentHeight)
self._UpdateScrollbars()
self.LoadEntryIfVisible(obj)
def Flush(self):
ScrollContainer.Flush(self)
self.contentHeight = 0
|
[
"[email protected]"
] | |
794aec261c5a2a0b22f17b996021749049c4c913
|
396f93d8e73c419ef82a94174815a2cecbb8334b
|
/.history/tester2_20200321145335.py
|
eca006aa8c88a49574cbf23f7e9263b1bfe377c0
|
[] |
no_license
|
mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch
|
8c73d9448b916009c9431526864a4441fdeb682a
|
90b2dca920c85cddd7c1b3335344ac7b10a9b061
|
refs/heads/master
| 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
import os
import subprocess
import re
from datetime import datetime
import time
process = subprocess.Popen(['./algo_tabou.exe', '1000', '1000', '50', 'distances_entre_villes_50.txt'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
result = stdout.decode()  # decode the captured bytes for printing
print(result)
|
[
"[email protected]"
] | |
17049ed6b3705fb01190a0f5025f74cd714f8f3e
|
1b26d0023ad7eb302a9dd7d0d6696ef836c17c05
|
/HyeonJinGithub/2020-10-07/12100 2048 (Easy).py
|
20832b880e18018074fca0d6cc9aa019aaeb8ae8
|
[
"MIT"
] |
permissive
|
Team-NTO/NTO
|
93e643ddd3c6cad308f1f984aaa9abc43d9e3bb8
|
133f19e1e15e423589bd7b94b698d2afc76c3ef6
|
refs/heads/master
| 2023-06-23T06:26:16.374869 | 2021-07-11T06:43:08 | 2021-07-11T06:43:08 | 298,460,899 | 1 | 3 |
MIT
| 2021-07-11T06:43:09 | 2020-09-25T03:47:51 |
Java
|
UTF-8
|
Python
| false | false | 2,831 |
py
|
import sys
from copy import deepcopy
def print_max(arr):
global res
for i in range(len(arr)):
for j in range(len(arr[i])):
res = max(res, arr[i][j])
def dfs(tmp_board, n):
if n == 5:
print_max(tmp_board)
return
dfs(move_left(deepcopy(tmp_board)), n + 1)
dfs(move_right(deepcopy(tmp_board)), n + 1)
dfs(move_up(deepcopy(tmp_board)), n + 1)
dfs(move_down(deepcopy(tmp_board)), n + 1)
def move_left(board):
for i in range(N):
p = 0
x = 0
for j in range(N):
if board[i][j] == 0: continue
if x == 0:
x = board[i][j]
else:
if x == board[i][j]:
board[i][p] = x * 2
x = 0
p += 1
else:
board[i][p] = x
x = board[i][j]
p += 1
board[i][j] = 0
if x != 0: board[i][p] = x
return board
def move_right(board):
for i in range(N):
p = N - 1
x = 0
for j in range(N - 1, -1, -1):
if board[i][j] == 0: continue
if x == 0:
x = board[i][j]
else:
if x == board[i][j]:
board[i][p] = x * 2
p -= 1
x = 0
else:
board[i][p] = x
p -= 1
x = board[i][j]
board[i][j] = 0
if x != 0: board[i][p] = x
return board
def move_up(board):
for i in range(N):
p = 0
x = 0
for j in range(N):
if board[j][i] == 0: continue
if x == 0:
x = board[j][i]
else:
if x == board[j][i]:
board[p][i] = x * 2
p += 1
x = 0
else:
board[p][i] = x
p += 1
x = board[j][i]
board[j][i] = 0
if x != 0: board[p][i] = x
return board
def move_down(board):
for i in range(N):
p = N - 1
x = 0
for j in range(N - 1, -1, -1):
if board[j][i] == 0: continue
if x == 0:
x = board[j][i]
else:
if x == board[j][i]:
board[p][i] = x * 2
p -= 1
x = 0
else:
board[p][i] = x
p -= 1
x = board[j][i]
board[j][i] = 0
if x != 0: board[p][i] = x
return board
if __name__ == '__main__':
N = int(input())
a = [[int(x) for x in sys.stdin.readline().split()] for _ in range(N)]
res = 0
dfs(a, 0)
print(res)
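# I/O sketch (assumed judge format): stdin carries the board size N followed
# by N rows of N numbers, e.g.
#   3
#   2 2 2
#   4 4 4
#   8 8 8
# and the program prints the largest block value reachable within 5 moves.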
|
[
"[email protected]"
] | |
b2e11180449c5cbd4123a1f6c4e49af8b9b06064
|
ba3c06f9ae89479fa4987fe841ac09b5b5d71383
|
/python_for_kids/book/Examples/mean.py
|
1d733629a2eb40a1124f5270aca68952c682f52e
|
[] |
no_license
|
mary-tano/python-programming
|
6d806e25011e770a04a0922d0b71bf38c222d026
|
829654a3274be939fa529ed94ea568c12f7f1a27
|
refs/heads/master
| 2021-05-17T15:30:32.710838 | 2020-04-01T13:37:18 | 2020-04-01T13:37:18 | 250,846,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# Mean value
print("Enter a number: ", end="")
Sum = 0
Value = int(input())
for Number in range(1, Value + 1):
    Sum += Number
Mean = Sum / Value
print("Result: " + str(Mean))
|
[
"[email protected]"
] | |
358893e35bbd56734f3c0df20f6129c87583d727
|
abf4757a51e38b3cde6fc55b0251e77652521a2d
|
/models.py
|
2e4f0d45edd16ef57cc997df440fe3bae2aa42ca
|
[] |
no_license
|
stephenroller/relationstest
|
fd5284e6035682e0bfe1b13ff3c51dfec8e6f5ab
|
92b66e3dd800107b5489f662264f87d5d178af61
|
refs/heads/master
| 2021-01-21T11:45:20.095298 | 2016-01-26T23:24:21 | 2016-01-26T23:24:21 | 37,383,124 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,369 |
py
|
#!/usr/bin/env python
import numpy as np
from sklearn import svm, linear_model
from custom_classifiers import ThresholdClassifier
SETUPS = {
# baseline "unsupervised"
'cosine': ('threshold', 'cosine'),
# baseline memorizations
'lhs': ('linear', 'lhs'),
'rhs': ('linear', 'rhs'),
'concat': ('linear', 'concat'),
# asym models
'diff': ('linear', 'diff'),
'diffsq': ('linear', 'diffsq'),
# rb models
'diffrbf': ('rbf', 'diff'),
'concatrbf': ('rbf', 'concat'),
# others I dont want now
#('lhs', 'lr1', 'lhs'),
#('rhs', 'lr1', 'rhs'),
#('concat', 'lr1', 'concat'),
#('diff', 'lr1', 'diff'),
#('diffsq', 'lr1', 'diffsq'),
#('lhs', 'lr2', 'lhs'),
#('rhs', 'lr2', 'rhs'),
#('concat', 'lr2', 'concat'),
#('diff', 'lr2', 'diff'),
#('diffsq', 'lr2', 'diffsq'),
#('diffpoly', 'poly2', 'diff'),
}
def words2matrix(dataseries, space):
return np.array(list(dataseries.apply(lambda x: space[x])))
def generate_cosine_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
return np.array([np.sum(np.multiply(lhs, rhs), axis=1)]).T
def generate_diff_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
# difference vector
diff = rhs - lhs
return diff
def generate_diffsq_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
# difference vector
diff = rhs - lhs
# element wise squared diffs
diff_sq = np.power(diff, 2)
X = np.concatenate([diff, diff_sq], axis=1)
return X
def generate_concat_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
X = np.concatenate([lhs, rhs], axis=1)
return X
def generate_lhs_matrix(data, space):
    lhs = words2matrix(data.word1, space)
return lhs
def generate_rhs_matrix(data, space):
rhs = words2matrix(data.word2, space)
return rhs
def generate_feature_matrix(data, space, features):
if features == 'cosine':
X = generate_cosine_matrix(data, space)
elif features == 'lhs':
X = generate_lhs_matrix(data, space)
elif features == 'rhs':
X = generate_rhs_matrix(data, space)
elif features == 'concat':
X = generate_concat_matrix(data, space)
elif features == 'diff':
X = generate_diff_matrix(data, space)
elif features == 'diffsq':
X = generate_diffsq_matrix(data, space)
else:
raise ValueError("Can't generate %s features" % features)
y = data.entails.as_matrix()
return X, y
def classifier_factory(name):
if name == 'linear':
return svm.LinearSVC()
elif name == 'poly2':
return svm.SVC(kernel='poly', degree=2)
elif name == 'threshold':
return ThresholdClassifier()
elif name == 'rbf':
return svm.SVC(kernel='rbf')
elif name == 'lr2':
return linear_model.LogisticRegression(penalty='l2')
elif name == 'lr1':
return linear_model.LogisticRegression(penalty='l1')
elif name == 'levy':
# todo this
return None
else:
raise ValueError("Don't know about %s models." % name)
def load_setup(setupname):
kl, fe = SETUPS[setupname]
return classifier_factory(kl), fe
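# Minimal usage sketch ('diff' is one of the keys defined in SETUPS; the
# data/space objects are assumed to exist elsewhere): load_setup() pairs a
# classifier with the name of its feature generator.
if __name__ == '__main__':
    model, features = load_setup('diff')
    # X, y = generate_feature_matrix(data, space, features)
    # model.fit(X, y)
    print(model, features)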
|
[
"[email protected]"
] | |
599c16f31a7bdd1841ee6346e94922f0cb6d2c86
|
3ec50b750e788d018ff0f6afd3528350a8956f6a
|
/ch_04/tests/test_classifier.py
|
680a46568379375ea3e6dcaf7eb588014dcb498b
|
[
"MIT"
] |
permissive
|
jegarciaor/Python-Object-Oriented-Programming---4th-edition
|
45fb68f04c905a27865c40a48705da803fbdc27a
|
2f3d6b09326dab6a0488c72c96d7368bee28fef4
|
refs/heads/main
| 2023-06-05T05:02:54.428716 | 2021-06-26T15:23:11 | 2021-06-26T15:23:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,536 |
py
|
"""
Python 3 Object-Oriented Programming Case Study
Chapter 4, Expecting the Unexpected
"""
import base64
import csv
from pathlib import Path
from pytest import fixture
import classifier
@fixture(scope="module")
def app_client():
test_users = [
classifier.User(
username='noriko',
email='[email protected]',
real_name='Noriko K. L.',
role=classifier.Role.BOTANIST,
password='md5$H5W30kno$10a2327b2fce08c1ad0f65a12d40552f'
),
classifier.User(
username='emma',
email='[email protected]',
real_name='Emma K.',
role=classifier.Role.RESEARCHER,
password='md5$F8ZVxsuE$ebf71d15067ed7c887c0408550b671e2'
)
]
with classifier.app.app_context():
classifier.app.config['TESTING'] = True
classifier.app.config['USER_FILE'] = Path.cwd()/"test_data"
for u in test_users:
classifier.users.add_user(u)
yield classifier.app.test_client()
def test_health_check(app_client):
result = app_client.get("health")
assert result.status_code == 200
assert result.json == {
"status": "OK",
"user_count": 2,
"users": [
{
'email': '[email protected]',
'role': 'botanist',
'password': 'md5$H5W30kno$10a2327b2fce08c1ad0f65a12d40552f',
'real_name': 'Noriko K. L.',
'username': 'noriko'
},
{
'email': '[email protected]',
'role': 'researcher',
'password': 'md5$F8ZVxsuE$ebf71d15067ed7c887c0408550b671e2',
'real_name': 'Emma K.',
'username': 'emma'
},
]
}
def test_whoami_good(app_client):
credentials = base64.b64encode("noriko:Hunter2".encode("utf-8"))
result = app_client.get(
"whoami",
headers={
"Authorization": f"BASIC {credentials.decode('ASCII')}"
}
)
assert result.status_code == 200
print(result.json)
assert result.json["status"] == "OK"
def test_whoami_bad(app_client):
credentials = base64.b64encode("noriko:not my passowrd".encode("utf-8"))
result = app_client.get(
"whoami",
headers={
"Authorization": f"BASIC {credentials.decode('ASCII')}"
}
)
assert result.status_code == 401
print(result.json)
assert result.json["message"] == "Unknown User"
|
[
"[email protected]"
] | |
0551c05c3a0fcbffde3afd42eec059f9cc7d51a4
|
4d675034878c4b6510e1b45b856cc0a71af7f886
|
/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
|
7fb8e82ece225ab6f88f1f4f83bea56a42cf1a57
|
[
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
] |
permissive
|
shinya7y/UniverseNet
|
101ebc2ad8f15482ee45ea8d6561aa338a0fa49e
|
3652b18c7ce68122dae7a32670624727d50e0914
|
refs/heads/master
| 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 |
Apache-2.0
| 2023-01-27T01:13:31 | 2020-05-13T07:23:43 |
Python
|
UTF-8
|
Python
| false | false | 376 |
py
|
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
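# Usage sketch (paths follow the usual MMDetection layout; adjust to your
# checkout):
#   python tools/train.py configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py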
|
[
"[email protected]"
] | |
1799efdff1eb4bb6b7e8ba832d454375c9017ab7
|
e40091711a9900350939556374cee5f3e41c2c3c
|
/tourism/ratting/models.py
|
129148c4edcd9dd3177d7a74515f01a81254f5d7
|
[] |
no_license
|
rg3915/drf-tourism
|
951249f64450b6b710bb971aa52ed4d2efe2a85d
|
c6648a42eed77ab82cf10af242ffb20690404fc0
|
refs/heads/main
| 2023-03-25T21:15:08.185176 | 2021-03-24T08:49:28 | 2021-03-24T08:49:28 | 347,816,562 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 739 |
py
|
from django.contrib.auth.models import User
from django.db import models
class Ratting(models.Model):
comment = models.CharField('comentário', max_length=100, unique=True)
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='usuário',
related_name='ratting_users',
null=True,
blank=True
)
note = models.DecimalField('nota', max_digits=3, decimal_places=2)
created = models.DateTimeField(
'criado em',
auto_now_add=True,
auto_now=False
)
class Meta:
ordering = ('comment',)
verbose_name = 'avaliação'
verbose_name_plural = 'avaliações'
def __str__(self):
return self.comment
|
[
"[email protected]"
] | |
30841bd3f6a6a979eeeab80457b83222c00be2d3
|
3330ed9c8f0aed91638b3a07ad697668346db930
|
/meiduo_mall/meiduo_mall/apps/payment/views.py
|
efbe83168b0fcbd31e9b45953a65fa112fb21d9b
|
[] |
no_license
|
chengong825/meiduo_mall_django
|
fcee163bb6256672cbc5dcbd649aad2605bfb5e6
|
e3ca5e48f0c043d3f3f6d24a198ac2812df6d719
|
refs/heads/master
| 2020-03-29T09:48:09.189141 | 2018-09-21T14:33:43 | 2018-09-21T14:33:43 | 149,774,584 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,574 |
py
|
import os
from alipay import AliPay
from django.conf import settings
from django.shortcuts import render
# Create your views here.
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from orders.models import OrderInfo
from payment.models import Payment
class PaymentView(APIView):
"""
    Payment
"""
permission_classes = (IsAuthenticated,)
def get(self, request, order_id):
"""
        Get the Alipay payment URL for the order
"""
        # Check that the order information is valid
try:
order = OrderInfo.objects.get(order_id=order_id, user=request.user,
pay_method=OrderInfo.PAY_METHODS_ENUM["ALIPAY"],
status=OrderInfo.ORDER_STATUS_ENUM["UNPAID"])
except OrderInfo.DoesNotExist:
return Response({'message': '订单信息有误'}, status=status.HTTP_400_BAD_REQUEST)
        # Build the Alipay payment URL
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None,  # default callback url
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/alipay_public_key.pem"),  # Alipay's public key, used to verify messages returned by Alipay -- not your own public key
            sign_type="RSA2",  # RSA or RSA2
            debug=settings.ALIPAY_DEBUG  # defaults to False
        )
order_string = alipay.api_alipay_trade_page_pay(
out_trade_no=order_id,
total_amount=str(order.total_amount),
subject="美多商城%s" % order_id,
return_url="http://www.meiduo.site:8080/pay_success.html",
)
        # The client must be redirected to https://openapi.alipay.com/gateway.do? + order_string
        # Concatenate the final URL and return it to the frontend
alipay_url = settings.ALIPAY_URL + "?" + order_string
return Response({'alipay_url': alipay_url})
class PaymentStatusView(APIView):
"""
    Payment result
"""
def put(self, request):
data = request.query_params.dict()
signature = data.pop("sign")
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None,  # default callback url
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                "keys/alipay_public_key.pem"),  # Alipay's public key, used to verify messages returned by Alipay -- not your own public key
            sign_type="RSA2",  # RSA or RSA2
            debug=settings.ALIPAY_DEBUG  # defaults to False
        )
success = alipay.verify(data, signature)
if success:
            # order number
            order_id = data.get('out_trade_no')
            # Alipay transaction serial number
            trade_id = data.get('trade_no')
Payment.objects.create(
order_id=order_id,
trade_id=trade_id
)
OrderInfo.objects.filter(order_id=order_id, status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']).update(status=OrderInfo.ORDER_STATUS_ENUM["UNCOMMENT"])
return Response({'trade_id': trade_id})
else:
return Response({'message': '非法请求'}, status=status.HTTP_403_FORBIDDEN)
|
[
"[email protected]"
] | |
503cbc78d9f6e0910d577259e1c733d92a4a3a30
|
2eff2b24d5b6f5dffc42c9cbde6102ec9317502f
|
/src/Calculator.py
|
8893fae57e0cb5134c33ea8b75b81954ec9c8cbf
|
[] |
no_license
|
JakobKallestad/Python-Kattis
|
599a14e71a8d5c52aae779b8db3d35f0e4d01e88
|
51656964e79cc861e53f574785aacb213ef10b46
|
refs/heads/master
| 2022-10-24T23:12:45.599813 | 2021-12-08T12:31:54 | 2021-12-08T12:31:54 | 156,881,692 | 2 | 1 | null | 2022-10-02T12:36:57 | 2018-11-09T15:34:09 |
Python
|
UTF-8
|
Python
| false | false | 147 |
py
|
while True:
try:
line = input()
result = eval(line)
print("{:.2f}".format(result))
except EOFError:
break
|
[
"[email protected]"
] | |
5e502e6a8f31e345307af4c6bcc63e0a2132c326
|
4dbaea97b6b6ba4f94f8996b60734888b163f69a
|
/LeetCode/48.py
|
c4d4841c02151f4c9dd1b1d227a3fef532cd49d0
|
[] |
no_license
|
Ph0en1xGSeek/ACM
|
099954dedfccd6e87767acb5d39780d04932fc63
|
b6730843ab0455ac72b857c0dff1094df0ae40f5
|
refs/heads/master
| 2022-10-25T09:15:41.614817 | 2022-10-04T12:17:11 | 2022-10-04T12:17:11 | 63,936,497 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 667 |
py
|
class Solution:
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
matrix_len = len(matrix)
for i in range(matrix_len // 2):
for j in range(matrix_len - matrix_len // 2):
tmp = matrix[i][j]
matrix[i][j] = matrix[matrix_len - j - 1][i]
matrix[matrix_len - j - 1][i] = matrix[matrix_len - i - 1][matrix_len - j - 1]
matrix[matrix_len - i - 1][matrix_len - j - 1] = matrix[j][matrix_len - i - 1]
matrix[j][matrix_len - i - 1] = tmp
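# Minimal usage sketch (the 3x3 matrix is an assumed example): rotate 90
# degrees clockwise, in place.
if __name__ == '__main__':
    m = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
    Solution().rotate(m)
    print(m)  # expected: [[7, 4, 1], [8, 5, 2], [9, 6, 3]]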
|
[
"[email protected]"
] | |
2751437f81253f6762b521912bf1187f9551bfb7
|
bfdab27f224d9cac02e319fe55b53172fbf8d1a2
|
/motion_editor_core/data/atlas_old/motions/drive_pull_right.py
|
a5356f2535ecc1d9343c53befe481a534536d151
|
[] |
no_license
|
tu-darmstadt-ros-pkg/motion_editor
|
c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71
|
178a7564b18420748e1ca4413849a44965823655
|
refs/heads/master
| 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 |
Python
|
UTF-8
|
Python
| false | false | 2,338 |
py
|
{ 'drive_pull_right': { 'l_arm': [],
'l_leg': [],
'r_arm': [ { 'duration': 1.0,
'name': u'vm_arm_r_retract_up',
'positions': [ -0.2258,
-0.5361,
3.1416,
-2.3456,
-0.3547,
-1.5618],
'starttime': 0.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_up_up',
'positions': [ -0.2258,
-1.2716,
3.1416,
-2.3562,
-0.3547,
-1.5618],
'starttime': 1.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_up_right',
'positions': [ -0.2258,
-1.2716,
3.1416,
-1.4144,
-0.3547,
-0.759],
'starttime': 2.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_down',
'positions': [ -0.2258,
1.3963,
3.1416,
-1.4144,
-0.3547,
-0.759],
'starttime': 3.0}],
'r_leg': [],
'torso': []}}
|
[
"[email protected]"
] | |
7e7db89059aa6482d6801ca06d86ca389c337e25
|
4ca821475c57437bb0adb39291d3121d305905d8
|
/models/research/swivel/vecs.py
|
61a2b7a852dd4c1a577d240c1c990423ddcbb77c
|
[
"Apache-2.0"
] |
permissive
|
yefcion/ShipRec
|
4a1a893b2fd50d34a66547caa230238b0bf386de
|
c74a676b545d42be453729505d52e172d76bea88
|
refs/heads/master
| 2021-09-17T04:49:47.330770 | 2018-06-28T02:25:50 | 2018-06-28T02:25:50 | 112,176,613 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,226 |
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import numpy as np
import os
from six import string_types
class Vecs(object):
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
"""Initializes the vectors from a text vocabulary and binary data."""
with open(vocab_filename, 'r') as lines:
self.vocab = [line.split()[0] for line in lines]
self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}
n = len(self.vocab)
with open(rows_filename, 'r') as rows_fh:
rows_fh.seek(0, os.SEEK_END)
size = rows_fh.tell()
# Make sure that the file size seems reasonable.
if size % (4 * n) != 0:
raise IOError(
'unexpected file size for binary vector file %s' % rows_filename)
# Memory map the rows.
      dim = size // (4 * n)  # integer division keeps reshape() valid on Python 3
rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
rows = np.matrix(
np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))
# If column vectors were specified, then open them and add them to the
# row vectors.
if cols_filename:
with open(cols_filename, 'r') as cols_fh:
cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
cols_fh.seek(0, os.SEEK_END)
if cols_fh.tell() != size:
raise IOError('row and column vector files have different sizes')
cols = np.matrix(
np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))
rows += cols
cols_mm.close()
# Normalize so that dot products are just cosine similarity.
self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
rows_mm.close()
def similarity(self, word1, word2):
"""Computes the similarity of two tokens."""
idx1 = self.word_to_idx.get(word1)
idx2 = self.word_to_idx.get(word2)
    if idx1 is None or idx2 is None:  # index 0 is a valid id, so test for None
return None
return float(self.vecs[idx1] * self.vecs[idx2].transpose())
def neighbors(self, query):
"""Returns the nearest neighbors to the query (a word or vector)."""
if isinstance(query, string_types):
idx = self.word_to_idx.get(query)
if idx is None:
return None
query = self.vecs[idx]
neighbors = self.vecs * query.transpose()
return sorted(
zip(self.vocab, neighbors.flat),
key=lambda kv: kv[1], reverse=True)
def lookup(self, word):
"""Returns the embedding for a token, or None if no embedding exists."""
idx = self.word_to_idx.get(word)
return None if idx is None else self.vecs[idx]
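
# Minimal usage sketch (file names are hypothetical; the rows file must hold
# len(vocab) contiguous float32 vectors as produced by the Swivel trainer):
#
#   vecs = Vecs('vocab.txt', 'row_embedding.bin')
#   print(vecs.similarity('cat', 'dog'))        # cosine similarity, or None
#   for word, score in vecs.neighbors('cat')[:5]:
#     print(word, score)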
|
[
"[email protected]"
] | |
f9bcb3dcc1970423f97e39ba9072f214fd2b4bf9
|
a2a14995c95e024644623ea26add2f27d186ea16
|
/go.py
|
dff7c109c90a18c87abe03834f8ab27f33530049
|
[
"MIT"
] |
permissive
|
swdevbali/lit
|
89db51ae912770ac4030a3c491ad775a68b95a4b
|
dbc01ee8e4e600a0a43e49ffd18873653cc3f7cc
|
refs/heads/master
| 2021-01-21T00:25:50.001045 | 2013-02-16T13:52:50 | 2013-02-16T13:52:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,447 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import windows as winutils
from datetime import datetime
from utils import Query
from PyQt4.QtCore import (
Qt,
QAbstractListModel,
QMutex,
QMutexLocker
)
import itertools
import logging
from lcs import lcs
NAME_LIMIT = 42
class Task(object):
def __init__(self, hwnd, query, usetime):
self.hwnd = hwnd
self.query = query
self.usetime = usetime
def use(self):
self.usetime = datetime.now()
@property
def digest(self):
if len(self.name) > NAME_LIMIT:
shortname = self.name[:NAME_LIMIT - 3] + '...'
else:
shortname = self.name
if self.filename:
return '%s (%s)' % (shortname, self.filename)
else:
return shortname
@property
def title(self):
return self.name
@property
def fullname(self):
if self.filename:
return self.title + self.filename
else:
return self.title
@property
def filename(self):
if not hasattr(self, '_filename'):
self._filename = winutils.get_app_name(self.hwnd)
return self._filename
@property
def name(self):
return winutils.window_title(self.hwnd)
@property
def icon(self):
if not hasattr(self, '_icon'):
self._icon = winutils.get_window_icon(self.hwnd)
return self._icon
class WindowModel(QAbstractListModel):
NAME_ROLE = Qt.DisplayRole
HWND_ROLE = Qt.UserRole
def __init__(self, items):
self.super.__init__()
self.items = items
@property
def super(self):
return super(WindowModel, self)
def rowCount(self, parent):
return len(self.items)
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.TextAlignmentRole:
return int(Qt.AlignLeft | Qt.AlignVCenter)
elif role == Qt.DisplayRole:
return self.items[index.row()].digest
elif role == Qt.DecorationRole:
return self.items[index.row()].icon
elif role == Qt.UserRole:
return self.items[index.row()].hwnd
else:
return None
class Go(object):
def __init__(self, worker, client):
self.tasks = {}
self.mutex = QMutex()
self.worker = worker
self.client = client
@property
def name(self):
return 'g'
def lit(self, query, upper_bound, finished, *args, **kargs):
self.worker.do(
make=lambda: WindowModel(
self.sorted_active_runnable(
query,
winutils.top_level_windows()
)[:upper_bound]
),
catch=finished,
main=True
)
def sorted_active_runnable(self, query, hwnds):
with QMutexLocker(self.mutex):
# update query and collect active ones
self._refresh_tasks(hwnds, query)
active_tasks = [self.tasks[h] for h in hwnds]
# sort by last use
if not query:
return sorted(active_tasks, key=lambda t: t.usetime, reverse=True)
titles = [task.fullname.lower() for task in active_tasks]
def f(task, title):
return task.query.distance_to(title)
ds = [f(task, title) * (10 ** len(query)) for task, title in zip(active_tasks, titles)]
best = ds[0]
for i in itertools.takewhile(lambda i: ds[i] == best, range(len(ds))):
ds[i] -= len(lcs(query, titles[i]))
#return sorted(active_tasks, key=f)
return [task for i, task in sorted(enumerate(active_tasks), key=lambda i: ds[i[0]])]
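
    # Ranking sketch for sorted_active_runnable (illustrative, not from the
    # original author): each window title gets an edit-distance cost scaled by
    # 10**len(query); the leading run of entries whose cost equals the first
    # entry's cost is then nudged apart by subtracting the length of its
    # longest common subsequence with the query before the final sort.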
def _refresh_tasks(self, hwnds, query=None):
for hwnd in hwnds:
            if hwnd not in self.tasks:
self.tasks[hwnd] = Task(
hwnd=hwnd,
usetime=datetime.now(),
query=Query(
text='' if query is None else query,
insertion_cost=1,
first_insertion_cost=50,
prepend_first_insertion_cost=5,
append_first_insertion_cost=10,
deletion_cost=100,
substitution_cost=100,
transposition_cost=10
)
)
            elif query is not None:
self.tasks[hwnd].query.update(query.lower())
def update_usetime(self, hwnd):
"""Update with one time delay."""
if hasattr(self, 'after_select') and self.after_select:
self.after_select()
self.after_select = self.tasks[hwnd].use
def select(self, content, index):
# check content type
if not isinstance(content, WindowModel):
logging.info('wrong content type {}'.format(type(content)))
return
for hwnd in winutils.top_level_windows():
if content.data(index, WindowModel.HWND_ROLE) == hwnd:
self._refresh_tasks([hwnd])
self.client.goto(hwnd=hwnd)
self.update_usetime(hwnd)
return
# remove invalid tasks
del self.tasks[content.data(index, WindowModel.HWND_ROLE)]
|
[
"[email protected]"
] | |
0659d7826f012a4a77173ff1cd94f53a96dcf0ad
|
1db2e2238b4ef9c1b6ca3b99508693ee254d6904
|
/develop/align_atoms/make_alignment.py
|
e2c2c5a27921bc05091c8e561cda56f058e951c7
|
[] |
no_license
|
pgreisen/pythonscripts
|
8674e08095f76edf08ef2059300349218079724c
|
0aadf8f96d19b306c1bc44a772e766a06fe3408b
|
refs/heads/master
| 2021-07-06T23:54:57.774342 | 2021-06-08T19:36:36 | 2021-06-08T19:36:36 | 22,017,192 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,251 |
py
|
import os,shutil,sys
from translate_rotate import *
class pdbfile:
# Requires floating point number
# Returns floating point with correct
# number of digits for pdb
def set_number_digits(self,number):
return '%.3f' %number
def set_length_digit(self,number):
lngth = len(number)
if lngth == 7:
return ' '+number
if lngth == 6:
return ' '+number
if lngth == 5:
return ' '+number
if lngth == 4:
return ' '+number
else:
return number
# Method to get data from each rotamer
def get_data_to_align(self,filename):
tmp_chi = open(filename,'r')
atoms = ['ZN1','ZN2','O5','O1']
dic = {}
for line in tmp_chi:
tmp = line.split()
if tmp[2] in atoms:
dic[tmp[2]] = line
wrt = open('tmp.pdb','w')
wrt.write(str(dic['ZN2']))
wrt.write(str(dic['ZN1']))
wrt.write(str(dic['O5']))
wrt.write(str(dic['O1']))
wrt.close()
    # Takes a directory with conformations of the ligand ensemble.
    # If we generate the ensemble after the alignment, this step is not
    # necessary.
# Returns a list with transformed coordinates
def get_aligned_coor(self,path,VIZ,templateFile,crystal_coor):
        RMSD_THRESHOLD = 0.8
obj = align_to_substrate()
files = os.listdir(path)
outfile = []
# Reading data from crystal structure where one wants
# the alignment from
cry_data,atom_names = obj.get_data(crystal_coor)
for fl in files:
ph = path+'/'+fl
rd = open(ph,'r')
            # What does this file contain and where was it generated?
            # The file contains data from the model, i.e. the data
            # we created for our model system.
self.get_data_to_align(ph)
fname = 'tmp.pdb'
            # here we get the names
sub_data,atom_names = obj.get_data(fname)
# Superimpose substrate data in crystal structure
# getting the translation and rotation matrix
t_m, r_m = obj.get_rotate_translate(sub_data,cry_data)
# Getting the transformed coordinates
nw = obj.get_transformed_coor(sub_data,cry_data)
rmsd_align = obj.get_rmsd(nw,cry_data)
            print 'rmsd_align', rmsd_align
            if rmsd_align < RMSD_THRESHOLD:
# We transform the original data
sub,at = obj.get_data(ph)
# The transformed coordinates
# what is their construction
t_c = dot(sub,r_m)+t_m
# Writing the coordinates
# Files name of coordinates is
# Writing to a file called superimposed.pdb
obj.write_pdb(at,t_c)
# Rosetta naming convention file which is generated
# earlier.
# File for rosetta with the correct naming
# I/O of file
sp_file = open('superimposed.pdb','r')
rosetta = open(templateFile,'r')
fileOne = sp_file.readlines()
fileTwo = rosetta.readlines()
rosetta.close()
# Variable to count line number in other file
# used to insert at the right line
ct = 0
for i in fileTwo:
ln = fileOne[ct].split()
# A very temporary fix for the number greater 100
x = self.set_number_digits(float(ln[6]))
y = self.set_number_digits(float(ln[7]))
z = self.set_number_digits(float(ln[8]))
x = self.set_length_digit(x)
y = self.set_length_digit(y)
z = self.set_length_digit(z)
i = str(i[0:30])+x+y+z+str(i[55:81])
outfile.append(i)
ct = ct +1
outfile.append(VIZ)
return outfile
|
[
"[email protected]"
] | |
226c4d09fa5cdc1ca4d9713500b37dcc362f0d99
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_142/671.py
|
10347da035e5343026f9408225894450ce90b99c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,367 |
py
|
def parseString(word):
dico = []
c = word[0]
cpt = 0
for i in xrange(len(word)):
if c != word[i]:
dico.append((word[i-1],cpt))
cpt = 1
c = word[i]
else:
cpt += 1
c = word[i]
dico.append((word[len(word)-1],cpt))
return dico
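
# Worked example: parseString("aabbb") returns [('a', 2), ('b', 3)],
# i.e. a run-length encoding of the string.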
def checkSize(tab):
occ = len(tab[0])
for i in xrange(len(tab)):
if occ != len(tab[i]):
return False
return True
def checkLetter(tab):
sent = tab[0]
for i in xrange(len(tab)):
for j in xrange(len(tab[i])):
if sent[j][0] != tab[i][j][0]:
return False
return True
def findInterval(tab):
cpt = 0
for i in xrange(len(tab[0])):
t_max = 0
t_min = 10000
for j in xrange(len(tab)):
if tab[j][i][1] > t_max:
t_max = tab[j][i][1]
if tab[j][i][1] < t_min:
t_min = tab[j][i][1]
cpt += (t_max - t_min)
return cpt
######################################################
#### MAIN :)
######################################################
nb_case = int(raw_input())
for i in xrange(nb_case):
nb_row = int(raw_input())
res = []
for j in xrange(nb_row):
res.append(parseString(str(raw_input())))
if checkSize(res):
if checkLetter(res):
print("Case #%d: %d" % (i+1,findInterval(res)))
else:
print("Case #%d: Fegla Won" % (i+1))
else:
print("Case #%d: Fegla Won" % (i+1))
|
[
"[email protected]"
] | |
d01277bf95b44d3ea01150d8c57d628e1b8f6eb4
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2180/60671/249029.py
|
9b092ed28dbaf417cbff0c51b1ee7e7e1ab2000a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 339 |
py
|
s1=input()
s2=input()
list1=[]
list2=[]
for x in range(len(s1)):
for i in range(len(s1) - x):
list1.append(s1[i:i + x + 1])
for x in range(len(s2)):
for i in range(len(s2) - x):
list2.append(s2[i:i + x + 1])
list1.sort()
list2.sort()
count=0
for mem in list1:
if(mem in list2):
count+=1
print(count)
|
[
"[email protected]"
] | |
e0beeb0e7fa4d5516f5433f69c91e40e77eabe06
|
659f10ae3ad036bbb6293b0cd585a4be2bc2dcc9
|
/containers/migrations/0005_auto__add_field_container_meta.py
|
7d9c7a532d2fabdceeeb662c0217c38e40611106
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
newrelic/shipyard
|
e58649adf46b65e30ea93307c53b064abc4495dc
|
e4e990583a646b77e7e1767682e1ecf94c278fb8
|
refs/heads/master
| 2023-07-22T11:47:31.472994 | 2013-09-27T19:13:37 | 2013-09-27T19:13:37 | 12,735,507 | 3 | 2 | null | 2023-07-06T03:58:58 | 2013-09-10T17:08:31 |
Python
|
UTF-8
|
Python
| false | false | 5,101 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Container.meta'
db.add_column(u'containers_container', 'meta',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Container.meta'
db.delete_column(u'containers_container', 'meta')
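
    # Usage sketch (South, not part of the generated file): apply with
    # `python manage.py migrate containers`; roll this step back with
    # `python manage.py migrate containers 0004`.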
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'containers.container': {
'Meta': {'object_name': 'Container'},
'container_id': ('django.db.models.fields.CharField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Host']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'containers.host': {
'Meta': {'object_name': 'Host'},
'enabled': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.SmallIntegerField', [], {'default': '4243', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['containers']
|
[
"[email protected]"
] | |
03da3037aa5075dd5cc26a9b6f22f10ac33ea3dc
|
3cc8af76b1fd487eea86610d7a07f477afeab048
|
/setup.py
|
da827dc7ffda6b32ae816d398f0fb9cec5e512e5
|
[
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
expresschen/HanLP
|
20ff6d03b01b508e4395ea3532e8af712e065ebf
|
24b48966e90dfafa1faa65765eb6f35e19cac801
|
refs/heads/doc-zh
| 2023-07-13T10:16:30.231114 | 2020-02-15T17:19:28 | 2021-08-24T02:15:49 | 401,305,599 | 1 | 0 |
Apache-2.0
| 2021-08-30T10:37:28 | 2021-08-30T10:37:27 | null |
UTF-8
|
Python
| false | false | 1,990 |
py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 19:26
from os.path import abspath, join, dirname
from setuptools import find_packages, setup
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
long_description = file.read()
version = {}
with open(join(this_dir, "hanlp", "version.py")) as fp:
exec(fp.read(), version)
setup(
name='hanlp',
version=version['__version__'],
description='HanLP: Han Language Processing',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/hankcs/HanLP',
author='hankcs',
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
"Development Status :: 3 - Alpha",
'Operating System :: OS Independent',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Topic :: Text Processing :: Linguistic"
],
keywords='corpus,machine-learning,NLU,NLP',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
install_requires=[
'termcolor',
'pynvml',
'alnlp',
'toposort==1.5',
'transformers>=4.1.1',
        'sentencepiece>=0.1.91',
'torch>=1.6.0',
'hanlp-common>=0.0.9',
'hanlp-trie>=0.0.2',
'hanlp-downloader',
],
extras_require={
'full': [
'fasttext==0.9.1',
'tensorflow==2.3.0',
'bert-for-tf2==0.14.6',
'py-params==0.9.7',
'params-flow==0.8.2',
'penman==0.6.2',
],
},
python_requires='>=3.6',
# entry_points={
# 'console_scripts': [
# 'hanlp=pyhanlp.main:main',
# ],
# },
)
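
# Installation sketch: `pip install hanlp` for the core package, or
# `pip install hanlp[full]` to pull in the TensorFlow/fastText extras above.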
|
[
"[email protected]"
] | |
4c5824d086f61db8d6a64e10bf494165a522f574
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/ag/models/UpdateStepAsRuleSpec.py
|
defba859f89b925fee619a7ec0fa42170e4f650c
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 |
Apache-2.0
| 2023-09-07T06:54:49 | 2018-03-22T03:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,341 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class UpdateStepAsRuleSpec(object):
def __init__(self, adjustmentType=None, stepAdjustments=None):
"""
        :param adjustmentType: (Optional) scaling adjustment type; one of [`Number`, `Percentage`, `Total`]
          - `Number`: increase or decrease the group by a fixed number of instances
          - `Percentage`: increase or decrease the group by a fixed percentage of instances
          - `Total`: adjust the current scaling group to an exact instance count
          If the `adjustmentType` parameter is modified, `stepAdjustments` must be passed as well, otherwise an error is returned
        :param stepAdjustments: (Optional) array of step adjustment policies
"""
self.adjustmentType = adjustmentType
self.stepAdjustments = stepAdjustments
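
# Minimal usage sketch (the stepAdjustments dict shape below is illustrative,
# not defined in this file):
#
#   spec = UpdateStepAsRuleSpec(adjustmentType='Number',
#                               stepAdjustments=[{'lowerBound': 0, 'adjustmentValue': 1}])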
|
[
"[email protected]"
] | |
84191deb0a80f8875e115aa3f5eae0046025e1d7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04031/s251975826.py
|
5d8d02c8d8407057ca16c1bec6857fff705531e1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
import math
import collections
import fractions
import itertools
import functools
import operator
def solve():
n = int(input())
a = list(map(int, input().split()))
cost = []
for i in range(-100, 101):
ramen = 0
for j in range(n):
ramen += abs(a[j]-i)**2
cost.append(ramen)
print(min(cost))
return 0
if __name__ == "__main__":
solve()
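
# Worked example: for a = [1, 2, 3] the best meeting point is i = 2 with cost
# (1-2)**2 + (2-2)**2 + (3-2)**2 = 2 (i = 1 or i = 3 would each cost 5).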
|
[
"[email protected]"
] | |
f95f8a329e6279fc0ee59df351c887432ee6fec1
|
93736e8d0d5517eb73af91eeda6e9b0f4b07439e
|
/Python/Intro_Python/exercise3.py
|
e85c0cc0eae27917d1839cebdafb0e37e1cd146e
|
[] |
no_license
|
aayushgupta97/TTN
|
0de1a5d3a25d7399d68a81ea51f17233f81029e0
|
324466cbdf0a9b0953dd4ae574bd0b3f753c4fd7
|
refs/heads/master
| 2020-04-21T12:18:25.721602 | 2019-04-15T11:09:13 | 2019-04-15T11:09:13 | 169,557,867 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,564 |
py
|
from abc import ABC, abstractmethod
class Box(ABC):
    @abstractmethod
    def add(self, *items):
        raise NotImplementedError()

    @abstractmethod
    def empty(self):
        raise NotImplementedError()

    @abstractmethod
    def count(self):
        raise NotImplementedError()
class Item():
def __init__(self, name, value):
self.name = name
self.value = value
class ListBox(Box):
def __init__(self):
self._items = []
def add(self, *items):
self._items.extend(items)
def empty(self):
items = self._items
self._items = []
return items
def count(self):
return len(self._items)
# class DictBox(Box):
# def __init__(self):
# self._items = {}
# def add(self, *items):
# self._items.update(dict((i.name, i) for i in items))
# def empty(self):
# items = list(self._items.values())
# self._items = {}
# return items
# def count(self):
# return len(self._items)
# #repack
# def repack_boxes(*boxes):
# items = []
# for box in boxes:
# items.extend(box.empty())
# while items:
# for box in boxes:
# try:
# box.add(items.pop())
# except IndexError:
# break
# box1 = ListBox()
# box1.add(Item(str(i), i) for i in range(20))
# box2 = ListBox()
# box2.add(Item(str(i), i) for i in range(9))
# # box3 = DictBox()
# # box3.add(Item(str(i), i) for i in range(5))
# repack_boxes(box1, box2) #, box2, box3
# print(box1.count())
# print(box2.count())
# # print(box3.count())
|
[
"[email protected]"
] | |
f2e9d1fa4f806aa5430bcc405d3ed2f4ea3e94d2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03165/s926838179.py
|
74a1ba1007c89e718147b0b4242828dcbc0a88f7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 594 |
py
|
s = input()
t = input()
dp = [[0]*(len(t)+1) for _ in range(len(s)+1)]
for i in range(len(s)):
for j in range(len(t)):
if s[i]==t[j]:
dp[i+1][j+1] = dp[i][j]+1
else:
if dp[i+1][j] < dp[i][j+1]:
dp[i+1][j+1] = dp[i][j+1]
else:
dp[i+1][j+1] = dp[i+1][j]
i = len(s)-1
j = len(t)-1
ans = ''
while i>=0 and j>=0:
if s[i]==t[j]:
ans += t[j]
i -= 1
j -= 1
else:
if dp[i][j+1]>dp[i+1][j]:
i -= 1
else:
j -= 1
print(ans[::-1])
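
# Worked example: for s = "axyb" and t = "abyxb" the LCS length is 3, and the
# code prints one longest common subsequence such as "ayb" (ties may differ).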
|
[
"[email protected]"
] | |
e91848d3129b01eeac17497f4be7ff57f9e5a2d5
|
215cafb0a79338a2a268c19629f07df20cf68f76
|
/venv/bin/pip-3.8
|
6de21cb3dcc8b330bf64648feb3896c8b6bb5f2d
|
[] |
no_license
|
safwanvk/erp
|
c95741c5873ebaa53a8a96093928745e02000be9
|
d4e427dbb6b71eb9aa6e2d15a039e2e669c53cbe
|
refs/heads/master
| 2022-12-08T12:38:36.817514 | 2020-08-16T15:10:55 | 2020-08-16T15:10:55 | 287,913,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 262 |
8
|
#!/home/safwan/Desktop/projects/erp-pro/erp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | |
9f2833105773edd29e1268cc3705ad9ff9dc2a1c
|
98be00ee32971cade82d10c067aff532c3394a62
|
/Competitions/Xtreme/xplore.py
|
f6e47c98fc808d7cc0c5fe1f897956f31365aa4a
|
[] |
no_license
|
vigneshhari/Competitive_solutions
|
5ab34933ea8d84eab67bdef9bb9e4562f6b90782
|
7a35e1386e5cff71cb5746b6797ccc0f03ceb3f4
|
refs/heads/master
| 2023-01-11T02:53:01.456863 | 2022-12-29T13:50:03 | 2022-12-29T13:50:03 | 115,146,700 | 4 | 2 | null | 2019-10-26T09:15:03 | 2017-12-22T20:03:51 |
Python
|
UTF-8
|
Python
| false | false | 4,010 |
py
|
import json
from collections import defaultdict
authors_citations = defaultdict(list)
for i in range(input()):
data = raw_input()
temp = json.loads(data)
citation_count = temp["citing_paper_count"]
    for author in temp["authors"]["authors"]:
        authors_citations[author["full_name"]].append(citation_count)
answers = defaultdict(list)
for i in authors_citations:
values = authors_citations[i]
values.sort()
length = len(values)
out = 0
for j in range(length):
if( length - j >= values[j] ):
out = values[j]
else:
if(values[j] > length - j and length - j > out ):
out = length - j
answers[out].append(i)
temp = sorted(answers.keys())
temp = temp[::-1]
for i in temp:
for k in sorted(answers[i]):
print k , i
"""
10
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Echo"}, {"author_order": 2,"affiliation": "","full_name": "Bravo"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}]},"title": "Article Title 1","article_number": "1","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Bravo"}]},"title": "Article Title 2","article_number": "2","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Echo"}, {"author_order": 2,"affiliation": "","full_name": "Delta"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}, {"author_order": 4,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 3","article_number": "3","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 4","article_number": "4","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}]},"title": "Article Title 5","article_number": "5","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 5,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}]},"title": "Article Title 6","article_number": "6","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 6,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Delta"}]},"title": "Article Title 7","article_number": "7","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 8","article_number": "8","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Delta"}, {"author_order": 2,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 9","article_number": "9","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Bravo"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}]},"title": "Article Title 10","article_number": "10","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 6,"publisher": "IEEE"}
"""
# Solved Completely
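# Sanity check: in the sample input above, Charlie's citation counts are
# [9, 9, 9, 6, 5, 4, 4]; five papers have at least 5 citations, so the loop
# above computes the h-index, 5, for "Charlie".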
|
[
"[email protected]"
] | |
6d57c6855dd53ede783641ec65bed681aa69e10a
|
1196fe960947b4a7d6bba5df6cdfc7010bb118fb
|
/examples/apikeys/apikeys.py
|
8b921f815ddb9dc3eea6e33e1ad7b042f43026be
|
[
"MIT"
] |
permissive
|
Nextdoor/sendgrid-python
|
a4afe5cda9015c7cf6a3a1303785fda05e844277
|
a7c834b6391775b796969ef65a3ef259ccabf0f0
|
refs/heads/master
| 2021-01-22T11:12:08.221546 | 2016-04-22T21:20:07 | 2016-04-22T21:20:07 | 56,885,507 | 0 | 0 | null | 2016-04-22T21:11:50 | 2016-04-22T21:11:49 | null |
UTF-8
|
Python
| false | false | 1,657 |
py
|
import sendgrid
import json
import os
sg = sendgrid.SendGridAPIClient(apikey='YOUR_SENDGRID_API_KEY')
# You can also store your API key in an environment variable named 'SENDGRID_API_KEY'
##################################################
# List all API Keys belonging to the authenticated user #
# GET /api_keys #
response = sg.client.api_keys.get()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Update the name & scopes of an API Key #
# PUT /api_keys/{api_key_id} #
data = {'sample': 'data'}
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).put(request_body=data)
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Update API keys #
# PATCH /api_keys/{api_key_id} #
data = {'sample': 'data'}
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).patch(request_body=data)
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Get an existing API Key #
# GET /api_keys/{api_key_id} #
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).get()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Delete API keys #
# DELETE /api_keys/{api_key_id} #
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).delete()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
|
[
"[email protected]"
] | |
df55ffe5751d160215654f44ca59df406536a410
|
c03edd979ad6fd4a8abd155e3e63bcefbd93d5c2
|
/Image/band_stats.py
|
7b83630a66a15155d6b74a944ca88a2b17ef34e5
|
[
"MIT"
] |
permissive
|
xiangtaoxu/earthengine-py-examples
|
538dafc88a22a351b762ba02df09db583df955bb
|
76ae8e071a71b343f5e464077afa5b0ed2f9314c
|
refs/heads/master
| 2022-11-03T03:16:11.933616 | 2020-06-12T15:47:52 | 2020-06-12T15:47:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,987 |
py
|
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# get highest value
def maxValue(img, scale=30):
max_value = img.reduceRegion(**{
'reducer': ee.Reducer.max(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return max_value
# get lowest value
def minValue(img, scale=30):
min_value = img.reduceRegion(**{
'reducer': ee.Reducer.min(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return min_value
# get mean value
def meanValue(img, scale=30):
mean_value = img.reduceRegion(**{
'reducer': ee.Reducer.mean(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return mean_value
# get standard deviation
def stdValue(img, scale=30):
std_value = img.reduceRegion(**{
'reducer': ee.Reducer.stdDev(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return std_value
dataset = ee.Image('USGS/NED')
dem = dataset.select('elevation')
# dem = ee.Image('srtm90_v4')
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(dem, vis_params, 'NED', False)
roi = ee.Geometry.Polygon(
[[[-120.18204899532924, 38.53481618819663],
[-120.18204899532924, 36.54889033300136],
[-116.75431462032924, 36.54889033300136],
[-116.75431462032924, 38.53481618819663]]])
image = dem.clip(roi)
Map.centerObject(image, 9)
Map.addLayer(image, vis_params, 'DEM')
scale = image.projection().nominalScale()
print("Resolution: ", scale.getInfo())
scale = 30
print("Minimum value: ", minValue(image, scale).get('elevation').getInfo())
print("Maximum value: ", maxValue(image, scale).get('elevation').getInfo())
print("Average value: ", meanValue(image, scale).get('elevation').getInfo())
print("Standard deviation: ", stdValue(image, scale).get('elevation').getInfo())
# Display the map.
Map
|
[
"[email protected]"
] | |
9242782550ab6ddf1a26238b272e633e1ed1d3c8
|
c342c8b9b2437d6474b9ae7da154ba47c6fc447c
|
/src/data/memory_store.py
|
81d6cb406f3ba13f5011c2669584a64d0cdc0b4a
|
[] |
no_license
|
nezaj/menu-api
|
0d5118f3a1392f85e51700b5e8ac234bac605518
|
bcf759b91893bf72821323c41f963923d9184e68
|
refs/heads/master
| 2021-01-10T07:09:15.664561 | 2015-11-16T21:28:45 | 2015-11-16T21:28:45 | 44,832,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,572 |
py
|
"""
Store implementation using in-memory data
"""
import json
import os
from .store_interface import StoreInterface
class MemoryStore(object):
__implements__ = (StoreInterface, )
def __init__(self, data_dir):
self.data_dir = data_dir
self.data = self._load_data(data_dir)
def _load_data(self, data_dir):
"""
        Loads data from the directory defined in settings. We expect there can be
        multiple collections of data and that each collection lives in its
        own subdirectory. As a result, we go through each directory and load
        its data into its own key.
"""
data = {}
for d in os.listdir(data_dir):
subd = os.path.join(data_dir, d)
if os.path.isdir(subd):
data[d] = self._load_json_files(subd)
return data
def _load_json_files(self, data_dir):
"""
Return a dictionary representing a collection of json from the given
data directory.
        We iterate through each json file and load its data. We then key
the data in each file by the id defined in the file itself.
"""
collection = {}
for item in os.listdir(data_dir):
df = os.path.join(data_dir, item)
if df.endswith(".json"):
jd = self._load_json_file(df)
d_id, d_meta = self._process_json(jd)
collection[d_id] = d_meta
return collection
@staticmethod
def _load_json_file(f):
with open(f) as jf:
jd = json.load(jf)
return jd
@staticmethod
def _process_json(jd):
jd_id = jd["id"]
return jd_id, jd
def create_item(self, collection_id, params):
c = self.data[collection_id]
item_id, item = self._process_json(params)
c[item_id] = item
return item
def delete_item(self, collection_id, item_id):
collection = self.get_collection(collection_id)
item = self.get_item(collection_id, item_id)
if collection and item:
del collection[item_id]
return item
def get_collection(self, collection_id):
return self.data.get(collection_id)
def get_item(self, collection_id, item_id):
collection = self.get_collection(collection_id)
if collection:
return collection.get(item_id)
def update_item(self, collection_id, item_id, params):
item = self.get_item(collection_id, item_id)
if item:
item.update(params)
return item
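
# Minimal usage sketch (the directory layout is hypothetical: each collection
# is a subdirectory of JSON files, and every file carries its own "id" key):
#
#   store = MemoryStore('data/')        # e.g. data/menus/1.json -> {"id": 1, ...}
#   menu = store.get_item('menus', 1)
#   store.update_item('menus', 1, {'name': 'Lunch'})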
|
[
"[email protected]"
] | |
7ebec6c0a7924a9438dc5a473cc822f219125df8
|
402ed5374ab189c8599b56864c5ce066f34b26c6
|
/tests/test_pdf_normal.py
|
e1fa17bd053bb3771266064aae86ee311b5c241c
|
[
"BSD-3-Clause"
] |
permissive
|
kailiu77/zfit
|
db354e9c3eb4a41274af5363834fe231823c6d66
|
8bddb0ed3a0d76fde0aa2cdbf74434b0ee0ae8bb
|
refs/heads/master
| 2020-10-01T23:49:55.751825 | 2019-12-06T15:48:47 | 2019-12-06T15:48:47 | 227,650,723 | 1 | 0 |
BSD-3-Clause
| 2019-12-12T16:33:54 | 2019-12-12T16:33:53 | null |
UTF-8
|
Python
| false | false | 2,899 |
py
|
# Copyright (c) 2019 zfit
import numpy as np
import pytest
import tensorflow as tf
import zfit
from zfit import Parameter
from zfit.models.dist_tfp import Gauss
from zfit.core.testing import setup_function, teardown_function, tester
mu1_true = 1.
mu2_true = 2.
mu3_true = 0.6
sigma1_true = 1.4
sigma2_true = 2.3
sigma3_true = 1.8
test_values = np.random.uniform(low=-3, high=5, size=100)
norm_range1 = (-4., 2.)
obs1 = 'obs1'
limits1 = zfit.Space(obs=obs1, limits=(-0.3, 1.5))
def create_gauss():
mu1 = Parameter("mu1a", mu1_true)
mu2 = Parameter("mu2a", mu2_true)
mu3 = Parameter("mu3a", mu3_true)
sigma1 = Parameter("sigma1a", sigma1_true)
sigma2 = Parameter("sigma2a", sigma2_true)
sigma3 = Parameter("sigma3a", sigma3_true)
gauss1 = Gauss(mu=mu1, sigma=sigma1, obs=obs1, name="gauss1a")
normal1 = Gauss(mu=mu1, sigma=sigma1, obs=obs1, name="normal1a")
gauss2 = Gauss(mu=mu2, sigma=sigma2, obs=obs1, name="gauss2a")
normal2 = Gauss(mu=mu2, sigma=sigma2, obs=obs1, name="normal2a")
gauss3 = Gauss(mu=mu3, sigma=sigma3, obs=obs1, name="gauss3a")
normal3 = Gauss(mu=mu3, sigma=sigma3, obs=obs1, name="normal3a")
return gauss1, gauss2, gauss3, normal1, normal2, normal3
# gauss1, gauss2, gauss3, normal1, normal2, normal3 = create_gauss()
def test_gauss1():
gauss1, gauss2, gauss3, normal1, normal2, normal3 = create_gauss()
probs1 = gauss1.pdf(x=test_values, norm_range=norm_range1)
probs1_tfp = normal1.pdf(x=test_values, norm_range=norm_range1)
probs1 = zfit.run(probs1)
probs1_tfp = zfit.run(probs1_tfp)
np.testing.assert_allclose(probs1, probs1_tfp, rtol=1e-2)
probs1_unnorm = gauss1.pdf(x=test_values, norm_range=False)
probs1_tfp_unnorm = normal1.pdf(x=test_values, norm_range=False)
probs1_unnorm = zfit.run(probs1_unnorm)
probs1_tfp_unnorm = zfit.run(probs1_tfp_unnorm)
assert not np.allclose(probs1_tfp, probs1_tfp_unnorm, rtol=1e-2)
assert not np.allclose(probs1, probs1_unnorm, rtol=1e-2)
# np.testing.assert_allclose(probs1_unnorm, probs1_tfp_unnorm, rtol=1e-2)
def test_truncated_gauss():
high = 2.
low = -0.5
truncated_gauss = zfit.pdf.TruncatedGauss(mu=1, sigma=2, low=low, high=high, obs=limits1)
gauss = zfit.pdf.Gauss(mu=1., sigma=2, obs=limits1)
probs_truncated = truncated_gauss.pdf(test_values)
probs_gauss = gauss.pdf(test_values)
probs_truncated_np, probs_gauss_np = zfit.run([probs_truncated, probs_gauss])
bool_index_inside = np.logical_and(low < test_values, test_values < high)
inside_probs_truncated = probs_truncated_np[bool_index_inside]
outside_probs_truncated = probs_truncated_np[np.logical_not(bool_index_inside)]
inside_probs_gauss = probs_gauss_np[bool_index_inside]
assert inside_probs_gauss == pytest.approx(inside_probs_truncated, rel=1e-3)
assert all(0 == outside_probs_truncated)
|
[
"[email protected]"
] | |
3e70f5bce473ccd4c866c43a7f594f03af071dca
|
f569978afb27e72bf6a88438aa622b8c50cbc61b
|
/douyin_open/EnterprisePersonaPersonaCreate/api/__init__.py
|
d436a85b96fd99e2b5e5d7a6b654b4348bb48850
|
[] |
no_license
|
strangebank/swagger-petstore-perl
|
4834409d6225b8a09b8195128d74a9b10ef1484a
|
49dfc229e2e897cdb15cbf969121713162154f28
|
refs/heads/master
| 2023-01-05T10:21:33.518937 | 2020-11-05T04:33:16 | 2020-11-05T04:33:16 | 310,189,316 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 208 |
py
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from douyin_open.EnterprisePersonaPersonaCreate.api.enterprise_im_persona_create_api import EnterpriseImPersonaCreateApi
|
[
"[email protected]"
] | |
d44f23bdc3a2ebd7b826cebb9d784a04528b90e6
|
5af277b5819d74e61374d1d78c303ac93c831cf5
|
/tabnet/experiment_covertype.py
|
c00ea76b7c9058be7df642fae0d69184a435f921
|
[
"Apache-2.0"
] |
permissive
|
Ayoob7/google-research
|
a2d215afb31513bd59bc989e09f54667fe45704e
|
727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7
|
refs/heads/master
| 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 |
Apache-2.0
| 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null |
UTF-8
|
Python
| false | false | 6,699 |
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment to train and evaluate the TabNet model on Forest Covertype."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
import data_helper_covertype
import numpy as np
import tabnet_model
import tensorflow as tf
# Run Tensorflow on GPU 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Training parameters
TRAIN_FILE = "data/train_covertype.csv"
VAL_FILE = "data/val_covertype.csv"
TEST_FILE = "data/test_covertype.csv"
MAX_STEPS = 1000000
DISPLAY_STEP = 5000
VAL_STEP = 10000
SAVE_STEP = 40000
INIT_LEARNING_RATE = 0.02
DECAY_EVERY = 500
DECAY_RATE = 0.95
BATCH_SIZE = 16384
SPARSITY_LOSS_WEIGHT = 0.0001
GRADIENT_THRESH = 2000.0
SEED = 1
def main(unused_argv):
# Fix random seeds
tf.set_random_seed(SEED)
np.random.seed(SEED)
# Define the TabNet model
tabnet_forest_covertype = tabnet_model.TabNet(
columns=data_helper_covertype.get_columns(),
num_features=data_helper_covertype.NUM_FEATURES,
feature_dim=128,
output_dim=64,
num_decision_steps=6,
relaxation_factor=1.5,
batch_momentum=0.7,
virtual_batch_size=512,
num_classes=data_helper_covertype.NUM_CLASSES)
column_names = sorted(data_helper_covertype.FEATURE_COLUMNS)
print(
"Ordered column names, corresponding to the indexing in Tensorboard visualization"
)
for fi in range(len(column_names)):
print(str(fi) + " : " + column_names[fi])
# Input sampling
train_batch = data_helper_covertype.input_fn(
TRAIN_FILE, num_epochs=100000, shuffle=True, batch_size=BATCH_SIZE)
val_batch = data_helper_covertype.input_fn(
VAL_FILE,
num_epochs=10000,
shuffle=False,
batch_size=data_helper_covertype.N_VAL_SAMPLES)
test_batch = data_helper_covertype.input_fn(
TEST_FILE,
num_epochs=10000,
shuffle=False,
batch_size=data_helper_covertype.N_TEST_SAMPLES)
train_iter = train_batch.make_initializable_iterator()
val_iter = val_batch.make_initializable_iterator()
test_iter = test_batch.make_initializable_iterator()
feature_train_batch, label_train_batch = train_iter.get_next()
feature_val_batch, label_val_batch = val_iter.get_next()
feature_test_batch, label_test_batch = test_iter.get_next()
# Define the model and losses
encoded_train_batch, total_entropy = tabnet_forest_covertype.encoder(
feature_train_batch, reuse=False, is_training=True)
logits_orig_batch, _ = tabnet_forest_covertype.classify(
encoded_train_batch, reuse=False)
softmax_orig_key_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_orig_batch, labels=label_train_batch))
train_loss_op = softmax_orig_key_op + SPARSITY_LOSS_WEIGHT * total_entropy
tf.summary.scalar("Total loss", train_loss_op)
# Optimization step
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
INIT_LEARNING_RATE,
global_step=global_step,
decay_steps=DECAY_EVERY,
decay_rate=DECAY_RATE)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
gvs = optimizer.compute_gradients(train_loss_op)
capped_gvs = [(tf.clip_by_value(grad, -GRADIENT_THRESH,
GRADIENT_THRESH), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
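  # Note: clip_by_value caps each gradient element to the range
  # [-GRADIENT_THRESH, GRADIENT_THRESH] (e.g. an entry of 2500.0 becomes
  # 2000.0), unlike clip_by_norm, which rescales whole tensors.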
# Model evaluation
# Validation performance
encoded_val_batch, _ = tabnet_forest_covertype.encoder(
feature_val_batch, reuse=True, is_training=False)
_, prediction_val = tabnet_forest_covertype.classify(
encoded_val_batch, reuse=True)
predicted_labels = tf.cast(tf.argmax(prediction_val, 1), dtype=tf.int32)
val_eq_op = tf.equal(predicted_labels, label_val_batch)
val_acc_op = tf.reduce_mean(tf.cast(val_eq_op, dtype=tf.float32))
tf.summary.scalar("Val accuracy", val_acc_op)
# Test performance
encoded_test_batch, _ = tabnet_forest_covertype.encoder(
feature_test_batch, reuse=True, is_training=False)
_, prediction_test = tabnet_forest_covertype.classify(
encoded_test_batch, reuse=True)
predicted_labels = tf.cast(tf.argmax(prediction_test, 1), dtype=tf.int32)
test_eq_op = tf.equal(predicted_labels, label_test_batch)
test_acc_op = tf.reduce_mean(tf.cast(test_eq_op, dtype=tf.float32))
tf.summary.scalar("Test accuracy", test_acc_op)
# Training setup
model_name = "tabnet_forest_covertype_model"
init = tf.initialize_all_variables()
init_local = tf.local_variables_initializer()
init_table = tf.tables_initializer(name="Initialize_all_tables")
saver = tf.train.Saver()
summaries = tf.summary.merge_all()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter("./tflog/" + model_name, sess.graph)
sess.run(init)
sess.run(init_local)
sess.run(init_table)
sess.run(train_iter.initializer)
sess.run(val_iter.initializer)
sess.run(test_iter.initializer)
for step in range(1, MAX_STEPS + 1):
if step % DISPLAY_STEP == 0:
_, train_loss, merged_summary = sess.run(
[train_op, train_loss_op, summaries])
summary_writer.add_summary(merged_summary, step)
print("Step " + str(step) + " , Training Loss = " +
"{:.4f}".format(train_loss))
else:
_ = sess.run(train_op)
if step % VAL_STEP == 0:
        feed_arr = [summaries, val_acc_op, test_acc_op]
val_arr = sess.run(feed_arr)
merged_summary = val_arr[0]
val_acc = val_arr[1]
print("Step " + str(step) + " , Val Accuracy = " +
"{:.4f}".format(val_acc))
summary_writer.add_summary(merged_summary, step)
if step % SAVE_STEP == 0:
saver.save(sess, "./checkpoints/" + model_name + ".ckpt")
if __name__ == "__main__":
app.run(main)
|
[
"[email protected]"
] | |
7d8af894f2c76cc47cf868d00ed53d834dc11006
|
138f2550bb088a0597e1e71124d9ae32b1fe59c9
|
/xbrr/edinet/reader/element_schema.py
|
b78030ff98b2a4b89b4ca5131bc6e0a11deb5645
|
[
"MIT"
] |
permissive
|
chakki-works/xbrr
|
9009539e1821c3d9c815f694eb52158ccbbeeb78
|
a9783acbb6c23eb0be0e1fbfb47e5b0b0e2cbfb8
|
refs/heads/master
| 2022-07-22T22:30:17.054418 | 2021-06-16T13:27:40 | 2021-06-16T13:27:40 | 182,622,738 | 23 | 5 |
MIT
| 2022-07-15T18:42:36 | 2019-04-22T04:26:21 |
Python
|
UTF-8
|
Python
| false | false | 1,947 |
py
|
from xbrr.base.reader.base_element_schema import BaseElementSchema
class ElementSchema(BaseElementSchema):
def __init__(self,
name="", reference="", label="", alias="",
abstract="", data_type="",
period_type="", balance=""):
super().__init__()
self.name = name
self.reference = reference
self.label = label
self.alias = alias
self.abstract = abstract
self.data_type = data_type
self.period_type = period_type
self.balance = balance
def set_alias(self, alias):
self.alias = alias
return self
@classmethod
def create_from_reference(cls, reader, reference,
label_kind="", label_verbose=False):
name = reference.split("#")[-1]
label = ""
abstract = ""
data_type = ""
period_type = ""
balance = ""
if reader.xbrl_dir:
_def = reader.read_by_link(reference)
if label_kind is not None:
label = _def.label(label_kind, label_verbose)
xsd = _def.xsd
abstract = xsd["abstract"]
data_type = xsd["type"]
if "xbrli:periodType" in xsd.attrs:
period_type = xsd["xbrli:periodType"]
if "xbrli:balance" in xsd.attrs:
balance = xsd["xbrli:balance"]
instance = cls(name=name, reference=reference, label=label,
abstract=abstract, data_type=data_type,
period_type=period_type, balance=balance)
return instance
def to_dict(self):
return {
"name": self.name,
"reference": self.reference,
"label": self.label,
"abstract": self.abstract,
"data_type": self.data_type,
"period_type": self.period_type,
"balance": self.balance
}
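
if __name__ == "__main__":
    # Minimal, self-contained sketch (values are hypothetical; real schemas
    # come from ElementSchema.create_from_reference with an XBRL reader).
    schema = ElementSchema(name="NetSales",
                           reference="jppfs_cor_2019-11-01.xsd#jppfs_cor_NetSales",
                           label="Net sales", data_type="xbrli:monetaryItemType",
                           period_type="duration", balance="credit")
    print(schema.to_dict()["label"])  # -> Net sales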
|
[
"[email protected]"
] | |
bb394288997b3ae09c3bf5e93b767c0a5aa8fcdb
|
7ad616ab89e9b67bd27df2df3c8ca7487c5e4564
|
/ood/4_stack_overflow.py
|
102433a9d98dbc8f0d47c4101d2b370291a90a1b
|
[] |
no_license
|
zihuaweng/algorithm-snacks
|
cd7643c7d80d0bcb680336231214c1700fe74cc9
|
aa3d88f861bb8b0aceb7ef6c6d05523f54202d77
|
refs/heads/master
| 2023-01-13T11:03:04.395542 | 2020-11-10T04:42:41 | 2020-11-10T04:42:41 | 149,380,311 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,997 |
py
|
#!/usr/bin/env python3
# coding: utf-8
from enum import Enum


class QuestionStatus(Enum):
OPEN, CLOSED, ON_HOLD, DELETED = 1, 2, 3, 4
class QuestionClosingRemark(Enum):
DUPLICATE, OFF_TOPIC, TOO_BROAD, NOT_CONSTRUCTIVE, NOT_A_REAL_QUESTION, PRIMARILY_OPINION_BASED = 1, 2, 3, 4, 5, 6
class AccountStatus(Enum):
ACTIVE, CLOSED, CANCELED, BLACKLISTED, BLOCKED = 1, 2, 3, 4, 5
# For simplicity, we are not defining getter and setter functions. The reader can
# assume that all class attributes are private and accessed through their respective
# public getter methods and modified only through their public methods function.
class Account:
    def __init__(self, id, password, name, address, email, phone, status=AccountStatus.ACTIVE):
self.__id = id
self.__password = password
self.__name = name
self.__address = address
self.__email = email
self.__phone = phone
self.__status = status
self.__reputation = 0
def reset_password(self):
None
class Member:
def __init__(self, account):
self.__account = account
self.__badges = []
def get_reputation(self):
return self.__account.get_reputation()
def get_email(self):
return self.__account.get_email()
def create_question(self, question):
None
def create_tag(self, tag):
None
class Admin(Member):
def block_member(self, member):
None
def unblock_member(self, member):
None
class Moderator(Member):
def close_question(self, question):
None
def undelete_question(self, question):
None
class Badge:
def __init__(self, name, description):
self.__name = name
self.__description = description
class Tag:
def __init__(self, name, description):
self.__name = name
self.__description = description
self.__daily_asked_frequency = 0
self.__weekly_asked_frequency = 0
import datetime
class Notification:
def __init__(self, id, content):
self.__notification_id = id
self.__created_on = datetime.datetime.now()
self.__content = content
def send_notification(self):
None
import datetime
class Photo:
def __init__(self, id, path, member):
self.__photo_id = id
self.__photo_path = path
self.__creation_date = datetime.datetime.now()
self.__creating_member = member
def delete(self):
None
# import datetime
class Bounty:
def __init__(self, reputation, expiry):
self.__reputation = reputation
self.__expiry = expiry
def modify_reputation(self, reputation):
None
from abc import ABC, abstractmethod
class Search(ABC):
def search(self, query):
None
import datetime
class Question(Search):
def __init__(self, title, description, bounty, asking_member):
self.__title = title
self.__description = description
self.__view_count = 0
self.__vote_count = 0
self.__creation_time = datetime.datetime.now()
self.__update_time = datetime.datetime.now()
self.__status = QuestionStatus.OPEN
self.__closing_remark = QuestionClosingRemark.DUPLICATE
self.__bounty = bounty
self.__asking_member = asking_member
self.__photos = []
self.__comments = []
self.__answers = []
def close(self):
None
def undelete(self):
None
def add_comment(self, comment):
None
def add_bounty(self, bounty):
None
def search(self, query):
# return all questions containing the string query in their title or description.
None
class Comment:
def __init__(self, text, member):
self.__text = text
self.__creation_time = datetime.datetime.now()
self.__flag_count = 0
self.__vote_count = 0
self.__asking_member = member
def increment_vote_count(self):
None
class Answer:
def __init__(self, text, member):
self.__answer_text = text
self.__accepted = False
self.__vote_count = 0
self.__flag_count = 0
self.__creation_time = datetime.datetime.now()
self.__creating_member = member
self.__photos = []
def increment_vote_count(self):
None
|
[
"[email protected]"
] | |
b208e49da531e72d4264b91f912ebd1523d749d6
|
731c3f2f85f6002725322eedc0b2c8b5e74f610e
|
/0-jakc/jakc_hr/__openerp__.py
|
2df1fff1bd5ba0cb87ac4230b96c9fe3ed3e6001
|
[] |
no_license
|
babarlhr/project-0021
|
1ac824657f893c8f25d6eb3b839051f350d7cc9d
|
e30b8a9f5d2147d3ca5b56b69ec5dbd22f712a91
|
refs/heads/master
| 2021-09-22T15:45:47.431000 | 2018-09-11T14:59:49 | 2018-09-11T14:59:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
py
|
# -*- coding: utf-8 -*-
{
'name': 'Jakc Labs - HR Enhancement',
'version': '9.0.0.1.0',
'category': 'General',
'license': 'AGPL-3',
'summary': 'HR Enchancement',
'author': "Jakc Labs",
'website': 'http://www.jakc-labs.com/',
'depends': [
'hr'
],
'data': [
'views/jakc_hr_view.xml',
],
'installable': True,
'application': True,
}
|
[
"[email protected]"
] | |
91b6b76a311bc1f86fdb741e4608f8220dbde146
|
30d61ce0b728f31a830db6b6b1954a32551990b2
|
/src/gui_config/custom/sr_source_mode_tab.py
|
b2a24e0a0387ec82695aca2a32af5633db14603c
|
[
"MIT"
] |
permissive
|
hgiesel/anki_set_randomizer
|
6755dc8489b703887c55a5427bbbdab858f58a65
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
refs/heads/master
| 2022-08-24T05:45:13.339132 | 2020-01-15T17:04:26 | 2020-01-30T13:56:50 | 197,258,760 | 5 | 0 |
MIT
| 2022-07-20T17:28:42 | 2019-07-16T19:56:27 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,391 |
py
|
from aqt.qt import QWidget
from ...lib.config import deserialize_source_mode, deserialize_cloze_options, deserialize_occlusion_options
from ..sr_source_mode_tab_ui import Ui_SRSourceModeTab
class SRSourceModeTab(QWidget):
def __init__(self):
super().__init__()
self.ui = Ui_SRSourceModeTab()
self.ui.setupUi(self)
def setupUi(self, source_mode):
cloze_options = source_mode.cloze_options
self.ui.clozeShortcutsEnabledCheckBox.setChecked(cloze_options.shortcuts_enabled)
self.ui.clozeVsPrefixLineEdit.setText(cloze_options.vs_prefix)
self.ui.clozeOpenDelimLineEdit.setText(cloze_options.open_delim)
self.ui.clozeCloseDelimLineEdit.setText(cloze_options.close_delim)
def exportClozeOptions(self):
return deserialize_cloze_options({
'shortcutsEnabled': self.ui.clozeShortcutsEnabledCheckBox.isChecked(),
'vsPrefix': self.ui.clozeVsPrefixLineEdit.text(),
'openDelim': self.ui.clozeOpenDelimLineEdit.text(),
'closeDelim': self.ui.clozeCloseDelimLineEdit.text(),
})
def exportOcclusionOptions(self):
return deserialize_occlusion_options({})
def exportData(self):
return deserialize_source_mode({
'clozeOptions': self.exportClozeOptions(),
'occlusionOptions': self.exportOcclusionOptions(),
})
|
[
"[email protected]"
] | |
223bd273f49b7e533b590ec4dc1f9394ef62d3c7
|
bfbe642d689b5595fc7a8e8ae97462c863ba267a
|
/bin/Python27/Lib/site-packages/OMPython/OMTypedParser.py
|
a0e4c90b6d536f97341c456f18de90f519d82e80
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mcanthony/meta-core
|
0c0a8cde1669f749a4880aca6f816d28742a9c68
|
3844cce391c1e6be053572810bad2b8405a9839b
|
refs/heads/master
| 2020-12-26T03:11:11.338182 | 2015-11-04T22:58:13 | 2015-11-04T22:58:13 | 45,806,011 | 1 | 0 | null | 2015-11-09T00:34:22 | 2015-11-09T00:34:22 | null |
UTF-8
|
Python
| false | false | 4,041 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Martin Sjölund"
__license__ = """
This file is part of OpenModelica.
Copyright (c) 1998-CurrentYear, Open Source Modelica Consortium (OSMC),
c/o Linköpings universitet, Department of Computer and Information Science,
SE-58183 Linköping, Sweden.
All rights reserved.
THIS PROGRAM IS PROVIDED UNDER THE TERMS OF THE BSD NEW LICENSE OR THE
GPL VERSION 3 LICENSE OR THE OSMC PUBLIC LICENSE (OSMC-PL) VERSION 1.2.
ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS PROGRAM CONSTITUTES
RECIPIENT'S ACCEPTANCE OF THE OSMC PUBLIC LICENSE OR THE GPL VERSION 3,
ACCORDING TO RECIPIENTS CHOICE.
The OpenModelica software and the OSMC (Open Source Modelica Consortium)
Public License (OSMC-PL) are obtained from OSMC, either from the above
address, from the URLs: http://www.openmodelica.org or
http://www.ida.liu.se/projects/OpenModelica, and in the OpenModelica
distribution. GNU version 3 is obtained from:
http://www.gnu.org/copyleft/gpl.html. The New BSD License is obtained from:
http://www.opensource.org/licenses/BSD-3-Clause.
This program is distributed WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, EXCEPT AS
EXPRESSLY SET FORTH IN THE BY RECIPIENT SELECTED SUBSIDIARY LICENSE
CONDITIONS OF OSMC-PL.
Author : Anand Kalaiarasi Ganeson, [email protected], 2012-03-19
Version: 1.0
"""
__status__ = "Prototype"
__maintainer__ = "https://openmodelica.org"
from pyparsing import *
import sys
def convertNumbers(s,l,toks):
n = toks[0]
try:
return int(n)
except ValueError, ve:
return float(n)
def convertString(s,s2):
return s2[0].replace("\\\"",'"')
def convertDict(d):
return dict(d[0])
def convertTuple(t):
return tuple(t[0])
omcRecord = Forward()
omcValue = Forward()
TRUE = Keyword("true").setParseAction( replaceWith(True) )
FALSE = Keyword("false").setParseAction( replaceWith(False) )
NONE = (Keyword("NONE") + Suppress("(") + Suppress(")") ).setParseAction( replaceWith(None) )
SOME = (Suppress( Keyword("SOME") ) + Suppress("(") + omcValue + Suppress(")") )
omcString = QuotedString(quoteChar='"',escChar='\\', multiline = True).setParseAction( convertString )
omcNumber = Combine( Optional('-') + ( '0' | Word('123456789',nums) ) +
Optional( '.' + Word(nums) ) +
Optional( Word('eE',exact=1) + Word(nums+'+-',nums) ) )
ident = Word(alphas+"_",alphanums+"_") | Combine( "'" + Word(alphanums+"!#$%&()*+,-./:;<>=?@[]^{}|~ ") + "'" )
fqident = Forward()
fqident << ( (ident + "." + fqident) | ident )
omcValues = delimitedList( omcValue )
omcTuple = Group( Suppress('(') + Optional(omcValues) + Suppress(')') ).setParseAction(convertTuple)
omcArray = Group( Suppress('{') + Optional(omcValues) + Suppress('}') ).setParseAction(convertTuple)
omcValue << ( omcString | omcNumber | omcRecord | omcArray | omcTuple | SOME | TRUE | FALSE | NONE | Combine(fqident) )
recordMember = delimitedList( Group( ident + Suppress('=') + omcValue ) )
omcRecord << Group( Suppress('record') + Suppress( ident ) + Dict( recordMember ) + Suppress('end') + Suppress( ident ) + Suppress(';') ).setParseAction(convertDict)
omcGrammar = omcValue + StringEnd()
omcNumber.setParseAction( convertNumbers )
def parseString(string):
return omcGrammar.parseString(string)[0]
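# Hedged examples of the grammar in action (arrays/tuples become Python
# tuples, records become dicts, per the parse actions above):
#   parseString('{"a", 1, true}')  ->  ('a', 1, True)
#   parseString('SOME(2.5)')       ->  2.5
#   parseString('NONE()')          ->  None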
if __name__ == "__main__":
testdata = """
(1.0,{{1,true,3},{"4\\"
",5.9,6,NONE ( )},record ABC
startTime = ErrorLevel.warning,
'stop*Time' = SOME(1.0)
end ABC;})
"""
expected = (1.0, ((1, True, 3), ('4"\n', 5.9, 6, None), {"'stop*Time'": 1.0, 'startTime': 'ErrorLevel.warning'}))
results = parseString(testdata)
if results <> expected:
print "Results:",results
print "Expected:",expected
print "Failed"
sys.exit(1)
print "Matches expected output",
print type(results),repr(results)
|
[
"[email protected]"
] | |
6ddbc8154053d1a105be3ce47e7b58a27e253eb8
|
de479d4a8af0e070b2bcae4186b15a8eb74971fb
|
/cn/iceknc/study/k_python_mini_web/__init__.py
|
2c29f9732decc87fd29a825cf08dd49ab11e8eb8
|
[] |
no_license
|
iceknc/python_study_note
|
1d8f6e38be57e4dc41a661c0a84d6ee223c5a878
|
730a35890b77ecca3d267fc875a68e96febdaa85
|
refs/heads/master
| 2020-05-19T18:44:55.957392 | 2019-09-27T01:15:54 | 2019-09-27T01:15:54 | 185,160,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
# -*- coding: utf-8 -*-
# @Author: 徐志鹏
# @Date : 2019/5/29
# @Desc :
def main():
pass
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
f66be245d49c0500c212fbf3f7565976f9419b1f
|
80755ce68bf894bfa7c7cec50051b18a6069c552
|
/nkamg_malware/collector/samples/file_monitor.py
|
0d59cd857a599c427bd46b6fa686fa151a915729
|
[
"Apache-2.0"
] |
permissive
|
NKQiuKF/malware_update
|
6538c9308dd7b476b687fca4ea120209207257bc
|
a875b5011fee2486da5618e01da61d730d6ac0dd
|
refs/heads/master
| 2022-10-17T09:08:34.605641 | 2019-09-02T09:00:45 | 2019-09-02T09:00:45 | 205,817,190 | 0 | 0 | null | 2022-10-06T18:33:50 | 2019-09-02T08:59:47 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,497 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Nankai University Information Security
#QiuKF [email protected]
#get file results at fixed time
#create processed.csv in sub-directories
#create Total_File_Data.csv at /collection
from multiprocessing import Process,Pool
import os
import pandas as pd
import time
import sys
sys.path.append('../core/')
from setting import SAMPLES_PATH
#samples_path='/data/malware/'
def merge_file():
data = {"sha256":[],"type":[]}
total_df=pd.DataFrame(data)
    hex_chars = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']  # renamed from 'chr' to avoid shadowing the builtin
    for first in hex_chars:
sub_dir_list=make_file_dir(first)
for each in sub_dir_list:
sub_pd=pd.read_csv(SAMPLES_PATH+each+'processed.csv')
total=[total_df,sub_pd]
total_df=pd.concat(total)
print 'concat '+each+'processed.csv'
total_df.to_csv('Total_File_Data.csv',index=False)
def exe_file(first_dir):
count=0
#print 'test'
print 'Run task %s (%s)...' % (first_dir, os.getpid())
child_dir=make_file_dir(first_dir)
#print child_dir
for each_dir in child_dir:
data = {"sha256":[],"type":[]}
processed_df=pd.DataFrame(data)
all_files=os.listdir(SAMPLES_PATH+each_dir)
for each_file in all_files:
file_command = os.popen('file ' +SAMPLES_PATH+each_dir+each_file)
#print 'file ' +SAMPLES_PATH+each_dir+each_file
read_data= file_command.read()
tmp=read_data[read_data.index(':') + 2 : read_data.index('\n')]
#print tmp
processed_df.loc[len(processed_df)]=[each_file,tmp]
processed_df.to_csv(SAMPLES_PATH+each_dir+'processed.csv',index=False)
print 'created '+SAMPLES_PATH+each_dir+'processed.csv'
def make_file_dir(first):
ret=[]
chr_list=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
tmp=''
for second in chr_list:
two='/'+second
for third in chr_list:
three=two+'/'+third+'/'
ret.append(first+three)
#print len(ret)
#print ret
return ret
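# Hedged illustration: make_file_dir enumerates the 256 two-level hex
# subdirectories under one top-level hex directory, e.g.
#   make_file_dir('a')  ->  ['a/0/0/', 'a/0/1/', ..., 'a/f/f/']  (256 entries)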
def main():
#print SAMPLES_PATH
print('Parent process %s.' %os.getpid())
#dic_list=make_file_dir()
first_dic=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
p=Pool(16)
for each in first_dic:
#print each
p.apply_async(exe_file,args=(each,))
p.close()
p.join()
print 'start merging file results...'
merge_file()
#is_apk('1.apk')
#is_apk(base_path+'three')
if __name__=='__main__':
while True:
main()
time.sleep(36000)
|
[
"[email protected]"
] | |
8d77c1ca5725c5efe3918715e630d4c0b280af6f
|
cf803d382d6e0bc7492d787e91a695a2fda944b8
|
/model.py
|
a1971dd66b502e9a7ab9cad39d075165745a907a
|
[
"BSD-2-Clause"
] |
permissive
|
parenthetical-e/fmri
|
d676d524cf1606f098864c5bf9e98607674db1ab
|
32c5571d8767684ec6586320e85485cd89ed9327
|
refs/heads/master
| 2021-01-02T22:17:28.243141 | 2020-04-07T06:07:26 | 2020-04-07T06:07:26 | 3,848,746 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,361 |
py
|
"""
A set of functions for creating or manipulating files needed for design
matrices, both in spm and python.
"""
def spm_onsets(trialfile='', durations=3, recode=None):
"""
    Map <trialfile> (a 1d csv) into onset/TR time, which is determined by
    <durations> (an int if every trial had the same length in the model,
    or a list if not).
    <recode> may be a dict of the form {1:1, 2:1, 3:1}, where each key is a
    current code in trialfile and each value is what that code should be
    recoded as; in this example 1, 2 and 3 all become 1. Any value present
    in trialfile but without a key is silently left as is.
"""
import csv
fs = open(trialfile, 'r')
    trials = next(csv.reader(fs))
fs.close()
if isinstance(durations, int):
        durations = [durations, ] * len(trials)
    elif isinstance(durations, (list, tuple)):
pass
else:
raise TypeError('<durations> must be an int, list or tuple.')
    if recode is not None:
        print('Recoding....')
        trials = [recode.get(t, t) for t in trials]
    # Map the trialfile data into TR/onset time.
onsets = []
for t,d in zip(trials,durations):
onsets.extend([t,] + [0,]*(d-1))
## if t = 2 and d = 3 then [t,] + [0,]*(d-1)
## should give the list: [2 0 0]
return onsets,durations
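# Hedged worked example of the mapping above (trial codes shown as ints for
# clarity; csv.reader actually yields strings):
#   trials = [2, 1] with durations = [3, 2]
#   -> onsets = [2, 0, 0, 1, 0]   (each trial code padded with d-1 zeros)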
|
[
"[email protected]"
] | |
4906a33c2bde49d3d89e48c0aa86f333a1aef478
|
1602b8f6f40e27269a6d9fe42dbc720a5127b175
|
/fleet/category.py
|
9b756897fb0f2b29b020ab0444a68c1526aa3707
|
[] |
no_license
|
mit-jp/fleet-model
|
a9f581c2cb56196a13e2db8ef883c1f8b61b2682
|
2c1b293299741a076384114572dc74a988bb8581
|
refs/heads/master
| 2020-04-11T01:30:26.634473 | 2017-01-29T04:08:31 | 2017-01-29T04:08:31 | 32,412,401 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,201 |
py
|
class Category:
"""Rudimentary ordered tree data structure for vehicle classes."""
_parent = None
_children = []
_label = ''
def __init__(self, label, children=dict(), parent=None):
self._parent = parent
self._label = label
try:
self._children = list([Category(k, v, self) for k, v in
children.items()])
except AttributeError:
pass
def __str__(self):
return self._label
def children(self):
return list(map(str, self._children))
def parent(self):
return str(self._parent)
def nodes(self):
return sum([child.nodes() for child in self._children], [self._label])
    def leaves(self):
        """Return the labels of all leaf nodes at or below this node."""
        if len(self._children) == 0:
            return [self._label]
        else:
            return sum([child.leaves() for child in self._children], [])
def find(self, label):
"""Return the subtree with *label* at its head."""
if label == self._label:
return self
for child in self._children:
result = child.find(label)
if result:
return result
return None
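# Hedged usage sketch (child order follows dict insertion order on Python 3.7+):
#   tree = Category('vehicles', {'cars': {'sedan': {}, 'suv': {}}, 'trucks': {}})
#   tree.children()             ->  ['cars', 'trucks']
#   tree.nodes()                ->  ['vehicles', 'cars', 'sedan', 'suv', 'trucks']
#   tree.find('cars').parent()  ->  'vehicles'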
|
[
"[email protected]"
] | |
87e8b16a2d83845e4d137ca080069e56f6a1690d
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/awentzonline_keras-rtst/keras-rtst-master/keras_rtst/models/style_xfer.py
|
d88c20d29b26ef489cc52a716031330e201234f5
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 |
Python
|
UTF-8
|
Python
| false | false | 5,226 |
py
|
'''Texture Network for style transfer.'''
import time
import keras_vgg_buddy
import numpy as np
from keras import activations
from keras import backend as K
from keras.layers import advanced_activations
from keras.layers.core import Layer
from keras.layers.convolutional import AveragePooling2D
from keras.models import Graph
from keras.optimizers import Adam
from .base import create_res_texture_net, create_sequential_texture_net, dumb_objective
from .regularizers import (
AnalogyRegularizer, FeatureContentRegularizer, FeatureStyleRegularizer,
MRFRegularizer, TVRegularizer)
def make_model(args, style_img=None):
model = Graph()
model.add_input('content', batch_input_shape=(args.batch_size, 3, args.max_height, args.max_width))
try: # if it's a standard activation then just keep the string
activations.get(args.activation)
activation = args.activation
except: # otherwise we need to look up the class in advanced activations (e.g. LeakyReLU)
activation = getattr(advanced_activations, args.activation, 'activation function')
if args.sequential_model:
texnet = create_sequential_texture_net(args.max_height, args.max_width,
activation=activation, num_res_filters=args.num_res_filters,
num_inner_blocks=args.num_blocks)
else:
texnet = create_res_texture_net(args.max_height, args.max_width,
activation=activation, num_res_filters=args.num_res_filters,
num_res_blocks=args.num_blocks)
# add the texture net to the model
model.add_node(texnet, 'texnet', 'content')
model.add_output('texture_rgb', 'texnet')
# hook up the training network stuff
if args.train:
model.add_node(Layer(), 'vgg_concat', inputs=['texnet', 'content'], concat_axis=0)
# add VGG and the constraints
keras_vgg_buddy.add_vgg_to_graph(model, 'vgg_concat', pool_mode=args.pool_mode,
trainable=False, weights_path=args.vgg_weights)
# add the regularizers for the various feature layers
vgg = keras_vgg_buddy.VGG16(args.max_height, args.max_width, pool_mode=args.pool_mode, weights_path=args.vgg_weights)
print('computing static features')
feature_layers = set()
if args.style_weight:
feature_layers.update(args.style_layers)
if args.content_weight:
feature_layers.update(args.content_layers)
if args.mrf_weight:
feature_layers.update(args.mrf_layers)
if args.analogy_weight:
feature_layers.update(args.analogy_layers)
style_features = vgg.get_features(np.expand_dims(style_img, 0), feature_layers)
regularizers = []
if args.style_weight != 0.0:
for layer_name in args.style_layers:
layer = model.nodes[layer_name]
style_regularizer = FeatureStyleRegularizer(
target=style_features[layer_name],
weight=args.style_weight / len(args.style_layers))
style_regularizer.set_layer(layer)
regularizers.append(style_regularizer)
if args.content_weight != 0.0:
for layer_name in args.content_layers:
layer = model.nodes[layer_name]
content_regularizer = FeatureContentRegularizer(
weight=args.content_weight / len(args.content_layers))
content_regularizer.set_layer(layer)
regularizers.append(content_regularizer)
if args.mrf_weight != 0.0:
for layer_name in args.mrf_layers:
layer = model.nodes[layer_name]
mrf_regularizer = MRFRegularizer(
K.variable(style_features[layer_name]),
weight=args.mrf_weight / len(args.mrf_layers))
mrf_regularizer.set_layer(layer)
regularizers.append(mrf_regularizer)
if args.analogy_weight != 0.0:
style_map_img = keras_vgg_buddy.load_and_preprocess_image(args.style_map_image_path, width=args.max_width, square=True)
style_map_features = vgg.get_features(np.expand_dims(style_map_img, 0), args.analogy_layers)
for layer_name in args.analogy_layers:
layer = model.nodes[layer_name]
analogy_regularizer = AnalogyRegularizer(
style_map_features[layer_name],
style_features[layer_name],
weight=args.analogy_weight / len(args.analogy_layers))
analogy_regularizer.set_layer(layer)
regularizers.append(analogy_regularizer)
if args.tv_weight != 0.0:
tv_regularizer = TVRegularizer(weight=args.tv_weight)
tv_regularizer.set_layer(model.nodes['texnet'])
regularizers.append(tv_regularizer)
setattr(model.nodes['vgg_concat'], 'regularizers', regularizers) # Gotta put em somewhere?
print('compiling')
start_compile = time.time()
adam = Adam(lr=args.learn_rate, beta_1=0.7)
model.compile(optimizer=adam, loss=dict(texture_rgb=dumb_objective))
print('Compiled model in {:.2f}'.format(time.time() - start_compile))
return model
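# Hedged usage sketch: make_model reads its configuration off an
# argparse-style namespace; the values below are hypothetical, not the
# project's defaults (train=False keeps the network inference-only):
#   from argparse import Namespace
#   args = Namespace(batch_size=1, max_height=256, max_width=256,
#                    activation='relu', sequential_model=False,
#                    num_res_filters=128, num_blocks=5,
#                    train=False, learn_rate=1e-3)
#   model = make_model(args)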
|
[
"[email protected]"
] | |
366ad807aedcc7af54f5060dcaa12dc46f0f7613
|
4beb10c8a8023f4945c996a1487ec1b3968cb5da
|
/f5_lbaas_dashboard/enabled/_1480_project_loadbalancersv2_panel.py
|
d89ef94a631dd6c077b8bc716031d61c12c3ef69
|
[
"Apache-2.0"
] |
permissive
|
F5Networks/f5-lbaas-dashboard
|
7aebb669a27d8ebdc9feaa7f088f9158fb157046
|
62cb1dfbb87c94bdcb3f53f6ec2ab0004ac43d54
|
refs/heads/master
| 2023-03-28T01:59:58.666570 | 2022-09-27T01:16:34 | 2022-09-27T01:16:34 | 147,327,541 | 0 | 0 |
Apache-2.0
| 2022-09-27T01:16:37 | 2018-09-04T10:15:51 |
JavaScript
|
UTF-8
|
Python
| false | false | 993 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'loadbalancersv2'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'network'
# Python panel class of the PANEL to be added.
ADD_PANEL = (
'f5_lbaas_dashboard.dashboards.project.loadbalancersv2.panel'
'.LoadBalancersUI')
|
[
"[email protected]"
] | |
5bb9776224c4813a523963dc2805bc70a092fa60
|
40d5394eea0c1288fcdd57180a0141672cb198fa
|
/users/views.py
|
d8f37654f883f0bde0e2232915ec025e0a7e6ec4
|
[
"MIT"
] |
permissive
|
mornicamwende/ranker
|
6c12b0297703ac559de84bb0b36396ec2738f970
|
107bcaad61bb5e726570a8250b55eb2e6245dc7a
|
refs/heads/master
| 2023-01-07T00:18:54.192155 | 2020-10-27T17:07:21 | 2020-10-27T17:07:21 | 306,592,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,522 |
py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Profile
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# Create your views here.
def register(request):
if request.method =='POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
# profile = UserProfile.objects.create(user=request.user)
username = form.cleaned_data.get('username')
messages.success(request, f' Your account has been created! You are now able to log in!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form':form})
@login_required
def profile(request):
if request.method =='POST':
u_form=UserUpdateForm(request.POST, instance=request.user)
p_form=ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f' Your account has been updated!')
return redirect('profile')
else:
u_form=UserUpdateForm(instance=request.user)
p_form=ProfileUpdateForm(instance=request.user.profile)
context={
'u_form':u_form,
'p_form':p_form
}
return render(request, 'users/profile.html', context)
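# Hedged sketch of the URL wiring these views assume (redirect('login') and
# redirect('profile') resolve view names; the paths are illustrative):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('register/', views.register, name='register'),
#       path('profile/', views.profile, name='profile'),
#   ]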
|
[
"[email protected]"
] | |
449e5f4d3e112507dc7540c319584658b70805eb
|
560df0c3f859ae2d4c279f4669f9ab8758c486fb
|
/old/Euler063.py
|
defeccb9479085d6e4df2722c16167b7442a9de6
|
[] |
no_license
|
gronkyzog/Puzzles
|
0e7cdd7fa5ab8139d63a721cac5ee30e80728c7a
|
cdc145857f123a98f1323c95b5744d36ce50355f
|
refs/heads/master
| 2021-03-13T00:01:17.715403 | 2015-02-22T11:59:03 | 2015-02-22T11:59:03 | 17,100,928 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 176 |
py
|
import math
counter = 0
for p in range(1,1000):
for n in range(1,1000):
x = p**n
z = len(str(x))
if z == n:
counter +=1
print counter,p,n,x
if z > n:
break
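# Hedged note: this counts n-digit integers that are also n-th powers
# (Project Euler 63); only bases 1-9 can qualify because 10**n always has
# n+1 digits, and the break is safe since len(str(p**n)) only grows with n.
# The published answer for this problem is 49.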
|
[
"[email protected]"
] | |
b44c18c0337ef4ede7f2ee27dff0c56a32873a98
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5639104758808576_0/Python/hongkai/standing_ovation.py
|
85c7aaf75714250e3ffa80b7c69e7aa3067301b0
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
fin = open("../../Downloads/A-small-attempt0 (1).in", "r")
out = open("standing_ovation.out", "w")
cases = int(fin.readline())
for i in range(cases):
d, shy = fin.readline().split()
    lowest = 0  # renamed from 'min' to avoid shadowing the builtin
    curr = 0
    for x in shy:
        curr += int(x)
        curr -= 1
        if curr < lowest:
            lowest = curr
    out.write("Case #%d: %d\n" % (i + 1, -lowest))
out.close()
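# Hedged trace: for shy = "11111" the running total never dips below 0, so
# no friends are needed and the answer is 0; for shy = "09" it dips to -1 at
# shyness level 0 (no one of shyness 0 is present), so the answer is 1.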
|
[
"[email protected]"
] | |
97f78d057353db5df358e1e31bac1b98392279f5
|
646b0a41238b96748c7d879dd1bf81858651eb66
|
/archive/memd.archive/gulp/Potential.py
|
ac129df6be3cb0a4594a14181a7914d174181b84
|
[] |
no_license
|
danse-inelastic/molDynamics
|
ded0298f8219064e086472299e1383d3dff2dac3
|
c8e0bfd9cb65bcfc238e7993b6e7550289d2b219
|
refs/heads/master
| 2021-01-01T19:42:29.904390 | 2015-05-03T17:27:38 | 2015-05-03T17:27:38 | 34,993,746 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,489 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Brandon Keith
# California Institute of Technology
# (C) 2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
#from molDynamics.gulp.forcefields.ForcefieldLoader import ForcefieldLoader
from memd.gulp.forcefields.InputFile import InputFile
class Potential(Component):
'''This class serves as an API/interface for gulp potential construction.'''
class Inventory(Component.Inventory):
import pyre.inventory as inv
dispersionInRecipSpace = inv.bool('dispersionInRecipSpace', default = False)
dispersionInRecipSpace.meta['tip'] = '''whether to calculate dispersion forces
partly in reciprocal space'''
useInitialBondingOnly = inv.bool('useInitialBondingOnly', default = False)
useInitialBondingOnly.meta['tip'] = '''instead of reassigning
bonding based on every optimization or time step, use intial geometry only to assign bonding'''
forcefield = inv.facility('forcefield', default=InputFile('gulpLibrary'))
forcefield.meta['tip'] = 'a class containing forcefield types'
#forcefield.meta['known_plugins'] = ['gulpLibrary','manualEntry']
moleculeIdentification = inv.str('moleculeIdentification', default = 'None')
moleculeIdentification.meta['tip'] = '''identify molecules based on covalent radii
and deal with intramolecular coulomb interactions'''
moleculeIdentification.validator=inv.choice(['None','identify molecules; remove intramolecular Coulomb forces',
'identify molecules; retain intramolecular Coulomb forces'])
def __init__(self, name='potential', facility='Potential'):
Component.__init__(self, name, facility)
self.i=self.inventory
# def _configure(self):
# Component._configure(self)
# #self.sample = self.i.sample
def identifyOptions( self, visitor):
return visitor.writePotentialOptions(self)
def identifyKeywords( self, visitor):
return visitor.writePotentialKeywords(self)
# version
__id__ = "$Id$"
# Generated automatically by PythonMill on Mon Apr 16 12:44:30 2007
# End of file
|
[
"[email protected]"
] | |
9450412ca95624708fe0ba54ba1780d0d0691d95
|
4c639c521834f4349ba2165e72c5857ddecee625
|
/acoustic_X_text_X_visual/AttComb_aXtXv/gender/attention_fusion_network/archived_models/archived_model_1_(MSE_best)/metrics.py
|
9b7d3a09c1cd9ee948834703374dc115f06d923e
|
[] |
no_license
|
arbaazQureshi/attention_based_multimodal_fusion_for_estimating_depression
|
f4ea86746d9961fe4b9cf4f88f6cec604a201656
|
e4c57ac51c271c36c244c260b01a22fa1897ffcb
|
refs/heads/master
| 2020-05-19T22:48:03.665953 | 2019-05-06T19:34:31 | 2019-05-06T19:34:31 | 185,252,875 | 7 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
import numpy as np
import sklearn.metrics
from load_data import load_development_data
from load_model import load_model
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
if __name__ == "__main__":
model = load_model()
model.load_weights('optimal_weights.h5')
dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_transcript, dev_Y, dev_X_gender = load_development_data()
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['mean_absolute_error'])
dev_Y_hat = model.predict([dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_X_gender, dev_transcript])
dev_Y = np.array(dev_Y)
dev_Y_hat = dev_Y_hat.reshape((dev_Y.shape[0],))
RMSE = np.sqrt(sklearn.metrics.mean_squared_error(dev_Y, dev_Y_hat))
MAE = sklearn.metrics.mean_absolute_error(dev_Y, dev_Y_hat)
EVS = sklearn.metrics.explained_variance_score(dev_Y, dev_Y_hat)
print('RMSE :', RMSE)
print('MAE :', MAE)
#print(np.std(dev_Y - dev_Y_hat))
print('EVS :', EVS)
with open('regression_metrics.txt', 'w') as f:
f.write('RMSE\t:\t' + str(RMSE) + '\nMAE\t\t:\t' + str(MAE) + '\nEVS\t\t:\t' + str(EVS))
|
[
"[email protected]"
] | |
a2453d90db22aca756d619b74b525d6186f4875d
|
699c7f26a91106a2fc79bb15299ce0cee532a2dd
|
/xrayspecprocessing/multi.combine.group.py
|
ff57d3b4c05ec312c219fc8fc8133076e2dafd82
|
[] |
no_license
|
samconnolly/astro
|
70581a4d3f2086716aace3b5db65b74aaaa5df95
|
3731be313592c13dbb8af898e9734b98d83c0cc2
|
refs/heads/master
| 2020-04-06T03:40:27.454279 | 2014-03-12T14:36:34 | 2014-03-12T14:36:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,509 |
py
|
# multi.combine.group.py
# Uses the HEADAS tool addspec and grppha to sum sets of spectra, assign their
# summed background and response files and produce a grouped spectrum
# Uses a text file listing the input spectra, as output by listbinmin.py
# Sam Connolly 4/3/13
import os
# ====================== PARAMATERS ============================================
# file route - directory containing spectra and spectrum list
inroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
+"/ngc1365/spectra/all/"
outroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
+"/ngc1365/spectra/summed/"
# file names
fname = "speclist.txt"
outname = "13.14.25.summed"
# Grouping command (e.g. "group min 15" for min of 15 counts per bin,
# "group 25 150 4" to group channels 25-150 into groups of 4
# [Swift XRT has 1024 channels] )
groupcommand = 'group min 15'
# overwrite existing files?
overwrite = False
# ==============================================================================
# get current directory, to return to
originaldir = os.getcwd()
# change to directory of spectra
os.chdir(inroute)
#===============================================================================
# sum spectra
#===============================================================================
# create the addspec command
sumcommand = "addspec " + fname + " " + outname + " qaddrmf = yes"\
+ " qsubback = yes" + " clobber = " + str(overwrite)
# add spectra
os.system(sumcommand)
#===============================================================================
# group spectra
#===============================================================================
# file names
spectrum = outname + ".pha"
back = outname + ".bak"
rmf = outname + ".rsp"
output = outname + "_grp.pha"
# overwriting or not
if overwrite == True:
over = '!'
else:
over = ''
# generate grppha command
gcommand = 'grppha ' + spectrum + ' ' + over + output + ' comm = "' + \
'chkey BACKFILE ' + back + \
' & chkey RESPFILE ' + rmf + \
' & ' + groupcommand + ' & exit"'
# execute command
os.system(gcommand)
# move files to output folder
movecommand = "mv " + spectrum + " " + outroute \
+ " & mv " + back + " " + outroute \
+ " & mv " + rmf + " " + outroute\
+ " & mv " + output + " " + outroute
os.system(movecommand)
#-------------------------------------------------------------------------------
# switch back to original directory
os.chdir(originaldir)
|
[
"[email protected]"
] | |
d3249edfbd3bfe038c605e6a6c80a59a783bba05
|
4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446
|
/Python基础笔记/19/代码/2.协程.py
|
b87a5c7b38c3ac5ebbe4f72a39d93ec54e0ed60b
|
[] |
no_license
|
zhenguo96/test1
|
fe21510aea7feb674e52fd7a86d4177666f841c5
|
0d8de7e73e7e635d26462a0bc53c773d999498be
|
refs/heads/master
| 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
# Coroutine
def sub():
print("开始")
x = yield
print("x = ",x)
y = yield x + 1
print("x = ", x, "y = ", y)
yield
x1 = sub()
next(x1)
print(x1.send(3))
x1.send(4)
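# Hedged trace of the run above:
#   next(x1)    -> prints "开始", pauses at 'x = yield'
#   x1.send(3)  -> x = 3, prints it, yields x + 1, so print(x1.send(3)) shows 4
#   x1.send(4)  -> y = 4, prints x and y, pauses at the final bare yield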
|
[
"[email protected]"
] | |
3260dc302f4391afe755256b44ea9ca140f33a0e
|
8ad8ee4e3a4e0e8ae0ed8e92c68cf122f5ba3723
|
/jk_en/jk_en/sendEmail.py
|
e87f6f678832966b752cbb243ab64a762fe3c534
|
[] |
no_license
|
yangyangyanga/automatic_update
|
5b5065713853c4a1225142ece4ea39be1a05d011
|
53c1777cbb84e489b887f38e2745477d6b6f4604
|
refs/heads/master
| 2020-05-25T21:18:24.979779 | 2019-05-22T08:34:02 | 2019-05-22T08:34:02 | 187,996,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,194 |
py
|
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import pymysql
def sendEmail(subject='无标题',messageText='无内容'):
# accepter = '[email protected]'
accepter = "[email protected]"
sender = "[email protected]"
    # Three arguments: the text content, 'plain' for the text format, 'utf-8' for the encoding
message = MIMEText(messageText, 'plain', 'utf-8')
message['From'] = sender
message['To'] = accepter
    # Email subject
message['Subject'] = Header(subject, 'utf-8')
try:
smtp = smtplib.SMTP()
smtp.connect('smtp.163.com', '25')
smtp.login('[email protected]', 'cyh1995')
smtp.sendmail(sender,accepter, message.as_string())
print("发送成功")
smtp.quit()
except smtplib.SMTPException as e:
print(e, "发送失败")
def SendEmailMain():
conn = pymysql.connect(host='172.16.10.71', port=3306, user='python_team', passwd='shiqiyu', db='hooli_school',charset="utf8")
cursor = conn.cursor()
    # Fetch the school records whose content changed
conn.ping(reconnect=True)
sql = "select old_id,url_old,university,change_context from Label_content where old_id like 'e%' and change_context like '%1%' order by university"
cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
sql2 = "select count(*),university from Label_content where change_context like '%1%' and old_id like 'e%' GROUP BY university"
cursor.execute(sql2)
conn.commit()
result2=cursor.fetchall()
# print(result)
# print(result2)
conn.close()
sendemailschool=''.join(list(map(lambda x:x[1]+'有'+str(x[0])+'条专业发送变化'+'\n',result2)))
sendemaillists=''.join(list(map(lambda x:'id为: '+x[0]+' 的专业'+x[3].replace('01','内容发生变化').replace('11','内容和标签发生变化').replace('10','标签发生变化')+' 学校: '+x[2]+' 链接为:'+x[1]+'\n',result)))
messagetext=sendemailschool+'\n'+sendemaillists
if messagetext!='\n':
sendEmail(subject='英国变化邮件',messageText=messagetext)
# SendEmailMain()
|
[
"[email protected]"
] | |
9a93a2420acc3eb462984f16192bf378b923dbf2
|
0f880fab72fe18a2e5c4718ba4bf78fbe800f988
|
/code/CityList.py
|
6dd00a6060c4d7c719719ac0c9f538ffcdc1ab89
|
[] |
no_license
|
clwater/GetCityGeo
|
ce208abb69130b091acaf9ac77b194035d7d96d4
|
c82b922c25c07ace0245eaa20055bfe8290d7072
|
refs/heads/master
| 2021-01-19T17:33:09.171032 | 2017-08-24T16:23:00 | 2017-08-24T16:23:00 | 101,068,888 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,244 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import requests
import json , re
import mysql.connector
conn = mysql.connector.connect(user='root', password='root', database='Utils')
def getCityGeo(cityname):
url = 'http://ditu.amap.com/service/poiInfo?query_type=TQUERY&keywords=%s' % (cityname)
html = requests.get(url).text
print html
if len(html) < len('{"status":"2","data":"Return Failure!"12312323}') :
return -1
data = json.loads(html)
cityList = []
try:
searchList = data['data']['locres']['poi_list']
# searchList = data['data']['poi_list']
# city = searchList[0]
# _city = {'level': '', 'child_station_count': city['child_station_count'],
# 'adcode': city['adcode'], 'coords': '', 'address': city['address'],
# 'ename': '', 'name': city['name'], 'longitude': city['longitude'],
# 'latitude': city['latitude']}
# return _city
for city in searchList:
_city = { 'level' : city['level'] , 'child_station_count' : city['child_station_count'],
'adcode': city['adcode'] , 'coords' : city['coords'] , 'address' : city['address'],
'ename' : city['ename'], 'name' : city['name'] , 'longitude' : city['longitude'],
'latitude': city['latitude']}
return _city
except Exception:
return cityList
def saveInfo(cityInfo , city):
    if not isinstance(cityInfo, dict):
        print city + ' not included'
        return
print city
try:
print cityInfo['ename']
cursor = conn.cursor()
tem = cityInfo['ename']
tem = str(tem).replace('\'' , '`')
_sql = 'insert into CityGeo(ename , name , level , adcode ,child_station_count,coords , address , longitude ,latitude ) values (\'%s\',\'%s\',\'%s\',\'%s\',%s, \'%s\' ,\'%s\' ,\'%s\', \'%s\')' % (
tem, city, cityInfo['level'], cityInfo['adcode'], cityInfo['child_station_count'],
# cityInfo['coords'] ,
"",
cityInfo['address'] ,cityInfo['longitude'] ,cityInfo['latitude'])
print(_sql)
cursor.execute(_sql)
conn.commit()
except Exception:
with open('errorcity' ,'a') as f:
# print city
f.write(city + '\n')
print (city + 'error')
def getCityListDB():
cursor = conn.cursor()
_sql = 'SELECT `ChinaCity`.`cityName`,`ChinaCity`.`regionName` FROM `ChinaCity` WHERE `ChinaCity`.`cityName` != \'\' and id > 248'
cursor.execute(_sql)
cityList = cursor.fetchall()
for city in cityList:
if len(city) > 1:
if '盟' in city[0]:
temp = city[0] + city[1]
else:
temp = city[0] + u'市' + city[1]
else:
temp = city[0] + u'市'
print temp
saveInfo( getCityGeo(temp) , temp)
def getCityListText():
with open('citylist' , 'r') as f:
cityList = f.readlines()
for city in cityList:
city = city.strip()
# city = city + '县'
saveInfo(getCityGeo(city), city)
getCityListText()
# getCityListDB()
# getCityGeo('北京')
|
[
"[email protected]"
] | |
b4b07278d2bdd76b4fcc168e8ca4a5e2f2b61686
|
4a027b32b1e2dfebd6d65c9e7afce1f2e93f16bc
|
/webblog/blog/admin.py
|
c103bbbe9137099f88f5d13d6b08262854240b18
|
[] |
no_license
|
Jethet/DjangoProject-WebBlog
|
92aa2959349129b2ef192163ab5637dbd4099224
|
f64a79d889abe6a2d3caa9aa5350178d97b5c5eb
|
refs/heads/master
| 2020-05-29T23:32:53.154542 | 2020-02-11T21:59:24 | 2020-02-11T21:59:24 | 189,438,086 | 1 | 0 | null | 2020-02-11T21:59:25 | 2019-05-30T15:28:38 |
Python
|
UTF-8
|
Python
| false | false | 197 |
py
|
from django.contrib import admin
# First import the model that you want to show up on the admin page:
from .models import Post
# Register your models on the admin page:
admin.site.register(Post)
|
[
"[email protected]"
] | |
ce555ee518fcfbdb43e59334bdddd885f194b341
|
1a24def8879972f21d846ffb3813632070e1cf12
|
/Chapter08/0813exception-while-true.py
|
fa0b51fa722222322031c355be5fd5b499f32cbf
|
[] |
no_license
|
mushahiroyuki/beginning-python
|
03bb78c8d3f678ce39662a44046a308c99f29916
|
4d761d165203dbbe3604173c404f70a3eb791fd8
|
refs/heads/master
| 2023-08-16T12:44:01.336731 | 2023-07-26T03:41:22 | 2023-07-26T03:41:22 | 238,684,870 | 5 | 4 | null | 2023-09-06T18:34:01 | 2020-02-06T12:33:26 |
Python
|
UTF-8
|
Python
| false | false | 397 |
py
|
# File name: Chapter08/0813exception-while-true.py
while True:
try:
x = int(input('最初の数を入れてください: '))
y = int(input('2番目の数を入れてください: '))
value = x / y
print(f'{x}/{y}は{value}です。')
except:
print('入力が正しくありません。再度入力してください。')
else:
break
|
[
"[email protected]"
] | |
5b3cbdb9ee3124e0fee05d82c702f0c9e56923ec
|
fc77fc08e983385521f7073e160cf05b8484dc9d
|
/Music App/mapp/db_setup.py
|
7d68ace0c79aa6840b2127f24640c4be99f1da1e
|
[] |
no_license
|
Icode4passion/Apps
|
e561a179147ab0f9bd074998f2b3e3a9bfedc539
|
51e5f2c9026a7f6a6efef33f4f54c9d7573a3070
|
refs/heads/master
| 2020-05-04T15:22:59.139023 | 2019-04-03T07:57:58 | 2019-04-03T07:57:58 | 179,238,161 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 551 |
py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///mymusic.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine,))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import models
Base.metadata.create_all(bind=engine)
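# Hedged usage sketch: model classes must inherit from this Base so that
# init_db() (which does a bare 'import models') can find and create their
# tables:
#   from db_setup import Base, db_session, init_db
#   # define model classes inheriting Base, then:
#   init_db()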
|
[
"[email protected]"
] | |
192f1edf5a7c689278a89613efd7f7460b9516b8
|
1f6a85330596eb86a55e631ce5a0a643e200e977
|
/muddery/server/typeclasses/script_room_interval.py
|
2048e8b16d3f7555894ca832a36db1eb0acbe74d
|
[
"BSD-3-Clause"
] |
permissive
|
kwer8080/muddery
|
ba41765c6245d33978b431ef490f10873ca8615c
|
8b712eeb90cfee2d602aad4505a4929528d44afd
|
refs/heads/master
| 2022-12-02T14:27:22.363386 | 2020-08-16T03:51:12 | 2020-08-16T03:51:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,549 |
py
|
"""
Scripts
Scripts are powerful jacks-of-all-trades. They have no in-game
existence and can be used to represent persistent game systems in some
circumstances. Scripts can also have a time component that allows them
to "fire" regularly or a limited number of times.
There is generally no "tree" of Scripts inheriting from each other.
Rather, each script tends to inherit from the base Script class and
just overloads its hooks to have it perform its function.
"""
import time
from evennia.scripts.scripts import DefaultScript
from muddery.server.mappings.event_action_set import EVENT_ACTION_SET
class ScriptRoomInterval(DefaultScript):
"""
This script triggers an event in a room at intervals.
"""
def at_script_creation(self):
# Set default data.
if not self.attributes.has("room"):
self.db.room = None
if not self.attributes.has("event_key"):
self.db.event_key = ""
if not self.attributes.has("action"):
self.db.action = ""
if not self.attributes.has("begin_message"):
self.db.begin_message = ""
if not self.attributes.has("end_message"):
self.db.end_message = ""
if not self.attributes.has("offline"):
self.db.offline = False
if not self.attributes.has("last_trigger_time"):
self.db.last_trigger_time = 0
def set_action(self, room, event_key, action, offline, begin_message, end_message):
"""
Set action data.
Args:
event: (string) event's key.
action: (string) action's key.
"""
self.db.room = room
self.db.event_key = event_key
self.db.action = action
self.db.begin_message = begin_message
self.db.end_message = end_message
self.db.offline = offline
self.db.last_trigger_time = 0
def at_start(self):
"""
Called every time the script is started.
"""
        # The script will be unpaused when the server restarts. So pause it if the character is not online now.
if self.db.begin_message:
if self.obj:
self.obj.msg(self.db.begin_message)
# Offline intervals.
if self.db.offline:
last_time = self.db.last_trigger_time
if last_time:
current_time = time.time()
times = int((current_time - last_time) / self.interval)
if times > 0:
self.db.last_trigger_time = current_time
action = EVENT_ACTION_SET.get(self.db.action)
if action and hasattr(action, "offline_func"):
action.offline_func(self.db.event_key, self.obj, self.db.room, times)
def at_repeat(self):
"""
Trigger events.
"""
if not self.obj.location:
# The character's location is empty (maybe just login).
return
if self.obj.location != self.db.room:
# The character has left the room.
self.obj.scripts.delete(self)
return
# Do actions.
if self.db.offline:
self.db.last_trigger_time = time.time()
func = EVENT_ACTION_SET.func(self.db.action)
if func:
func(self.db.event_key, self.obj, self.db.room)
def at_stop(self):
"""
Called every time the script is stopped.
"""
if self.db.end_message:
if self.obj:
self.obj.msg(self.db.end_message)
|
[
"[email protected]"
] | |
cb72bed745489fd0e982e080dff5966200d993e3
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py
|
e7bb5052b4b5d9571da6b4b40941ddd27288a488
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 1,405 |
py
|
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name(u"sys"), Name(u"intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, u'sys', node)
return new
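# Hedged before/after example of this fixer:
#   x = intern(s)   ->   x = sys.intern(s)
# (touch_import above also adds 'import sys' to the module if it is missing)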
|
[
"[email protected]"
] | |
6f64803b680f530118f50d12f840345200374827
|
001ca88155c90447ae3564bb51c503500d4fdcdd
|
/apps/christmas/migrations/0001_initial.py
|
2f33cc812b526ca9d65d097c3b32136603943187
|
[] |
no_license
|
andre23arruda/cartas-de-natal
|
b7d5766b2806814dc7aaed1315b0d51d4aa53582
|
b704b28137256e9c52a7d716e462334928c9d2bd
|
refs/heads/main
| 2023-04-28T04:33:28.139797 | 2021-05-14T04:56:05 | 2021-05-14T04:56:05 | 367,122,010 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 729 |
py
|
# Generated by Django 3.1.4 on 2021-05-13 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Letter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
('title', models.CharField(max_length=100)),
('message', models.TextField()),
],
),
]
|
[
"[email protected]"
] | |
bd52bb1039bba3f6e62021f5e1b5035e90a422c1
|
7bc0075367290ff06565991e19033b13f0604f96
|
/Mundo 2/aula13/desafio047.py
|
531922ef5ea3d9c949fd2497d363dc2cbe2bf5db
|
[] |
no_license
|
iamtheluiz/curso_em_video_python
|
298acd90e36473fbf797ba7bf85d729d0ca28407
|
aa4247b7d206771f9c9b08ad5d8585c3813ddaff
|
refs/heads/master
| 2020-04-12T16:17:51.672662 | 2019-01-22T00:10:41 | 2019-01-22T00:10:41 | 162,608,169 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
# imports
print("""
|******************|
| Desafio047 |
|******************|
""")
print("Números Pares de 1 até 50")
result = ""
for i in range(1, 51, 2):
if i == 1:
result += str(i + 1)
else:
result += ", "+str(i + 1)
print(result)
|
[
"[email protected]"
] | |
5291f471b2d5e46a05cd5e2ec8fd990b3acf7711
|
33114a0f96406008da69adac757b271229fb81bf
|
/__init__.py
|
5488e89bfa7b2ba3c29c0da45814f981069162df
|
[] |
no_license
|
ROB-Seismology/simpledb
|
9f1eaf3ad4cd2367a03b5e79931a18959e9a370d
|
4993dd472d1cb37023751ffca80e4dde7a6ad7fc
|
refs/heads/master
| 2021-06-24T12:13:34.309067 | 2020-10-20T10:30:34 | 2020-10-20T10:30:34 | 90,835,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,309 |
py
|
"""
Module providing basic read-write access to SQL databases.
Currently supports MySQL, PostgreSQL and SQLite/SpatiaLite databases.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals
## Reloading mechanism
try:
reloading
except NameError:
## Module is imported for the first time
reloading = False
else:
## Module is reloaded
reloading = True
try:
## Python 3
from importlib import reload
except ImportError:
## Python 2
pass
## Import submodules
## base
if not reloading:
from . import base
else:
reload(base)
from .base import (SQLDB, SQLRecord, build_sql_query)
## sqlite, depends on base
if not reloading:
from . import sqlite
else:
reload(sqlite)
from .sqlite import (SQLiteDB, query_sqlite_db, query_sqlite_db_generic)
__all__ = base.__all__ + sqlite.__all__
## mysql, depends on base
if not reloading:
from . import mysql
else:
reload(mysql)
if mysql.HAS_MYSQL:
from .mysql import (MySQLDB, query_mysql_db, query_mysql_db_generic)
__all__ += mysql.__all__
## postgres, depends on base
if not reloading:
from . import postgres
else:
reload(postgres)
if postgres.HAS_POSTGRES:
from .postgres import (PgSQLDB, query_pgsql_db, query_pgsql_db_generic)
__all__ += postgres.__all__
|
[
"[email protected]"
] |