blob_id (string, length 40-40) | directory_id (string, length 40-40) | path (string, length 3-288) | content_id (string, length 40-40) | detected_licenses (list, length 0-112) | license_type (string, 2 classes) | repo_name (string, length 5-115) | snapshot_id (string, length 40-40) | revision_id (string, length 40-40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes, nullable) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, length 128-8.19k) | authors (list, length 1-1) | author_id (string, length 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26a4deb38675a8c8a8ed12f89b75937b21c93aec
|
62e240f67cd8f92ef41ce33dafdb38436f5a9c14
|
/tests/parsers/bencode_parser.py
|
f012685073c77212fcc29cb216201a18d37e4779
|
[
"Apache-2.0"
] |
permissive
|
joshlemon/plaso
|
5eb434772fa1037f22b10fa1bda3c3cc83183c3a
|
9f8e05f21fa23793bfdade6af1d617e9dd092531
|
refs/heads/master
| 2022-10-14T18:29:57.211910 | 2020-06-08T13:08:31 | 2020-06-08T13:08:31 | 270,702,592 | 1 | 0 |
Apache-2.0
| 2020-06-08T14:36:56 | 2020-06-08T14:36:56 | null |
UTF-8
|
Python
| false | false | 839 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Bencode file parser."""
from __future__ import unicode_literals
import unittest
from plaso.parsers import bencode_parser
# Register all plugins.
from plaso.parsers import bencode_plugins # pylint: disable=unused-import
from tests.parsers import test_lib
class BencodeTest(test_lib.ParserTestCase):
"""Tests for the Bencode file parser."""
# pylint: disable=protected-access
def testEnablePlugins(self):
"""Tests the EnablePlugins function."""
parser = bencode_parser.BencodeParser()
parser.EnablePlugins(['bencode_transmission'])
self.assertIsNotNone(parser)
self.assertIsNone(parser._default_plugin)
self.assertNotEqual(parser._plugins, [])
self.assertEqual(len(parser._plugins), 1)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
3d82aea556022fc260397c29a753c5ffa68f69ad
|
815f70b6a6e1c58676de2def893baf4f70b0f72c
|
/apps/restapi/twee/serializers/tip.py
|
54ce2848966860f92faa422cc2ccd5e4a37a538b
|
[
"MIT"
] |
permissive
|
adepeter/pythondailytip
|
ed6e25578f84c985eea048f4bc711b411cdc4eff
|
8b114b68d417e7631d139f1ee2267f6f0e061cdf
|
refs/heads/main
| 2023-05-30T11:07:57.452009 | 2021-06-11T13:42:19 | 2021-06-11T13:42:19 | 375,838,410 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
from rest_framework import serializers
from ....twee.models import PythonTip
class TipSerializer(serializers.ModelSerializer):
class Meta:
model = PythonTip
fields = '__all__'
extra_kwargs = {
'tip': {
'max_length': 140
}
}
|
[
"[email protected]"
] | |
ef082f9bb3bf1cae4397163bfce43ab59d77dfac
|
90e6860b5370b742f01c0664ac84f14dc1272155
|
/examples/helloZiggurat/src/ziggHello/models/zigguratTest/ZigguratTestBase.py
|
6f82ebfb51611a2d85ce9a0b6f6c0667be506880
|
[] |
no_license
|
sernst/Ziggurat
|
e63f876b8f2cb3f78c7a7a4dcf79af810a540722
|
4ae09bbd9c467b2ad740e117ed00354c04951e22
|
refs/heads/master
| 2021-01-17T07:20:17.138440 | 2016-05-27T14:27:43 | 2016-05-27T14:27:43 | 9,278,283 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
# ZigguratTestBase.py
# (C)2013
# Scott Ernst
from ziggurat.sqlalchemy.ZigguratModelsBase import ZigguratModelsBase
#___________________________________________________________________________________________________ ZigguratTestBase
class ZigguratTestBase(ZigguratModelsBase):
"""A class for..."""
#===================================================================================================
# C L A S S
__abstract__ = True
|
[
"[email protected]"
] | |
474ca8e491dd7c8a564d196843a5593c517b1619
|
7533acbcf36b196e5513fad2b3c9623411500f0f
|
/0x0F-python-object_relational_mapping/model_state.py
|
9b22628ad72e4f2739e3630bec79c475e4db1008
|
[] |
no_license
|
AndrewKalil/holbertonschool-higher_level_programming
|
97ce8af5ad7e8e9f0b1a25d7fa7dcb1a2b40810e
|
9bef1f7c8ff9d8e90ec2aed7a29f37cec3a5e590
|
refs/heads/master
| 2022-12-17T19:02:12.096913 | 2020-09-23T00:00:44 | 2020-09-23T00:00:44 | 259,439,815 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
#!/usr/bin/python3
"""First state model"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
class State(Base):
"""Class State"""
__tablename__ = 'states'
id = Column(Integer, autoincrement=True, primary_key=True,
nullable=False, unique=True)
name = Column(String(128), nullable=False)
|
[
"[email protected]"
] | |
9afc4200eacafdbebe20217fe3f7491121e55325
|
06e51cd96f2788f87c7c426244167ddbfcc0d551
|
/integer_solutions.py
|
cc8d955bf497fb094c54cccbe9ef48050297b32e
|
[] |
no_license
|
Lisolo/ACM
|
683724184dc2af31ef45073a9cd3ef7f2cdabfba
|
231d80dd72768ca97c3e9795af94910f94cc0643
|
refs/heads/master
| 2016-09-06T16:04:11.910067 | 2014-11-26T12:25:50 | 2014-11-26T12:25:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
# coding=utf-8
"""
You are given two integers a and b (-10000 < a, b < 10000). Determine whether there exist two integers whose sum is a and whose product is b.
If such integers exist, output Yes; otherwise output No.
For example, with a=9 and b=15 there is no pair of integers satisfying the conditions, so the output should be No.
"""
a = 6
b = 9
divisors = []
flag = 0
if b >= 0:
for x in xrange(-b, b+1):
if x == 0:
pass
else:
b % x == 0
divisors.append([x, b/x])
else:
for x in xrange(b,-(b-1)):
if x == 0:
pass
else:
b % x == 0
divisors.append([x, b/x])
for x in divisors:
if sum(x) == a:
flag = 1
if a == 0 and b == 0:
print 'YES'
else:
if flag:
print 'YES'
else:
print 'NO'
"""solution 2:"""
delta = a**2 - 4 * b
if delta >= 0 and int(delta**0.5) == delta**0.5:
print 'YES'
else:
print 'NO'
|
[
"[email protected]"
] | |
c014798865331ef81d1e07c344df553a92294cac
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03031/s125157001.py
|
4ac1006ad6aa89f188ff8caaa0a4a2b77a42ef94
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 654 |
py
|
n, m = map(int, input().split())
switch_list = []
for i in range(m):
s = list(map(int, input().split()))
s.pop(0)
switch_list.append(s)
p_list = list(map(int, input().split()))
#print(switch_list)
#print(p_list)
ans = 0
for bit in range(1 << n): # (1 << n) - 1 = 111...1 in binary (n digits), so this loop enumerates every n-bit on/off pattern
cnt = 0
for j in range(0,m):
switch_sum = 0
for i in range(n):
if (bit >> i) & 1 and i+1 in switch_list[j]:
switch_sum += 1
if switch_sum%2 == p_list[j]:
cnt += 1
if cnt == m:
ans += 1
print(ans)
|
[
"[email protected]"
] | |
a8bfde75fc2cf284a72e5f69140fbf309caf8038
|
46c318dbfedfb95d38207431bbf14bacf12d185f
|
/NLP/II_Process/Matching/RegEx.py
|
d738c2ca0c9f6a0f8c5f444a610746577f70e4b9
|
[] |
no_license
|
miltonluaces/problem_solving
|
2e92877ee736c0920ce6e94dcc73fd01a52e3e46
|
bccb89d8aadef4a2e409fc6c66ccad2fb84b6976
|
refs/heads/master
| 2023-01-08T15:58:51.002478 | 2020-10-28T21:31:46 | 2020-10-28T21:31:46 | 308,143,277 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
import regex
# Normal matching.
m1 = regex.search(r'Mr|Mrs', 'Mrs'); print(m1.expandf('{0}'))
m2 = regex.search(r'one(self)?(selfsufficient)?', 'oneselfsufficient'); print(m2.expandf('{0}'))
# POSIX matching.
m3 = regex.search(r'(?p)Mr|Mrs', 'Mrs'); print(m3.expandf('{0}'))
m4 = regex.search(r'(?p)one(self)?(selfsufficient)?', 'oneselfsufficient'); print(m4.expandf('{0}'))
|
[
"[email protected]"
] | |
bf6fab955be82cb8c2a81a65c3d6b12d35068493
|
3e1584f4bc2f1d4368b10d0f28fcba69d946eb00
|
/core/apps/kubeops_api/migrations/0063_auto_20200221_0654.py
|
a552b6fac34d4eea6b6e19c7ad53a2cf039001be
|
[
"Apache-2.0"
] |
permissive
|
azmove/KubeOperator
|
80d102a41a0009ae85dd2d82c7dc164511de9a58
|
0561ddbc03eded5813a86693af7fc4ee9647f12d
|
refs/heads/master
| 2021-01-08T22:40:51.267027 | 2020-02-21T08:47:43 | 2020-02-21T08:47:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
# Generated by Django 2.2.10 on 2020-02-21 06:54
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('kubeops_api', '0062_auto_20200221_0510'),
]
operations = [
migrations.AddField(
model_name='item',
name='users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='clusterhealthhistory',
name='date_type',
field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR', max_length=255),
),
]
|
[
"[email protected]"
] | |
1e64d87dfe87a31900f768f82c81e0725aa124e2
|
1ed281b93e11a53ea4ae2a3798aeb9f58dd664de
|
/webapp/starter/config/settings/local.py
|
b7e7a63716f22047acb2f9e1c94ef1b10a5f6274
|
[
"MIT"
] |
permissive
|
bartkim0426/django-docker-seul
|
5ae2a31f1004ae8292569bcafd2e66ce56f67c7e
|
6a75605281403357514d7b30e65d2685bb907b31
|
refs/heads/master
| 2021-05-09T02:55:04.765647 | 2019-02-11T07:45:09 | 2019-02-11T07:45:09 | 119,226,239 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
import os
from .partials import *
DEBUG = True
INSTALLED_APPS += [
'debug_toolbar',
'django_extensions',
]
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INTERNAL_IPS = ['127.0.0.1', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['POSTGRES_NAME'],
'USER': os.environ["POSTGRES_USER"],
'PASSWORD': os.environ["POSTGRES_PASSWORD"],
'HOST': os.environ["POSTGRES_HOST"],
'PORT': os.environ["POSTGRES_PORT"],
}
}
MEDIA_ROOT = str(ROOT_DIR('mediafiles'))
# before collectstatic
# for prevent duplication of STATIC_ROOT and STATICFILES_DIRS
# STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# STATICFILES_DIRS = (
# str(ROOT_DIR.path('static')),
# )
# after collectstatic
STATIC_ROOT = str(ROOT_DIR('static-files'))
STATICFILES_DIRS = (
str(ROOT_DIR.path('staticfiles')),
)
|
[
"[email protected]"
] | |
dc5df62772aa2776784f4a98884bd8e5b46d2056
|
5f2608d4a06e96c3a032ddb66a6d7e160080b5b0
|
/week4/homework_w4_b1.py
|
e70dae9bdc972512e2e7c76f7fc0ae5ef2833a01
|
[] |
no_license
|
sheikhusmanshakeel/statistical-mechanics-ens
|
f3e150030073f3ca106a072b4774502b02b8f1d0
|
ba483dc9ba291cbd6cd757edf5fc2ae362ff3df7
|
refs/heads/master
| 2020-04-08T21:40:33.580142 | 2014-04-28T21:10:19 | 2014-04-28T21:10:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 967 |
py
|
import math, random, pandas
def Vol1_s(dimension):
return (math.pi ** (dimension / 2.0)) / math.gamma(dimension / 2.0 + 1.0)
def Vol1_s_est(dimensions, trials):
n_hits = 0
for i in range(trials):
dists = [random.uniform(-1.0, 1.0) for _ in range(dimensions)]
sum_dist = sum(d ** 2 for d in dists)
if sum_dist < 1.0:
n_hits += 1
return n_hits / float(trials) * 2 ** dimensions, n_hits
dimensions = []
result = []
trials = 1000000
print '%i used for all' % trials
for d in range(1, 33):
dimensions.append(str(d) + 'd')
vol_est, n_hits = Vol1_s_est(d, trials)
result.append({ 'estimation of Vol1_s(d)': vol_est,
'Vol1_s(d) (exact)': Vol1_s(d),
'n_hits': n_hits })
print d, n_hits, vol_est
ordered_cols = ['estimation of Vol1_s(d)', 'Vol1_s(d) (exact)', 'n_hits']
print pandas.DataFrame(result, dimensions, columns=ordered_cols)
|
[
"[email protected]"
] | |
7af7b7b2b077c56d314c8a7de890790b7cd2a523
|
9972988c4f4ccd7fdbafea601782dae94b679e78
|
/tests/test.py
|
8f7fc6f16dc0d00289c64bafe01422ece4e4f123
|
[
"MIT"
] |
permissive
|
chenshoubiao/ButterSalt
|
b67e9dec730350e64520064940fe69621a927418
|
7120c5135448cb3c9760925f23d2efc8316458d8
|
refs/heads/master
| 2021-01-22T03:22:58.791300 | 2017-05-25T01:58:45 | 2017-05-25T01:58:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,787 |
py
|
import ButterSalt
import unittest
class ButterSaltTestCase(unittest.TestCase):
def setUp(self):
ButterSalt.app.config['TESTING'] = True
ButterSalt.app.config['WTF_CSRF_ENABLED'] = False
ButterSalt.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
ButterSalt.app.config['SQLALCHEMY_ECHO'] = False
self.app = ButterSalt.app.test_client()
ButterSalt.db.create_all()
def login(self, username, password):
return self.app.post('/user/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self):
return self.app.get('/user/logout', follow_redirects=True)
def test_login_logout(self):
rv = self.login('admin', 'default')
assert '/user/logout' in str(rv.data)
assert 'Logged in successfully.' in str(rv.data)
rv = self.logout()
assert 'Please log in to access this page.' in str(rv.data)
assert '/user/logout' not in str(rv.data)
def test_index(self):
self.login('admin', 'default')
rv = self.app.get('/', follow_redirects=True)
assert 'id="tgt" name="tgt" type="text" value="" placeholder="Required"' in str(rv.data)
assert '/user/logout' in str(rv.data)
def test_deployment(self):
self.login('admin', 'default')
rv = self.app.get('/deployment/operation', follow_redirects=True)
assert '<table class="table table-hover">' in str(rv.data)
assert '/user/logout' in str(rv.data)
def test_salt_jobs(self):
self.login('admin', 'default')
rv = self.app.get('/salt/jobs/', follow_redirects=True)
assert '<table class="table table-striped">' in str(rv.data)
assert '/user/logout' in str(rv.data)
def test_execution_command_testping(self):
self.login('admin', 'default')
rv = self.app.post('/', data=dict(
tgt='HXtest3',
fun='test.ping',
), follow_redirects=True)
assert "['HXtest3']" in str(rv.data)
def test_execution_command_testarg(self):
self.login('admin', 'default')
rv = self.app.post('/', data=dict(
tgt='HXtest3',
fun='test.arg',
arg="/proc lol"
), follow_redirects=True)
assert '<th> Arguments </th>' in str(rv.data)
assert '__kwarg__' not in str(rv.data)
def test_execution_command_testkwarg(self):
self.login('admin', 'default')
rv = self.app.post('/', data=dict(
tgt='HXtest3',
fun='test.arg',
arg="/proc lol",
kwarg='lol=wow'
), follow_redirects=True)
assert '__kwarg__' in str(rv.data)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
e7be49dbc740b1357c53555b1c8370e37846f83e
|
dbde9338e87117397c2a7c8969df614f4dd4eacc
|
/examples/tensorflow/qat_conversion/benchmark.py
|
e31fb0226a4869184f24d64386ded4940317fec9
|
[
"Apache-2.0",
"MIT",
"Intel"
] |
permissive
|
leonardozcm/neural-compressor
|
9f83551007351e12df19e5fae3742696613067ad
|
4a49eae281792d987f858a27ac9f83dffe810f4b
|
refs/heads/master
| 2023-08-16T17:18:28.867898 | 2021-09-03T06:44:25 | 2021-09-03T06:54:30 | 407,043,747 | 0 | 0 |
Apache-2.0
| 2021-09-16T07:57:10 | 2021-09-16T06:12:32 | null |
UTF-8
|
Python
| false | false | 1,017 |
py
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
class dataloader(object):
def __init__(self, batch_size=100):
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
self.train_images = train_images / 255.0
self.test_images = test_images / 255.0
self.train_labels = train_labels
self.test_labels = test_labels
self.batch_size = batch_size
self.i = 0
def __iter__(self):
while self.i < len(self.test_images):
yield self.test_images[self.i: self.i + self.batch_size], self.test_labels[self.i: self.i + self.batch_size]
self.i = self.i + self.batch_size
from lpot.experimental import Benchmark, common
evaluator = Benchmark('mnist.yaml')
evaluator.model = common.Model('quantized_model')
evaluator.b_dataloader = dataloader()
evaluator('accuracy')
|
[
"[email protected]"
] | |
8cffae1caed4f348b156a25034e81b9c31782903
|
46ae8264edb9098c9875d2a0a508bc071201ec8b
|
/res/scripts/client/gui/battle_control/requestsavatarrequestscontroller.py
|
f867089a8ae0ffd0381a8948555605e3e5e292d7
|
[] |
no_license
|
Difrex/wotsdk
|
1fc6156e07e3a5302e6f78eafdea9bec4c897cfb
|
510a34c67b8f4c02168a9830d23f5b00068d155b
|
refs/heads/master
| 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,880 |
py
|
# Embedded file name: scripts/client/gui/battle_control/requests/AvatarRequestsController.py
from collections import namedtuple
import BigWorld
import AccountCommands
from debug_utils import LOG_DEBUG, LOG_WARNING
from ids_generators import Int32IDGenerator
from helpers import i18n
from messenger import MessengerEntry, g_settings
from gui.shared.utils.requesters.abstract import RequestsByIDProcessor
from gui.shared.utils.requesters.RequestsController import RequestsController
from gui.shared.rq_cooldown import RequestCooldownManager, REQUEST_SCOPE
from gui.battle_control.requests.settings import AVATAR_REQUEST_TYPE, DEFAULT_COOLDOWN
class _AvatarCooldownManager(RequestCooldownManager):
def __init__(self):
super(_AvatarCooldownManager, self).__init__(REQUEST_SCOPE.CLUB)
def lookupName(self, rqTypeID):
rqName = AVATAR_REQUEST_TYPE.getKeyByValue(rqTypeID)
return i18n.makeString('#system_messages:battle/request/%s' % str(rqName))
def getDefaultCoolDown(self):
return DEFAULT_COOLDOWN
def _showSysMessage(self, msg):
MessengerEntry.g_instance.gui.addClientMessage(g_settings.htmlTemplates.format('battleErrorMessage', ctx={'error': msg}))
class _AvatarRequester(RequestsByIDProcessor):
class _Response(namedtuple('_Response', ['code', 'errStr', 'data'])):
def isSuccess(self):
return AccountCommands.isCodeValid(self.code)
def __init__(self):
super(_AvatarRequester, self).__init__(Int32IDGenerator())
def getSender(self):
return BigWorld.player().prebattleInvitations
def _doCall(self, method, *args, **kwargs):
requestID = self._idsGenerator.next()
def _callback(code, errStr, data):
ctx = self._requests.get(requestID)
self._onResponseReceived(requestID, self._makeResponse(code, errStr, data, ctx))
method(callback=_callback, *args, **kwargs)
return requestID
def _makeResponse(self, code = 0, errMsg = '', data = None, ctx = None):
response = self._Response(code, errMsg, data)
if not response.isSuccess():
LOG_WARNING('Avatar request error', ctx, response)
return response
class AvatarRequestsController(RequestsController):
def __init__(self):
super(AvatarRequestsController, self).__init__(_AvatarRequester(), _AvatarCooldownManager())
self.__handlers = {AVATAR_REQUEST_TYPE.SEND_INVITES: self.sendInvites}
def fini(self):
self.__handlers.clear()
super(AvatarRequestsController, self).fini()
def sendInvites(self, ctx, callback = None):
return self._requester.doRequestEx(ctx, callback, 'sendInvitation', ctx.getDatabaseIDs())
def _getHandlerByRequestType(self, requestTypeID):
return self.__handlers.get(requestTypeID)
def _getRequestTimeOut(self):
return 30.0
|
[
"[email protected]"
] | |
1e322b9340bde3dac33558b3897bfef9ce871bd7
|
1ee10e1d42b59a95a64d860f0477a69b016d1781
|
/Lecture_09/Lecture Code/3-pipeline_text_generation.py
|
c1c3ebf2a71d9e21e8fada4b70c2de73687de274
|
[] |
no_license
|
KushalIsmael/NLP
|
5564070a573d251d7222dda85b8025ae1f9c3c6f
|
d4ce567a009e149b0cb1781d3a341d25aa438916
|
refs/heads/master
| 2023-08-18T14:07:48.646386 | 2021-10-28T19:09:25 | 2021-10-28T19:09:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 295 |
py
|
from transformers import pipeline
generator = pipeline("text-generation")
print(generator("In this course, we will teach you how to"))
print(generator("I am tired of listening to this brownbag session about natural language processing.",
num_return_sequences = 1, max_length = 100 ))
|
[
"[email protected]"
] | |
8212462617b51d5afbf32fbe0aa6e02ac157b1de
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/760.py
|
c9ed44a45ad2596edbd1cbaff02c0ff2ac596d1c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,137 |
py
|
def opera(case_list):
sat = []
for e in case_list:
sat.append(int(e))
t = sum(sat)
standing = sat[0]
invites = 0
s_list = []
count = 0
for i in sat:
if i > 0:
s_list.append(count)
count += 1
if s_list[0] == 0:
s_list = s_list[1:]
while standing < t:
for i in s_list:
if standing >= i:
standing += sat[i]
else:
while standing < i:
standing += 1
invites += 1
standing += sat[i]
return invites
input_file = open('C:\Users\chrisjwaite\Desktop\\A-large.in')
output_file = open('C:\Users\chrisjwaite\Desktop\\A-large_output.out', 'w')
lines = input_file.read().split('\n')
n_cases = int(lines[0])
case_list = []
for case in lines[1:-1]:
data = case.split(' ')
case_list.append(data[1])
for i in range(n_cases):
output_file.write('Case #' + str(i+1) + ': ' + str(opera(case_list[i])) + '\n')
input_file.close()
output_file.close()
|
[
"[email protected]"
] | |
4a37049bdd2a5eb1ab32b0f6c0feabcf07e1d909
|
0bc9bff4fd4bd72b0ad681b79f0e39cdb9fc9dc0
|
/voximplant/management/commands/vox_call_list_download.py
|
7ea6954170db7d549c13f6340aae7c904ee5af68
|
[
"MIT"
] |
permissive
|
telminov/django-voximplant
|
bc4fcb53147d9a318857b8213934217ebfc8fdef
|
a0165498d1727039e26f77724079033c252a3611
|
refs/heads/master
| 2020-05-22T01:16:43.631059 | 2017-09-13T04:41:47 | 2017-09-13T04:41:47 | 58,752,532 | 4 | 2 | null | 2017-09-13T04:41:48 | 2016-05-13T15:39:03 |
Python
|
UTF-8
|
Python
| false | false | 395 |
py
|
# coding: utf-8
from django.core.management.base import BaseCommand
from ... import tools
class Command(BaseCommand):
help = 'Get call list detail'
def add_arguments(self, parser):
parser.add_argument('--id', dest='call_list_id', type=int)
def handle(self, *args, **options):
call_list_id = options['call_list_id']
tools.call_list_download(call_list_id)
|
[
"[email protected]"
] | |
da4d7b80c470a5ea6762ba816acdd9922c6b0eaf
|
05a211233ccb01ecd2c12367548cba65bbdbc5d9
|
/examples/02relative/app/en/__init__.py
|
771d13b64c475a7ca1ad41ba952a579d17934208
|
[] |
no_license
|
podhmo/miniconfig
|
94ee7fa6345816daa83a74b1cbfb40592f221fbb
|
4cee752fd965c8094ed9d1ff1c33e531e88e479c
|
refs/heads/master
| 2021-05-21T11:49:14.836184 | 2021-03-13T14:06:27 | 2021-03-13T14:06:57 | 26,328,967 | 3 | 1 | null | 2020-07-09T19:24:51 | 2014-11-07T16:58:28 |
Python
|
UTF-8
|
Python
| false | false | 157 |
py
|
def includeme(config):
config.include(".spring:include")
config.include(".summer")
config.include("./autumn")
config.include("../en/winter")
|
[
"[email protected]"
] | |
6da8896820cb21775182cc8b2f30d43f369eae43
|
803176d4f2798989623c62f091f0d5cca687aad3
|
/sorting_recursive.py
|
7d2426d95ea2377f69e96f89c1c668f1d448098d
|
[] |
no_license
|
Tylerholland12/CS2-1
|
79986bb437e4c517d80eb9ba198226cea3e83471
|
a095d23c48c19926ad6fd9be55fb980904dcc495
|
refs/heads/main
| 2023-01-31T00:20:48.603002 | 2020-12-08T14:33:42 | 2020-12-08T14:33:42 | 304,582,069 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,944 |
py
|
#!python
def merge(items1, items2):
"""Merge given lists of items, each assumed to already be in sorted order,
and return a new list containing all items in sorted order.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# create new empty array
new_sorted_list = []
# create a new variable to store the length of each list
len_items1 = len(items1)
len_items2 = len(items2)
# set a variable for each list index and set to 0
i = j = 0
# check if index is less than items
while i < len_items1 and j < len_items2:
# write a conditional to check if one index is less than the other
if items1[i] <= items2[j]:
new_sorted_list.append(items1[i])
i+=1
# do the opposite of the first conditional
else:
new_sorted_list.append(items2[j])
j+=1
# append the items to the new list
while i < len_items1:
new_sorted_list.append(items1[i])
i+=1
# append the items to the new list
while j < len_items2:
new_sorted_list.append(items2[j])
j+=1
# return new list
return new_sorted_list
def merge_sort(items):
"""Sort given items by splitting list into two approximately equal halves,
sorting each recursively, and merging results into a list in sorted order.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# base case
if len(items) <= 1:
return items
# divide array into two parts
mid = len(items) // 2
# slice first half of list
left = items[:mid]
# slice second half of list
right = items[mid:]
# recursive call on left
left = merge_sort(left)
# recursive call on right
right = merge_sort(right)
# merge two together
return merge(left, right)
def quick_sort(items, low=None, high=None):
"""Sort given items in place by partitioning items in range `[low...high]`
around a pivot item and recursively sorting each remaining sublist range.
TODO: Best case running time: ??? Why and under what conditions?
TODO: Worst case running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# base case
length = len(items)
if length <= 1:
return items
else:
pivot = items.pop()
# create new empty arrays
low = []
high = []
# loop through and see if the items are greater than pivot
# append items to high
for item in items:
if item > pivot:
high.append(item)
# append items low
else:
low.append(item)
return quick_sort(low) + [pivot] + quick_sort(high)
if __name__ == "__main__":
items = [12, 23, 5, 2, 1, 43, 6, 34, 9]
print(quick_sort(items))
print(merge_sort(items))
|
[
"[email protected]"
] | |
7138ed2a849354335f6674e80424ccc1659246e3
|
307e52d79c9068a2648ae82bbe11cd58733bba37
|
/Convert/ConvertTruth.py
|
2688385564e86b4c2474fb0ca6454547eb8a182e
|
[] |
no_license
|
greatofdream/Recon1t
|
0aa775c43dcfa5b3da7b5894e2567fbe8e7b2991
|
80e58ba3c2c23f1efa962d02fcb2205a95aa716f
|
refs/heads/master
| 2022-11-09T14:12:55.747488 | 2020-06-09T02:43:24 | 2020-06-09T02:43:24 | 263,953,536 | 0 | 0 | null | 2020-05-14T15:31:27 | 2020-05-14T15:31:26 | null |
UTF-8
|
Python
| false | false | 2,440 |
py
|
# Convert ROOT file to HDF5 file
import numpy as np
import ROOT
import sys
import os
import tables
# Define the database columns
class TruthData(tables.IsDescription):
E = tables.Float64Col(pos=0)
x = tables.Float64Col(pos=1)
y = tables.Float64Col(pos=2)
z = tables.Float64Col(pos=3)
px = tables.Float64Col(pos=4)
py = tables.Float64Col(pos=5)
pz = tables.Float64Col(pos=6)
class GroundTruthData(tables.IsDescription):
EventID = tables.Int64Col(pos=0)
ChannelID = tables.Int64Col(pos=1)
PETime = tables.Float64Col(pos=2)
photonTime = tables.Float64Col(pos=3)
PulseTime = tables.Float64Col(pos=4)
dETime = tables.Float64Col(pos=5)
# Automatically add multiple root files created a program with max tree size limitation.
if len(sys.argv)!=3:
print("Wront arguments!")
print("Usage: python ConvertTruth.py MCFileName outputFileName")
sys.exit(1)
baseFileName = sys.argv[1]
outputFileName = sys.argv[2]
ROOT.PyConfig.IgnoreCommandLineOptions = True
FileNo = 0
# Create the output file and the group
h5file = tables.open_file(outputFileName, mode="w", title="OneTonDetector",
filters = tables.Filters(complevel=9))
group = "/"
# Create tables
GroundTruthTable = h5file.create_table(group, "GroundTruth", GroundTruthData, "GroundTruth")
groundtruth = GroundTruthTable.row
TruthData = h5file.create_table(group, "TruthData", TruthData, "TruthData")
truthdata = TruthData.row
# Loop for ROOT files.
t = ROOT.TChain("Readout")
tTruth = ROOT.TChain("SimTriggerInfo")
tTruth.Add(baseFileName)
t.Add(baseFileName)
# Loop for event
for event in tTruth:
for truthinfo in event.truthList:
truthdata['E'] = truthinfo.EkMerged
truthdata['x'] = truthinfo.x
truthdata['y'] = truthinfo.y
truthdata['z'] = truthinfo.z
for px in truthinfo.PrimaryParticleList:
truthdata['px'] = px.px
truthdata['py'] = px.py
truthdata['pz'] = px.pz
truthdata.append()
for PE in event.PEList:
groundtruth['EventID'] = event.TriggerNo
groundtruth['ChannelID'] = PE.PMTId
groundtruth['PETime'] = PE.HitPosInWindow
groundtruth['photonTime'] = PE.photonTime
groundtruth['PulseTime'] = PE.PulseTime
groundtruth['dETime'] = PE.dETime
groundtruth.append()
# Flush into the output file
GroundTruthTable.flush()
h5file.close()
|
[
"[email protected]"
] | |
ba5d12e3a9f281a603a4f3fc0b6ae61ff59e2ad6
|
b05bd7c104a51910c6ed9d6f0e8d039ffa108f2b
|
/carros/migrations/0004_auto_20201204_2106.py
|
c25e4f02c0364f388b0077ad46c71811b1b44762
|
[] |
no_license
|
BrunoVittor/TesteGregory
|
76e12585d4532dc8ab4836c567b5ba56469139e5
|
2c7e3afdb2a62d0464189153a9ab150d69d89083
|
refs/heads/master
| 2023-04-01T22:56:49.422893 | 2021-03-31T22:49:59 | 2021-03-31T22:49:59 | 334,147,980 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
# Generated by Django 2.2 on 2020-12-04 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carros', '0003_auto_20201204_2103'),
]
operations = [
migrations.AlterField(
model_name='carros',
name='ano',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
44f150c666e75aa32b284dd253d435323b5f0de0
|
7dba60ae27ff247705607839348f017b85f5da16
|
/nyumbax/migrations/0010_remove_hood_user.py
|
9bd48cf28d900417152b7edac6e33f76bd08d027
|
[
"MIT"
] |
permissive
|
BwanaQ/nyumba-kumi
|
7edccb6745ede6d9f6faf5bd8c0dcf6e24726991
|
c264b0941c77a4d7175a2dc5380723bea1acf380
|
refs/heads/master
| 2023-04-05T09:32:34.867456 | 2021-04-13T15:54:16 | 2021-04-13T15:54:16 | 356,136,458 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
# Generated by Django 3.2 on 2021-04-13 04:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nyumbax', '0009_rename_name_hood_title'),
]
operations = [
migrations.RemoveField(
model_name='hood',
name='user',
),
]
|
[
"[email protected]"
] | |
42ae7af6024d205e88ad2aa61c2d8c5c3a071dc3
|
92cc5c61799e93446d6562a6cc9fb74e9220c6c7
|
/mac-graph/cell/mac_cell.py
|
a159f0e137574775b4d6c51682a27dc300eb9ca7
|
[
"Unlicense"
] |
permissive
|
houqp/mac-graph
|
2728c89605b71e7ac610303e7100797787f0fa30
|
ae91e5708d2a63d157a397b608acf720f4c4d840
|
refs/heads/master
| 2020-03-22T20:41:10.786619 | 2018-07-11T19:20:41 | 2018-07-11T19:20:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,995 |
py
|
import tensorflow as tf
from .read_cell import *
from .memory_cell import *
from .control_cell import *
from .output_cell import *
from ..util import *
class MACCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, args, features, question_state, question_tokens, vocab_embedding):
self.args = args
self.features = features
self.question_state = question_state
self.question_tokens = question_tokens
self.vocab_embedding = vocab_embedding
super().__init__(self)
def __call__(self, inputs, state):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: **Unused!** `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size, s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
in_control_state, in_memory_state = state
out_control_state = control_cell(self.args, self.features,
in_control_state, self.question_state, self.question_tokens)
read = read_cell(self.args, self.features,
in_memory_state, out_control_state, self.vocab_embedding)
out_memory_state = memory_cell(self.args,
in_memory_state, read, out_control_state)
output = output_cell(self.args, self.features,
self.question_state, out_memory_state)
return output, (out_control_state, out_memory_state)
@property
def state_size(self):
"""
Returns a size tuple (control_state, memory_state)
"""
return (self.args["bus_width"], self.args["bus_width"])
@property
def output_size(self):
return self.args["answer_classes"]
|
[
"[email protected]"
] | |
0a2d71946f7a3beb7d3039832ef4d851ca101ab9
|
6da19be45ff986768eb820f11691977cb3c84772
|
/Python/5_Advance_buily_in_functions/501_generator_example/app.py
|
f86cf0322af9c70c5287d5b23541ecb63ab41ed6
|
[] |
no_license
|
alexp01/trainings
|
9e72f3a571292b79d2b1518f564d2dc0a774ef41
|
9d8daee16f15e0d7851fab12ab3d2505386a686c
|
refs/heads/master
| 2023-05-04T23:37:13.243691 | 2023-05-02T08:02:53 | 2023-05-02T08:02:53 | 272,425,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 867 |
py
|
# https://www.udemy.com/course/the-complete-python-course/learn/lecture/9445596#questions
# yield can be used to temporarily stop a function, so that you can continue it afterwards
def get_100_numbers() -> int:
i = 0
while i < 100:
yield i
i +=1
# yield is like a return, but it also remembers the function's last execution point and its local values,
# so when it reaches yield it returns i, and when it is called again with next(variable) it continues with i += 1 and then runs the while loop again
x = get_100_numbers()
print (x)
print (next(x)) # this will call again the function and it will continue from where it was stopped -> when i = 0
print (next(x)) # this will call again the function and it will continue from where it was stopped -> when i = 1
print(list(x)) # this will execute the function until it reaches the limit
|
[
"[email protected]"
] | |
3aa84a12c555bb02030d3ec9127a6ee3676a3089
|
3086b5195cb4dbb27aa73a24f6bf964440dff422
|
/tools/fileinfo/detection/packers/pe-pack/test.py
|
0d3de334959002d7c06f44c3a66d04733d5aa5ee
|
[
"MIT",
"Python-2.0"
] |
permissive
|
avast/retdec-regression-tests
|
8c6ea27ce2f5d0dfa6e6c845c38b56fa5bdfcc23
|
6662fed9d73cb7bc882ea69fd2429d5464950e39
|
refs/heads/master
| 2023-08-31T05:53:16.967008 | 2023-08-07T13:33:00 | 2023-08-15T08:33:07 | 113,974,761 | 7 | 10 |
MIT
| 2023-08-15T08:33:08 | 2017-12-12T10:11:00 |
Python
|
UTF-8
|
Python
| false | false | 295 |
py
|
from regression_tests import *
class Test(Test):
settings = TestSettings(
tool='fileinfo',
input='fact_rec.ex'
)
def test_correctly_analyzes_input_file(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output.contains(r'.*PE-PACK \(1\.0*')
|
[
"[email protected]"
] | |
520d8b4de76bc22b176016cd250e44aa8922ed31
|
3a8c2bd3b8df9054ed0c26f48616209859faa719
|
/Challenges/binaryTreeRightSideView.py
|
5ba301ff4e30397260ef87ec8389c5ebedd932f9
|
[] |
no_license
|
AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges
|
684f1ca2f9ee3c49d0b17ecb1e80707efe305c82
|
98fb752c574a6ec5961a274e41a44275b56da194
|
refs/heads/master
| 2023-09-01T23:58:15.514231 | 2021-09-10T12:42:03 | 2021-09-10T12:42:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,354 |
py
|
"""
Binary Tree Right Side View
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
"""
Time Complexity O(N)
Space Complexity O(N)
"""
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if not root:
return []
ans = [root.val]
left = ans + self.rightSideView(root.left)
right = ans + self.rightSideView(root.right)
if len(right) > len(left):
return right
return right + left[len(right):]
"""
BFS
"""
from collections import deque
class Solution:
def rightSideView(self, root):
if not root:
return []
q, res = deque([root]), []
while q:
res.append(q[-1].val)
for _ in range(len(q)):
cur = q.popleft()
if cur.left:
q.append(cur.left)
if cur.right:
q.append(cur.right)
return res
|
[
"[email protected]"
] | |
177e0fb844c10dfa74004b38b345e8812b831e03
|
0ce9226dc0622e1edd93e57dcf2e88eaf77cedd6
|
/leetcode/explore/October/11_subsquence_disnct.py
|
f9dff5ee942c4736487a7c15ad7c7a7aeeb83767
|
[] |
no_license
|
minhthe/algo-and-ds-practice
|
6b09fc2174d58f8ba39ceabd80e2525ab95fe7ea
|
3a9b882af8412859f204569ca11808b638acf29d
|
refs/heads/master
| 2023-01-31T18:49:31.773115 | 2020-12-18T06:26:47 | 2020-12-18T06:26:47 | 298,933,489 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
'''
Greedy approach: if the character on top of the stack is greater than the current one and
still occurs later in the string, pop it now (it can be added back later) to keep the result in lexicographical order.
'''
class Solution:
def removeDuplicateLetters(self, s: str) -> str:
last_index = {c: i for i,c in enumerate(s)}
stk = []
for i, c in enumerate(s):
if c in stk: continue
while stk and stk[-1] > c and last_index[stk[-1]] > i:
stk.pop()
stk.append(c)
return ''.join(stk)
|
[
"[email protected]"
] | |
cda6a6e5e1b60598a1893d844bcba02707ddbbb7
|
282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19
|
/Malware1/venv/Lib/site-packages/scipy/spatial/setup.py
|
17994e6fb084330c7b91f8e312a70465a528a0ff
|
[] |
no_license
|
sameerakhtar/CyberSecurity
|
9cfe58df98495eac6e4e2708e34e70b7e4c055d3
|
594973df27b4e1a43f8faba0140ce7d6c6618f93
|
refs/heads/master
| 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:ccb99ae81e55c20bfd073d894471ea6c5a51f1cc27e19fea1bd2ebdfa959f8cd
size 2935
|
[
"[email protected]"
] | |
121d743af8ee8b7ac6eff95e4756e10c11b93dfc
|
78e93ca71a54bd11b6f51ef3936044e08782c7e3
|
/batchkit_examples/speech_sdk/work_item_processor.py
|
cb1108528d05baf51c553dc4922e2052d930bdf2
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/batch-processing-kit
|
c0134e1e395fdf7f2938101cea542dbb8d3c1f1f
|
8b0a5492361ff9473ab66c2f64aaccd5340f2f62
|
refs/heads/master
| 2023-09-02T01:54:36.226987 | 2022-10-27T03:40:34 | 2022-10-27T03:40:34 | 265,635,442 | 29 | 19 |
MIT
| 2023-06-02T10:38:06 | 2020-05-20T17:14:45 |
Python
|
UTF-8
|
Python
| false | false | 1,170 |
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import multiprocessing
from typing import List
from batchkit.logger import LogEventQueue
from batchkit.work_item import WorkItemRequest, WorkItemResult
from batchkit.work_item_processor import WorkItemProcessor
from batchkit_examples.speech_sdk.recognize import run_recognizer
from batchkit_examples.speech_sdk.work_item import SpeechSDKWorkItemRequest
class SpeechSDKWorkItemProcessor(WorkItemProcessor):
def __init__(self):
super().__init__()
def work_item_types(self) -> List[type]:
return [SpeechSDKWorkItemRequest]
def process_impl(self,
work_item: WorkItemRequest,
endpoint_config: dict, rtf: float,
log_event_queue: LogEventQueue, cancellation_token: multiprocessing.Event,
global_workitem_lock: multiprocessing.RLock) -> WorkItemResult:
assert isinstance(work_item, SpeechSDKWorkItemRequest)
return run_recognizer(
work_item,
rtf,
endpoint_config,
log_event_queue,
cancellation_token
)
|
[
"[email protected]"
] | |
bbefec74ec05c8be2358eb6d37693b79a119f68a
|
a4830a0189c325c35c9021479a5958ec870a2e8b
|
/routing/migrations/0022_auto_20160819_1523.py
|
9a72cd9f00d71edcf95d1e679496d2ced9546eee
|
[] |
no_license
|
solutionprovider9174/steward
|
044c7d299a625108824c854839ac41f51d2ca3fd
|
fd681593a9d2d339aab0f6f3688412d71cd2ae32
|
refs/heads/master
| 2022-12-11T06:45:04.544838 | 2020-08-21T02:56:55 | 2020-08-21T02:56:55 | 289,162,699 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 887 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-19 15:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('routing', '0021_fraudbypasshistory_outboundroutehistory'),
]
operations = [
migrations.AlterModelOptions(
name='fraudbypass',
options={'ordering': ('number',)},
),
migrations.AlterModelOptions(
name='outboundroute',
options={'ordering': ('number',)},
),
migrations.AlterField(
model_name='fraudbypasshistory',
name='number',
field=models.CharField(max_length=64, validators=[django.core.validators.RegexValidator(code='nomatch', message='Must be 10 digits', regex='^\\d{10}$')]),
),
]
|
[
"[email protected]"
] | |
9434fd3c1d1715f323f8d9c6fc8f1097ccd9a93e
|
0cdcee391e178092d7073734957075c72681f037
|
/hackerrank/si/si-smaller-element-left-side.py
|
10a600c468bd60c31b9b74c6e23fe144363e00bf
|
[] |
no_license
|
hrishikeshtak/Coding_Practises_Solutions
|
6b483bbf19d5365e18f4ea1134aa633ff347a1c1
|
86875d7436a78420591a60b716acd2780287b4a8
|
refs/heads/master
| 2022-10-06T18:44:56.992451 | 2022-09-25T03:29:03 | 2022-09-25T03:29:03 | 125,744,102 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 903 |
py
|
#!/usr/bin/python3
# Find 1st smaller elements on left side
class Solution:
# @param A : list of integers
# @return a list of integers
def prevSmaller(self, arr):
N = len(arr)
s = [-1] * N
b = [-1] * N
top = -1
top += 1
s[top] = 0
for i in range(1, N):
# print("stack: ", s)
# print("b: ", b)
while top >= 0:
if arr[i] > arr[s[top]]:
b[i] = arr[s[top]]
top += 1
s[top] = i
break
else:
top -= 1
if top == -1:
b[i] = -1
top += 1
s[top] = i
return b
if __name__ == '__main__':
A = [4, 5, 2, 10, 8]
A = [3, 2, 1]
A = [39, 27, 11, 4, 24, 32, 32, 1]
print(Solution().prevSmaller(A))
|
[
"[email protected]"
] | |
4133d8de12e950deab0ef7eb66dff3ef852e342b
|
5cc1421f5280c4c869e5df5b936f4d629693d0f1
|
/main.py
|
139b340dbacec7bbaa8633419be07b3aeef61f1e
|
[
"MIT"
] |
permissive
|
zhangxujinsh/MTCNN-VS
|
96c38479fa6e6aa5dea0e855cddcf8548ea7872d
|
42d79c0a8954493fd8afb4a6665584da9a8b9c6e
|
refs/heads/master
| 2020-07-11T01:51:40.142178 | 2016-10-29T02:13:57 | 2016-10-29T02:17:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,875 |
py
|
# coding: utf-8
import mxnet as mx
from mtcnn_detector import MtcnnDetector
import cv2
import os
import time
def testimg(detector):
img = cv2.imread('test.jpg')
t1 = time.time()
results = detector.detect_face(img)
print 'time: ',time.time() - t1
if results is not None:
total_boxes = results[0]
points = results[1]
draw = img.copy()
for b in total_boxes:
cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))
for p in points:
for i in range(5):
cv2.circle(draw, (p[i], p[i + 5]), 1, (0, 0, 255), 2)
cv2.imshow("detection result", draw)
cv2.imwrite("result.png", draw)
cv2.waitKey(0)
# --------------
# test on camera
# --------------
def testcamera(detector):
camera = cv2.VideoCapture(0)
while True:
grab, frame = camera.read()
img = cv2.resize(frame, (320,180))
t1 = time.time()
results = detector.detect_face(img)
print 'time: ',time.time() - t1
if results is None:
cv2.imshow("detection result", img)
cv2.waitKey(1)
continue
total_boxes = results[0]
points = results[1]
draw = img.copy()
for b in total_boxes:
cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))
for p in points:
for i in range(5):
cv2.circle(draw, (p[i], p[i + 5]), 1, (255, 0, 0), 2)
cv2.imshow("detection result", draw)
key=cv2.waitKey(1)
if 'q'==chr(key & 255) or 'Q'==chr(key & 255):
break;
if __name__=="__main__":
detector = MtcnnDetector(model_folder='model', ctx=mx.gpu(0), num_worker = 4 , accurate_landmark = False)
# testimg(detector)
testcamera(detector)
|
[
"[email protected]"
] | |
8d85aaa01325ea01f6ece159131b127ef9047799
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=26/sched.py
|
1613dd9b0c794754a75a5de64bc5ac7319aa1a66
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 342 |
py
|
-X FMLP -Q 0 -L 3 95 400
-X FMLP -Q 0 -L 3 66 300
-X FMLP -Q 0 -L 3 54 175
-X FMLP -Q 1 -L 2 53 200
-X FMLP -Q 1 -L 2 50 200
-X FMLP -Q 1 -L 2 44 175
-X FMLP -Q 2 -L 2 34 125
-X FMLP -Q 2 -L 2 34 175
-X FMLP -Q 3 -L 1 33 175
-X FMLP -Q 3 -L 1 31 200
28 150
25 175
24 125
20 200
20 150
18 150
14 175
13 100
9 125
|
[
"[email protected]"
] | |
e9c6a490422bade7bff0ccdc363ca4f326b7f8bb
|
55821cab06b431b3b253df77559800b9f84ed2a7
|
/models/place.py
|
a918d531769a7e7fed34aacbe57ca9ec87ce9dab
|
[] |
no_license
|
kaci65/AirBnB_clone
|
1fa2f1721d752635dd895de09fcedc194612ca91
|
b2c03583aab891fde5e87e7e34b40bcf2aa7ebb6
|
refs/heads/main
| 2023-03-11T08:23:08.811811 | 2021-02-28T20:41:17 | 2021-02-28T20:41:17 | 340,441,645 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
#!/usr/bin/python3
"""Place module"""
import models
from models.base_model import BaseModel
class Place(BaseModel):
"""place class inheriting from BaseModel"""
city_id = ""
user_id = ""
name = ""
description = ""
number_rooms = 0
number_bathrooms = 0
max_guest = 0
price_by_night = 0
latitude = 0.0
longitude = 0.0
amenity_ids = ""
|
[
"[email protected]"
] | |
d6d3e38f6d727b711d14a8cf13a3acf935cdda72
|
18239524612cf572bfeaa3e001a3f5d1b872690c
|
/clients/client/python/test/test_submit_self_service_login_flow.py
|
b4e8c0a3a77a374f30e918234b71717beae63d3c
|
[
"Apache-2.0"
] |
permissive
|
simoneromano96/sdk
|
2d7af9425dabc30df830a09b26841fb2e8781bf8
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
refs/heads/master
| 2023-05-09T13:50:45.485951 | 2021-05-28T12:18:27 | 2021-05-28T12:18:27 | 371,689,133 | 0 | 0 |
Apache-2.0
| 2021-05-28T12:11:41 | 2021-05-28T12:11:40 | null |
UTF-8
|
Python
| false | false | 1,198 |
py
|
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.3
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.submit_self_service_login_flow_with_password_method import SubmitSelfServiceLoginFlowWithPasswordMethod
globals()['SubmitSelfServiceLoginFlowWithPasswordMethod'] = SubmitSelfServiceLoginFlowWithPasswordMethod
from ory_client.model.submit_self_service_login_flow import SubmitSelfServiceLoginFlow
class TestSubmitSelfServiceLoginFlow(unittest.TestCase):
"""SubmitSelfServiceLoginFlow unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSubmitSelfServiceLoginFlow(self):
"""Test SubmitSelfServiceLoginFlow"""
# FIXME: construct object with mandatory attributes with example values
# model = SubmitSelfServiceLoginFlow() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
45d28aa10f25871b33de9573c126392639152d09
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_003_20180618133626.py
|
749199c16ddebe39bbc973c2ba32a1bfd48fc600
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,849 |
py
|
from random import randint
# Sudoku1 almost solved
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True: # prints Sudoku until is solved
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
#vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
sudoku1[int(x[0])-1][int(x[2])-1] = x[4]
if int(x[0]) == 1:
row1[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 2:
row2[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 3:
row3[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 4:
row4[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 5:
row5[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 6:
row6[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 7:
row7[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 8:
row8[int(x[2]) - 1] = int(x[4])
elif int(x[0]) == 9:
row9[int(x[2]) - 1] = int(x[4])
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
'''
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9]
r1 = [[5, 9, 8, 6, 1, 2, 3, 4, 7], [9, 8, 7, 4, 3, 2, 5, 6, 1]]
r2 = [[2, 1, 7, 9, 3, 4, 8, 6, 5], [2, 4, 3, 5, 1, 6, 8, 7, 9]]
r3 = [[6, 4, 3, 5, 8, 7, 1, 2, 9], [5, 6, 1, 7, 9, 8, 4, 3, 2]]
r4 = [[1, 6, 5, 4, 9, 8, 2, 7, 3], [3, 9, 5, 6, 4, 7, 2, 1, 8]]
r5 = [[3, 2, 9, 7, 6, 5, 4, 1, 8], [8, 2, 4, 3, 5, 1, 6, 9, 7]]
r6 = [[7, 8, 4, 3, 2, 1, 5, 9, 6], [1, 7, 6, 2, 8, 9, 3, 4, 5]]
r7 = [[8, 3, 1, 2, 7, 6, 9, 5, 4], [7, 1, 2, 8, 6, 3, 9, 5, 4]]
r8 = [[4, 7, 2, 8, 5, 9, 6, 3, 1], [4, 3, 8, 9, 7, 5, 1, 2, 6]]
r9 = [[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2], [6, 5, ' ', 1, ' ',
' ', 7, 8, ' ']] # 9 1 6, 9 3 9, 9 4 1, 9 6 4, 9 8 8, 9 9 3
# r9=[[9,5, ' ', ' ', ' ', ' ', ' ', ' ',2],[' ',5,' ',' ',2,' ',7,' ','
# ']] # 9 1 6, 9 3 9, 9 4 1, 9 6 4, 9 8 8, 9 9 3
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
print("Your sudoku to solve:")
try:
if sum(row1) == 45 and sum(row2) == 45 and sum(row3) == 45 and sum(row4) == 45 and sum(
row5) == 45 and sum(row6) == 45 and sum(row7) == 45 and sum(row8) == 45 and sum(row9) == 45:
print("YOU WIN")
break
except TypeError:
print()
'''
|
[
"[email protected]"
] | |
b052cc54020a43043bb7d1822c05072b653f6113
|
46f358b954d2d0067a2093ee9006e222f831a8f8
|
/tests/datasource/batch_kwarg_generator/test_s3_subdir_reader_generator.py
|
474a13f241874c6c833756f7ae698d9226069a0e
|
[
"Apache-2.0"
] |
permissive
|
dhruvvyas90/great_expectations
|
b963aa99c683a0da3a9e2b5a1046d2a32f622c7b
|
fddf5336065c644558c528301e601b9f02be87e2
|
refs/heads/main
| 2023-01-28T15:26:55.331282 | 2020-12-03T18:52:14 | 2020-12-03T18:52:14 | 319,719,900 | 1 | 0 |
Apache-2.0
| 2020-12-08T18:02:33 | 2020-12-08T18:02:32 | null |
UTF-8
|
Python
| false | false | 3,651 |
py
|
import logging
import os
import time
import pandas as pd
import pytest
import requests
from botocore.session import Session
from great_expectations.datasource.batch_kwargs_generator import (
S3SubdirReaderBatchKwargsGenerator,
)
from great_expectations.exceptions import BatchKwargsError
port = 5555
endpoint_uri = "http://127.0.0.1:%s/" % port
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret"
@pytest.fixture(scope="module")
def s3_base():
# writable local S3 system
import shlex
import subprocess
proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % port))
timeout = 5
while timeout > 0:
try:
r = requests.get(endpoint_uri)
if r.ok:
break
except:
pass
timeout -= 0.1
time.sleep(0.1)
yield
proc.terminate()
proc.wait()
@pytest.fixture(scope="module")
def mock_s3_bucket(s3_base):
bucket = "test_bucket"
session = Session()
client = session.create_client("s3", endpoint_url=endpoint_uri)
client.create_bucket(Bucket=bucket, ACL="public-read")
df = pd.DataFrame({"c1": [1, 2, 3], "c2": ["a", "b", "c"]})
keys = [
"data/for/you.csv",
"data/for/me.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=df.to_csv(index=None).encode("utf-8"), Key=key
)
yield bucket
@pytest.fixture
def s3_subdir_generator(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/for",
reader_options={"sep": ","},
)
yield generator
@pytest.fixture
def s3_subdir_generator_with_partition(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/",
reader_options={"sep": ","},
)
yield generator
def test_s3_subdir_generator_basic_operation(s3_subdir_generator):
# S3 Generator sees *only* configured assets
assets = s3_subdir_generator.get_available_data_asset_names()
print(assets)
assert set(assets["names"]) == {
("you", "file"),
("me", "file"),
}
def test_s3_subdir_generator_reader_options_configuration(s3_subdir_generator):
batch_kwargs_list = [
kwargs
for kwargs in s3_subdir_generator.get_iterator(data_asset_name="you", limit=10)
]
print(batch_kwargs_list)
assert batch_kwargs_list[0]["reader_options"] == {"sep": ","}
def test_s3_subdir_generator_build_batch_kwargs_no_partition_id(s3_subdir_generator):
batch_kwargs = s3_subdir_generator.build_batch_kwargs("you")
assert batch_kwargs["s3"] in [
"s3a://test_bucket/data/for/you.csv",
]
def test_s3_subdir_generator_build_batch_kwargs_partition_id(
s3_subdir_generator_with_partition, basic_sparkdf_datasource
):
batch_kwargs = s3_subdir_generator_with_partition.build_batch_kwargs("for", "you")
assert batch_kwargs["s3"] == "s3a://test_bucket/data/for/you.csv"
|
[
"[email protected]"
] | |
758d3add23ff4cc75c3f3557a759800c70585c20
|
27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f
|
/plugins/doc_fragments/files.py
|
a3723db249284fc0990a043729d80d3b2ea6bec2
|
[] |
no_license
|
coll-test/notstdlib.moveitallout
|
eb33a560070bbded5032385d0aea2f3cf60e690b
|
0987f099b783c6cf977db9233e1c3d9efcbcb3c7
|
refs/heads/master
| 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,706 |
py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
# Note: mode is overridden by the copy and template modules so if you change the description
# here, you should also change it there.
DOCUMENTATION = r'''
options:
mode:
description:
- The permissions the resulting file or directory should have.
- For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers.
You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
(like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives
a string and can do its own conversion from string into number.
- Giving Ansible a number without following one of these rules will end up with a decimal
number which will have unexpected results.
- As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or
C(u=rw,g=r,o=r)).
- As of Ansible 2.6, the mode may also be the special string C(preserve).
- When set to C(preserve) the file will be given the same permissions as the source file.
type: str
owner:
description:
- Name of the user that should own the file/directory, as would be fed to I(chown).
type: str
group:
description:
- Name of the group that should own the file/directory, as would be fed to I(chown).
type: str
seuser:
description:
- The user part of the SELinux file context.
- By default it uses the C(system) policy, where applicable.
- When set to C(_default), it will use the C(user) portion of the policy if available.
type: str
serole:
description:
- The role part of the SELinux file context.
- When set to C(_default), it will use the C(role) portion of the policy if available.
type: str
setype:
description:
- The type part of the SELinux file context.
- When set to C(_default), it will use the C(type) portion of the policy if available.
type: str
selevel:
description:
- The level part of the SELinux file context.
- This is the MLS/MCS attribute, sometimes known as the C(range).
- When set to C(_default), it will use the C(level) portion of the policy if available.
type: str
default: s0
unsafe_writes:
description:
- Influence when to use atomic operation to prevent data corruption or inconsistent reads from the target file.
- By default this module uses atomic operations to prevent data corruption or inconsistent reads from the target files,
but sometimes systems are configured or just broken in ways that prevent this. One example is docker mounted files,
which cannot be updated atomically from inside the container and can only be written in an unsafe manner.
- This option allows Ansible to fall back to unsafe methods of updating files when atomic operations fail
(however, it doesn't force Ansible to perform unsafe writes).
- IMPORTANT! Unsafe writes are subject to race conditions and can lead to data corruption.
type: bool
default: no
attributes:
description:
- The attributes the resulting file or directory should have.
- To get supported flags look at the man page for I(chattr) on the target system.
- This string should contain the attributes in the same order as the one displayed by I(lsattr).
- The C(=) operator is assumed as default, otherwise C(+) or C(-) operators need to be included in the string.
type: str
aliases: [ attr ]
'''
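# The mode description above warns about passing a bare number without a leading
# zero; a small Python illustration of why (illustration only, not part of this
# module and unrelated to Ansible's own parsing code):
print(oct(0o644))  # 0o644  -> rw-r--r--, what "0644" / '644' is meant to express
print(oct(644))    # 0o1204 -> what an unquoted 644 becomes: very different permissions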
|
[
"[email protected]"
] | |
6bd87fef952e8c69e3423f386f408538339d9185
|
8370083dbbbd32740ad1862637809396dc7984e2
|
/paresh61.A.MILESTONEPROJECTE/a1.py
|
524064c675bcdabbfbdd144f009ea8b4126de4dc
|
[] |
no_license
|
parshuramsail/PYTHON_LEARN
|
a919b14aab823e0f5e769d8936ddbfb357133db2
|
8c76720bf73f13cf96930e6d4d5128e6ba9aa535
|
refs/heads/main
| 2023-07-14T16:25:26.240555 | 2021-08-29T17:10:19 | 2021-08-29T17:10:19 | 401,095,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,294 |
py
|
# STEP 1: write a function that can print out a board. Set up your board as a list, where each index 1-9 corresponds with a number on a number pad,
# so you get a 3-by-3 board representation.
#print('\n'*100)
def display_board(board):
print('\n'*100)
print(" | |")
print(" " + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(" | |")
print("--------------------")
print(" | |")
print(" " + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(" | |")
print("--------------------")
print(" | |")
print(" " + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(" | |")
# TEST STEP 1: RUN YOUR FUNCTION ON A TEST VERSION OF THE BOARD LIST AND MAKE ADJUSTMENTS AS NECESSARY.
test_board=["#","X","O","X","O","X","O","X","O","X"]
#test_board=['']*10
display_board(test_board)
#print(display_board(test_board))
# STEP 2:
def player_input():
"""
output:(player1=marker, player2=marker)
"""
marker=""
# keep asking player 1 to choose X or O
while marker!="X" and marker!="O":
marker=input("player:1 choose X or O: ").upper()
if marker=="X":
return("X","O")
else:
return("O","X")
# RUN THE FUNCTION TO MAKE SURE IT RETURNS THE DESIRED OUTPUT
player1_marker,player2_marker=player_input()
# STEP 3
def place_marker(board,marker,position):
    board[position] = marker
test_board=["#","X","O","X","O","X","O","X","O","X"]
place_marker(test_board,"$",8)
display_board(test_board)
# STEP 4: WRITE IN A FUNCTION THAT TAKES IN A BOARD AND MARK (X OR O) AND CHECKS TO SEE IF THAT MARK HAS WON.
def win_check(board,mark):
return((board[7]==mark and board[8]==mark and board[9]==mark) or
(board[4]==mark and board[5]==mark and board[6]==mark)or
(board[1]==mark and board[2]==mark and board[3]==mark)or
(board[7]==mark and board[4]==mark and board[1]==mark)or
(board[8]==mark and board[5]==mark and board[2]==mark)or
(board[9]==mark and board[6]==mark and board[3]==mark)or
(board[7]==mark and board[5]==mark and board[3]==mark)or
(board[9]==mark and board[5]==mark and board[1]==mark))
win_check(test_board,"X")
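# Quick sanity check of win_check on a hand-built board (the board below is made
# up for illustration): the top row (indices 7, 8, 9) holds three X marks.
sanity_board = ["#", " ", " ", " ", " ", " ", " ", "X", "X", "X"]
print(win_check(sanity_board, "X"))  # True
print(win_check(sanity_board, "O"))  # False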
|
[
"[email protected]"
] | |
9106a10aff28c894fe165cefa35ee82cd8488822
|
b18f92a6a41a3d83e77848460d4a3f17e4fe677a
|
/introduction_to_python/recursive_functions/1_find_power/solution/test_solution.py
|
1a873495b9e68f09bf6e6f09278da0ec62088424
|
[] |
no_license
|
ByteAcademyCo/Exercises
|
de71b885a498ead8296e6107836f9a06ac399d4f
|
8332d0473ab35ee1d2975b384afda45c77ef943d
|
refs/heads/master
| 2022-05-25T23:01:59.466480 | 2022-03-14T13:12:10 | 2022-03-14T13:12:10 | 252,842,407 | 1 | 109 | null | 2022-03-14T13:12:11 | 2020-04-03T21:09:47 |
Python
|
UTF-8
|
Python
| false | false | 167 |
py
|
def test_solution():
from solution import power
assert power(1, 3) == 1
assert power(2, 4) == 16
assert power(0, 1) == 0
assert power(5, 2) == 25
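# The imported solution module is not part of this record; a minimal recursive
# sketch that would satisfy the assertions above (an illustration, not the
# original solution):
def power(base, exp):
    # base**exp == base * base**(exp - 1), with base**0 == 1
    if exp == 0:
        return 1
    return base * power(base, exp - 1)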
|
[
"[email protected]"
] | |
f30006767dcdf9f17324e03f92349b7c526fad62
|
07564c75c1f37f2e0304720d1c01f23a27ef3469
|
/273.IntegertoEnglishWords/solution.py
|
cfa5b45a7acb04c003bd49fbf53a7a34351569ff
|
[] |
no_license
|
ynXiang/LeetCode
|
5e468db560be7f171d7cb24bcd489aa81471349c
|
763372587b9ca3f8be4c843427e4760c3e472d6b
|
refs/heads/master
| 2020-05-21T18:27:16.941981 | 2018-01-09T22:17:42 | 2018-01-09T22:17:42 | 84,642,017 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,439 |
py
|
class Solution(object):
def numberToWords(self, num):
"""
:type num: int
:rtype: str
"""
res = self.helper(num)
return ' '.join(res) if res else 'Zero'
def helper(self, num):
Ones = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
Tens = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
Hundreds = ['Hundred', 'Thousand', 'Million', 'Billion']
res = []
if num == 0:
res = []
elif num < 20:
res.append(Ones[num - 1])
elif num < 10**2:
res.append(Tens[num // 10 - 2])
res += self.helper(num % 10)
elif num < 10**3:
res += self.helper(num // 10**2)
res.append(Hundreds[0])
res += self.helper(num % 10**2)
elif num < 10**6:
res += self.helper(num // 10**3)
res.append(Hundreds[1])
res += self.helper(num % 10**3)
elif num < 10**9:
res += self.helper(num // 10**6)
res.append(Hundreds[2])
res += self.helper(num % 10**6)
else:
res += self.helper(num // 10**9)
res.append(Hundreds[3])
res += self.helper(num % 10**9)
return res
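# A quick usage sketch; the input/output pair below is the standard example for
# this problem:
if __name__ == '__main__':
    print(Solution().numberToWords(1234567))
    # One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven
    print(Solution().numberToWords(0))  # Zero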
|
[
"[email protected]"
] | |
96766b767b7e79f7fb5ea45946f0cff5d54bc1c8
|
47dc4152dd163ce751d4703f19bb5339fc1cfb98
|
/djchat/settings.py
|
dae41d6fb978e9e0118e1da42103746c0c1bbbbe
|
[
"BSD-3-Clause"
] |
permissive
|
michaelXDzhang/pulsar-django
|
85cf3437a578b2b198ea2f794d1a1f4db8a78ec1
|
0aa20e1c08b6a782cd634e736e2238776e0c98d5
|
refs/heads/master
| 2020-07-27T01:06:32.586546 | 2017-11-28T10:18:34 | 2017-11-28T10:18:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,562 |
py
|
"""
Django settings for djchat project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
APP_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(APP_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fux9z2i)6ab$b_5*^z@96hdtqfj5=ct7b)m6_6cfrr5g%x#=81'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pulse',
'djchat'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(APP_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
]
}
}
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'djchat.views.middleware'
)
ROOT_URLCONF = 'djchat.urls'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
81063a6e3d985fbef8bfdf7fa09786028090fef0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_231/ch45_2020_04_12_23_45_54_626320.py
|
c4a53b37d17f38d42ea23fb09e36af31d98485ca
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
lista=[]
lista_reversa=[]
a=int(input('digite um numero:'))
i=(0)
while a>0:
lista.append(a)
i+=1
a=int(input('digite um numero:'))
# after the input loop i == len(lista); step back to the last valid index
i -= 1
while i>=0:
lista_reversa.append(lista[i])
i-=1
print(lista_reversa)
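# For reference, the same reversal can also be written with a slice; lista[::-1]
# builds a new list with the elements in reverse order.
print(lista[::-1])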
|
[
"[email protected]"
] | |
f22210c8427f7e7a65853ec23b3430b0491d5c34
|
c97fc7658c39feb51c0ed42c04783797c8675b8a
|
/xm_1/qt简单数据可视化.py
|
8536d7db1bea48f72b69fae54a0168600924e53b
|
[] |
no_license
|
githubvit/study
|
8bff13b18bea4954e8ed1b4619a091b134b8ff97
|
845e19d1225f1aa51c828b15effac30be42fdc1b
|
refs/heads/master
| 2023-02-20T15:59:19.635611 | 2021-12-15T08:30:54 | 2021-12-15T08:30:54 | 241,928,274 | 1 | 1 | null | 2023-02-02T06:18:48 | 2020-02-20T16:08:06 |
Python
|
UTF-8
|
Python
| false | false | 1,548 |
py
|
# Qt data visualization: https://doc.qt.io/qt-5/qtcharts-overview.html
from PySide2 import QtGui, QtWidgets
from PySide2.QtCharts import QtCharts
# Qt Charts was added to the Qt modules in Qt 5.7.
# With this module we can conveniently draw many chart styles, such as line charts and pie charts, for quick data visualization.
# Drawing with Qt Charts involves roughly four parts:
# the data (QXYSeries), the QChart (not sure what to call it), the axes (QAbstractAxis) and the view (QChartView).
# Note that the data (QXYSeries) must be added to the QChart first.
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
        series = QtCharts.QLineSeries()  # define the line series (a continuous line chart)
        # add points (the data)
series.append(0,0)
series.append(1,7)
series.append(1.2,14)
series.append(1.3,21)
series.append(1.4,28)
series.append(1.5,35)
        self.chartView = QtCharts.QChartView()  # create the chart view (the UI widget)
        self.chartView.chart().addSeries(series)  # add the series, i.e. the data
        self.chartView.chart().createDefaultAxes()  # create the axes
        series.setColor(QtGui.QColor("salmon"))  # set the line color ("salmon", a pinkish orange)
        self.setCentralWidget(self.chartView)  # set the central widget of the QMainWindow (required)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
w.resize(640, 480)
w.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
e3befb7b065b5be68585a6da785f873742bbffa3
|
a6fa311aff9a99ad6a47e41fe34f3f12bb507007
|
/reagent/training/__init__.py
|
2cc9b73dd0d046b8b5115d9b7e1115535db99f34
|
[
"BSD-3-Clause"
] |
permissive
|
cts198859/ReAgent
|
222e9dd4aeba455ad5faa9f6178a0e9793cb82fc
|
20f3d333821bad364fd567cce97de51c44123484
|
refs/heads/master
| 2022-09-15T13:08:24.732208 | 2020-05-29T00:51:35 | 2020-05-29T00:54:45 | 267,776,326 | 0 | 0 |
BSD-3-Clause
| 2020-05-29T05:51:43 | 2020-05-29T05:51:43 | null |
UTF-8
|
Python
| false | false | 987 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .c51_trainer import C51Trainer, C51TrainerParameters
from .cem_trainer import CEMTrainer
from .dqn_trainer import DQNTrainer, DQNTrainerParameters
from .parametric_dqn_trainer import ParametricDQNTrainer, ParametricDQNTrainerParameters
from .qrdqn_trainer import QRDQNTrainer, QRDQNTrainerParameters
from .rl_trainer_pytorch import RLTrainer
from .sac_trainer import SACTrainer, SACTrainerParameters
from .td3_trainer import TD3Trainer, TD3TrainingParameters
from .world_model.mdnrnn_trainer import MDNRNNTrainer
__all__ = [
"C51Trainer",
"C51TrainerParameters",
"CEMTrainer",
"RLTrainer",
"DQNTrainer",
"DQNTrainerParameters",
"MDNRNNTrainer",
"ParametricDQNTrainer",
"ParametricDQNTrainerParameters",
"QRDQNTrainer",
"QRDQNTrainerParameters",
"SACTrainer",
"SACTrainerParameters",
"TD3Trainer",
"TD3TrainingParameters",
]
|
[
"[email protected]"
] | |
d8fc9aa6b18fb2f4bc50363b8a36ca7d158c1c44
|
08b998966c06dc50cd9372fe3e15d6599bcafbfb
|
/dotfiles/.ipython/profile_default/startup/10-pager.py
|
57da300105eaebfc2a84c667499aeb36a6ca7a1d
|
[
"MIT"
] |
permissive
|
chbrown/config
|
77661fc8e485d5a8992114fd11e7eae383698b9b
|
ec8deb0bf756ff62f5599cb239c8ac11084d3d16
|
refs/heads/master
| 2021-06-06T10:13:24.401647 | 2021-02-22T15:03:54 | 2021-02-22T15:03:54 | 1,827,574 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
from __future__ import print_function
# IPython.core.hooks.show_in_pager doesn't cut it
import IPython.core.page
def page_printer(data, start=0, screen_lines=0, pager_cmd=None):
if isinstance(data, dict):
data = data['text/plain']
print(data)
IPython.core.page.page = page_printer
|
[
"[email protected]"
] | |
f023b96d1bcc10da7a3a00e98c2a26e6526415ec
|
b6e7e7c0a68621c613898534f20de96c459fd0a9
|
/client/app.py
|
9999fc5bcf65ae1e09cde1f359f971321fe32177
|
[] |
no_license
|
jwoglom/zoom-tools
|
227db0974c7ac239b9ea51b6e95222c765025d66
|
951b20970a990f3b293c593d3969c92550120913
|
refs/heads/main
| 2023-03-07T18:00:02.646547 | 2021-02-16T21:33:16 | 2021-02-16T21:33:16 | 339,311,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,877 |
py
|
#!/usr/bin/env python3
from flask import Flask, Response, request, abort
import random
import string
import subprocess
import os
app = Flask(__name__)
scripts_dir = os.path.join(os.path.dirname(__file__), "../scripts")
token = os.environ.get("TOKEN", "".join(random.choice(string.ascii_letters) for i in range(24)))
try:
from secrets import SELF_TOKEN
token = SELF_TOKEN
except ImportError:
pass
print("Token: %s" % token)
@app.before_request
def is_token_set():
provided_token = request.args.get("token") or request.form.get("token")
if provided_token != token:
print("Provided invalid token %s" % provided_token)
abort(403)
def run(script):
print(os.path.join(scripts_dir, script))
s = subprocess.run([os.path.join(scripts_dir, script)], capture_output=True)
return s.stdout.decode()
@app.route('/status', methods=['GET', 'POST'])
def status_route():
return run("zoom_status.sh")
@app.route('/audio', methods=['GET', 'POST'])
def audio_route():
return run("zoom_audio_status.sh")
@app.route('/audio/mute', methods=['GET', 'POST'])
def mute_route():
return run("zoom_mute.sh")
@app.route('/audio/unmute', methods=['GET', 'POST'])
def unmute_route():
return run("zoom_unmute.sh")
@app.route('/audio/toggle', methods=['GET', 'POST'])
def audio_toggle_route():
return run("zoom_audio_toggle.sh")
@app.route('/video', methods=['GET', 'POST'])
def video_route():
return run("zoom_video_status.sh")
@app.route('/video/off', methods=['GET', 'POST'])
def video_off_route():
return run("zoom_video_off.sh")
@app.route('/video/on', methods=['GET', 'POST'])
def video_on_route():
return run("zoom_video_on.sh")
@app.route('/video/toggle', methods=['GET', 'POST'])
def video_toggle_route():
return run("zoom_video_toggle.sh")
if __name__ == '__main__':
app.run('0.0.0.0', port=2626)
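# A client-side sketch of how the token is passed as a query parameter; the host
# and token value here are assumptions for illustration (the server prints its
# token on startup), while the port comes from app.run() above.
import requests
BASE = "http://localhost:2626"
TOKEN = "replace-with-printed-token"
resp = requests.post(BASE + "/audio/toggle", params={"token": TOKEN})
print(resp.status_code, resp.text)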
|
[
"[email protected]"
] | |
1d64087b50a7754102a8f120289480550b469a86
|
41bd7d939207e94c8f6956f02b779f5084b23bf4
|
/archives/admin.py
|
8604e9655b6c0429d898e44ebeaf1a4f5c81a761
|
[] |
no_license
|
wd5/acanthes
|
724b81c799ab04344c66691a054b2a555b3e3d77
|
8c4fd011e60e9869396f1a93b385133ebff74238
|
refs/heads/master
| 2021-01-17T12:13:35.216661 | 2012-06-13T13:05:06 | 2012-06-13T13:05:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,171 |
py
|
from django.contrib import admin
from archives.models import *
class IntervenantAudioInline(admin.TabularInline):
model = IntervenantAudio
extra = 1
class AudioAdmin(admin.ModelAdmin):
inlines = (IntervenantAudioInline,)
list_display = ('id','subtitle', 'annee', 'genre', 'url_ecoute_intranet_adresse' )
list_filter = ('annee', )
search_fields = ['subtitle', ]
exclude = ('duree', 'total_durees', 'chemin_fichier', 'lien_test_web', 'dateissued_portail', 'horodatage_modification',
'url_export_ircam', 'type_ircam', 'date_enregistrement', 'acanthes',
'horodatage_creation', 'url_ecoute_extranet', 'url_ecoute_internet', 'url_ecoute_intranet', 'details_intranet_actuel_acda',
'oai_web_oai_mods', 'oai_id', 'oai_titleinfo_title', 'oai_typeofresource', 'oai_genre', 'oai_origininfo_place',
'oai_origininfo_publisher', 'oai_origininfo_datecaptured', 'oai_language_languageterm_1', 'oai_language_languageterm_2',
'oai_language_languageterm_3', 'oai_physicaldescription_form', 'oai_physicaldescription_internetmediatype', 'oai_physicaldescription_digitalorigin',
'oai_abstract', 'oai_targetaudience', 'oai_location_url_preview', 'oai_location_url_full', 'oai_location_physicallocation', 'oai_accesscondition',
'oai_recordinfo_recordcontentsource', 'oai_recordinfo_recordcreationdate', 'oai_recordinfo_recordchangedate', 'oai_recordinfo_recordidentifier',
'oai_recordinfo_languageofcataloging_languageterm', 'oai_publication')
class IntervenantAdmin(admin.ModelAdmin):
list_display = ('nom', 'prenom')
exclude = ('horodatage_creation', 'horodatage_modification')
search_fields = ['nom', 'prenom']
class LangueAdmin(admin.ModelAdmin):
list_display = ('languageterm',)
class LieuAdmin(admin.ModelAdmin):
list_display = ('placeterm', 'salle')
class OrchestreAdmin(admin.ModelAdmin):
list_display = ('nom_complet', 'sous_titre')
search_fields = ['nom_complet', ]
admin.site.register(Audio, AudioAdmin)
admin.site.register(Intervenant, IntervenantAdmin)
admin.site.register(Langue, LangueAdmin)
admin.site.register(Lieu, LieuAdmin)
admin.site.register(Orchestre, OrchestreAdmin)
|
[
"[email protected]"
] | |
38a28d7f0257148f8e867dcfd6350f0e6276dd14
|
f7dd190a665a4966db33dcc1cc461dd060ca5946
|
/venv/Lib/site-packages/graphene/types/tests/test_schema.py
|
88af101988356209c9722d213bfa5137344960fa
|
[] |
no_license
|
Darwin939/macmeharder_back
|
2cc35e2e8b39a82c8ce201e63d9f6a9954a04463
|
8fc078333a746ac7f65497e155c58415252b2d33
|
refs/heads/main
| 2023-02-28T12:01:23.237320 | 2021-02-02T17:37:33 | 2021-02-02T17:37:33 | 328,173,062 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,022 |
py
|
import pytest
from ..field import Field
from ..objecttype import ObjectType
from ..scalars import String
from ..schema import Schema
class MyOtherType(ObjectType):
field = String()
class Query(ObjectType):
inner = Field(MyOtherType)
def test_schema():
schema = Schema(Query)
assert schema.get_query_type() == schema.get_graphql_type(Query)
def test_schema_get_type():
schema = Schema(Query)
assert schema.Query == Query
assert schema.MyOtherType == MyOtherType
def test_schema_get_type_error():
schema = Schema(Query)
with pytest.raises(AttributeError) as exc_info:
schema.X
assert str(exc_info.value) == 'Type "X" not found in the Schema'
def test_schema_str():
schema = Schema(Query)
assert (
str(schema)
== """schema {
query: Query
}
type MyOtherType {
field: String
}
type Query {
inner: MyOtherType
}
"""
)
def test_schema_introspect():
schema = Schema(Query)
assert "__schema" in schema.introspect()
|
[
"[email protected]"
] | |
87b4c9c295b5f43b508c4f5062977f0f628852e2
|
4a84ef702269eed582b04dbed979a24607579f52
|
/src/mapnik/tests/python_tests/sqlite_rtree_test.py
|
2d28adac0266d3439eb51f6e9cc4d9c5da04e236
|
[] |
no_license
|
olibook/pymapnik2
|
9ef766d759afc3efeccd988bfb7239bd73cac01e
|
c409fa150e203ff85e14b8fd40063267a6802e1c
|
refs/heads/master
| 2016-08-04T11:51:35.987664 | 2013-02-18T16:01:10 | 2013-02-18T16:01:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,302 |
py
|
#!/usr/bin/env python
from nose.tools import *
from mapnik.tests.python_tests.utilities import execution_path
from Queue import Queue
import threading
import os, mapnik
import sqlite3
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
NUM_THREADS = 10
TOTAL = 245
DB = '../data/sqlite/world.sqlite'
TABLE= 'world_merc'
def create_ds():
ds = mapnik.SQLite(file=DB,table=TABLE)
fs = ds.all_features()
if 'sqlite' in mapnik.DatasourceCache.instance().plugin_names():
def test_rtree_creation():
index = DB +'.index'
if os.path.exists(index):
os.unlink(index)
threads = []
for i in range(NUM_THREADS):
t = threading.Thread(target=create_ds)
t.start()
threads.append(t)
for i in threads:
i.join()
eq_(os.path.exists(index),True)
conn = sqlite3.connect(index)
cur = conn.cursor()
try:
cur.execute("Select count(*) from idx_%s_GEOMETRY" % TABLE.replace("'",""))
conn.commit()
eq_(cur.fetchone()[0],TOTAL)
except sqlite3.OperationalError:
# don't worry about testing # of index records if
# python's sqlite module does not support rtree
pass
cur.close()
ds = mapnik.SQLite(file=DB,table=TABLE)
fs = ds.all_features()
eq_(len(fs),TOTAL)
os.unlink(index)
ds = mapnik.SQLite(file=DB,table=TABLE,use_spatial_index=False)
fs = ds.all_features()
eq_(len(fs),TOTAL)
eq_(os.path.exists(index),False)
ds = mapnik.SQLite(file=DB,table=TABLE,use_spatial_index=True)
fs = ds.all_features()
for feat in fs:
query = mapnik.Query(feat.envelope())
selected = ds.features(query)
eq_(len(selected.features)>=1,True)
eq_(os.path.exists(index),True)
os.unlink(index)
def test_geometry_round_trip():
test_db = '/tmp/mapnik-sqlite-point.db'
ogr_metadata = True
# create test db
conn = sqlite3.connect(test_db)
cur = conn.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS point_table
(id INTEGER PRIMARY KEY AUTOINCREMENT, geometry BLOB, name varchar)
''')
# optional: but nice if we want to read with ogr
if ogr_metadata:
cur.execute('''CREATE TABLE IF NOT EXISTS geometry_columns (
f_table_name VARCHAR,
f_geometry_column VARCHAR,
geometry_type INTEGER,
coord_dimension INTEGER,
srid INTEGER,
geometry_format VARCHAR )''')
cur.execute('''INSERT INTO geometry_columns
(f_table_name, f_geometry_column, geometry_format,
geometry_type, coord_dimension, srid) VALUES
('point_table','geometry','WKB', 1, 1, 4326)''')
conn.commit()
cur.close()
# add a point as wkb (using mapnik) to match how an ogr created db looks
x = -122 # longitude
y = 48 # latitude
wkt = 'POINT(%s %s)' % (x,y)
# little endian wkb (mapnik will auto-detect and ready either little or big endian (XDR))
wkb = mapnik.Path.from_wkt(wkt).to_wkb(mapnik.wkbByteOrder.NDR)
values = (None,sqlite3.Binary(wkb),"test point")
cur = conn.cursor()
cur.execute('''INSERT into "point_table" (id,geometry,name) values (?,?,?)''',values)
conn.commit()
cur.close()
def make_wkb_point(x,y):
import struct
byteorder = 1; # little endian
endianess = ''
if byteorder == 1:
endianess = '<'
else:
endianess = '>'
geom_type = 1; # for a point
return struct.pack('%sbldd' % endianess, byteorder, geom_type, x, y)
# confirm the wkb matches a manually formed wkb
wkb2 = make_wkb_point(x,y)
eq_(wkb,wkb2)
# ensure we can read this data back out properly with mapnik
ds = mapnik.Datasource(**{'type':'sqlite','file':test_db, 'table':'point_table'})
fs = ds.featureset()
feat = fs.next()
eq_(feat.id(),1)
eq_(feat['name'],'test point')
geoms = feat.geometries()
eq_(len(geoms),1)
eq_(geoms.to_wkt(),'Point(-122.0 48.0)')
# ensure it matches data read with just sqlite
cur = conn.cursor()
cur.execute('''SELECT * from point_table''')
conn.commit()
result = cur.fetchone()
cur.close()
feat_id = result[0]
eq_(feat_id,1)
name = result[2]
eq_(name,'test point')
geom_wkb_blob = result[1]
eq_(str(geom_wkb_blob),geoms.to_wkb(mapnik.wkbByteOrder.NDR))
new_geom = mapnik.Path.from_wkb(str(geom_wkb_blob))
eq_(new_geom.to_wkt(),geoms.to_wkt())
# cleanup
os.unlink(test_db)
os.unlink(test_db + '.index')
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
|
[
"[email protected]"
] | |
8ac469d250354ff770e368d0dc803cc543d5ac0d
|
c42908fce35bc2afb10abd924cfd13d5fa286205
|
/html2vec/base/io/basefilehandlers.py
|
30f19a526fdb753f0bc6b1578280d70ce6dcfae6
|
[
"MIT"
] |
permissive
|
dpritsos/html2vec
|
b3866f05e7e1c1cb61f40b8f038c1a05a89a9faa
|
be5629d6dc2665891472c5795c191286f0de31e7
|
refs/heads/master
| 2023-05-13T08:30:24.485797 | 2021-06-05T07:29:06 | 2021-06-05T07:29:06 | 1,896,404 | 8 | 0 | null | 2018-10-20T13:10:43 | 2011-06-14T19:54:52 |
Python
|
UTF-8
|
Python
| false | false | 3,689 |
py
|
#
# Module: Base File Handlers
#
# Author: Dimitiros Pritsos
#
# License: BSD Style
#
# Last update: Please refer to the GIT tracking
#
""" html2vect.base.io.basefilehandlers: submodule of `html2vect` module defines the class
BasePathHandler and BaseFileHandler """
import codecs
import os
def copyfile(source, dest):
""" copyfile(): Copy a file from source to dest path. """
source_f = open(source, 'rb')
dest_f = open(dest, 'wb')
while True:
copy_buffer = source_f.read(1024*1024)
if copy_buffer:
dest_f.write(copy_buffer)
else:
break
source_f.close()
dest_f.close()
def movefile(source, dest):
""" movefile(): A UNIX compatible function for moving file from Source path
to Destination path. The Source path Hard Link is deleted """
os.link(source, dest)
os.unlink(source)
def file_list_frmpaths(basepath, filepath_l):
if basepath is None:
basepath = ''
if isinstance(filepath_l, str):
flist = [files_n_paths[2] for files_n_paths in os.walk(basepath + filepath_l)]
flist = flist[0]
fname_lst = [basepath + filepath_l + fname for fname in flist]
elif isinstance(filepath_l, list):
fname_lst = list()
for filepath in filepath_l:
flist = [files_n_paths[2] for files_n_paths in os.walk(basepath + filepath)]
flist = flist[0]
fname_lst.extend([basepath + filepath + '/' + fname for fname in flist])
else:
raise Exception(
"A String or a list of Strings was Expected as input - Stings should be file-paths"
)
# For ease of usage the filename list should be returned sorted
fname_lst.sort()
return fname_lst
class BaseFileHandler(object):
def __init__(self):
self.filename_lst = []
self.file_count = None
def __iter__(self):
return self
def next(self):
if len(self.filename_lst) == self.file_count:
raise StopIteration
xhtml = self.__load_file(
self.filename_lst[self.file_count], self.encoding, self.error_handling
)
self.file_count += 1
return xhtml
def __load_file(self, filename, encoding='utf-8', error_handling='strict'):
""" """
try:
fenc = codecs.open(filename, 'rb', encoding, error_handling)
except Exception as e:
print("BaseFileHandler.__load_file() FILE %s ERROR: %s" % (filename, e))
return None
try:
fstr = fenc.read()
except Exception as e:
print("BaseFileHandler.__load_file() FILE %s ERROR: %s" % (filename, e))
return None
finally:
fenc.close()
return fstr
def load_files(self, filename_l, encoding='utf-8', error_handling='strict'):
""" """
if isinstance(filename_l, str):
return self.__load_file(filename_l, encoding, error_handling)
elif isinstance(filename_l, list):
self.filename_lst = filename_l
self.file_count = 0
self.encoding = encoding
self.error_handling = error_handling
return self.__iter__()
else:
raise Exception("A String or a list of Strings was Expected as input")
def load_frmpaths(self, basepath, filepath_l, encoding='utf-8', error_handling='strict'):
"""This function requires hight amount of memory!"""
fname_lst = self.file_list_frmpaths(basepath, filepath_l)
return [[fname, fstr] for fname, fstr in zip(
fname_lst, self.load_files(fname_lst, encoding, error_handling))]
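# A brief usage sketch with made-up file names (illustration only): a single path
# returns the decoded string (or None on failure), while a list of paths returns
# an iterator over the decoded strings.
handler = BaseFileHandler()
text = handler.load_files('sample.html', encoding='utf-8', error_handling='replace')
print(text is not None)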
|
[
"[email protected]"
] | |
f2b8c9a7622b4657969fb9800cd35901be8fe2e1
|
e83df449e6956d5af8e4b98d535a9daacbbff477
|
/main.py
|
0215ec4d5e158a0c5cdccee9fcaa8569fd2549a5
|
[] |
no_license
|
LefterisJP/race_analyzer
|
2f48edc34bb299f0d96e3a19a4f245b1b082f21d
|
08a5041817e227969775a42656c2bce2030ed69f
|
refs/heads/master
| 2020-03-28T22:40:01.078406 | 2018-09-18T07:47:53 | 2018-09-18T07:47:53 | 149,248,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 204 |
py
|
import click
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx, threads=None, keyfile=None, input_file=None, respect_word_order=False, **kwargs):
    # click only passes the context here; the extra parameters keep their defaults
    # until matching @click.option declarations are added.
    pass
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
ec86aee2863caad73625cec5b38ecb008e726e79
|
462c56e7454c97e0541588b9be66a4e216ea20fd
|
/399.evaluate-division.py
|
a3cfbdefe5a676c0fd3cfbca9f067f3686c034cf
|
[] |
no_license
|
LouisYLWang/leetcode_python
|
d5ac6289e33c5d027f248aa3e7dd66291354941c
|
2ecaeed38178819480388b5742bc2ea12009ae16
|
refs/heads/master
| 2020-05-27T08:38:48.532000 | 2019-12-28T07:08:57 | 2019-12-28T07:08:57 | 188,549,256 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,199 |
py
|
#
# @lc app=leetcode id=399 lang=python3
#
# [399] Evaluate Division
#
class Solution(object):
def calcEquation(self, equations, values, queries):
"""
:type equations: List[List[str]]
:type values: List[float]
:type queries: List[List[str]]
:rtype: List[float]
"""
div_map = dict()
for [i,j], v in zip(equations, values):
if i in div_map:
div_map[i][j] = v
else: div_map[i] = {j:v}
if j in div_map:
div_map[j][i] = 1/v
else: div_map[j] = {i:1/v}
print(div_map)
def get_res(i, j, ans):
if i not in div_map or j not in div_map:
return -1.0
elif i == j:
return 1.0
else:
if j in div_map[i]:
return div_map[i][j]
else:
for k in div_map[i]:
# use visited to control repeating visit
visited.add(i)
if k not in visited:
temp = get_res(k, j, ans)
# do not mistakenly use if temp
if temp != -1:
return div_map[i][k] * temp
# notice: if not find anything, remember to return -1
return -1.0
# an alternative way of implementing get_res (more compact)
def get_res(i, j, ans):
if i not in div_map:
return -1.0
elif i == j:
return 1.0
for k in div_map[i]:
if j == k:
return div_map[i][j]
elif k not in visited:
visited.add(i)
temp = get_res(k, j, ans)
if temp != -1:
return div_map[i][k] * temp
return -1.0
res = list()
for query in queries:
visited = set()
res.append(get_res(query[0], query[1], 1))
return res
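# A short usage sketch with the standard example inputs for this problem:
if __name__ == '__main__':
    equations = [["a", "b"], ["b", "c"]]
    values = [2.0, 3.0]
    queries = [["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"]]
    print(Solution().calcEquation(equations, values, queries))
    # [6.0, 0.5, -1.0, 1.0, -1.0]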
|
[
"[email protected]"
] | |
0e466f4ac716661f529c7dba7cacc70a9e2d454b
|
5b9b2ec5fb3142609882a3320c6e64c6b912395c
|
/LeetCode/mostWaterContainer.py
|
b38ef468717fd38297251aa67b4e445ea5621a0e
|
[] |
no_license
|
anildhaker/DailyCodingChallenge
|
459ba7ba968f4394fb633d6ba8b749c1e4cb7fb0
|
f1cfc52f156436dc7c0a6c43fa939cefac5cee36
|
refs/heads/master
| 2020-04-20T18:58:33.133225 | 2020-01-11T14:50:52 | 2020-01-11T14:50:52 | 169,036,874 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 642 |
py
|
# Given n non-negative integers a1, a2, ..., an , where each represents a point at
# coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line
# i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a
# container, such that the container contains the most water.
from typing import List
def maxArea(height: List[int]) -> int:
i = 0
j = len(height)-1
area = 0
while i < j :
area = max(area,(j-i)*min(height[i],height[j]))
if height[i] < height[j]:
i += 1
else:
j -= 1
return area
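# Quick check with the well-known sample input for this problem:
print(maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49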
|
[
"[email protected]"
] | |
404a81bab69f0ff9408a716756755d82973ea033
|
c0f72a4c87794df5c4c239ddfc0392f7b9295d3f
|
/top/api/rest/TopatsTaskDeleteRequest.py
|
162b86495235cb30dd4a04ecdb6d98d9891e873b
|
[
"MIT"
] |
permissive
|
chenluzhong150394/taobao-top-python3_version
|
c37ec2093726212b49a84598becd183b9104bd99
|
61b262c46e48504754a9427986595bce0ae0e373
|
refs/heads/master
| 2020-11-27T16:28:46.526318 | 2020-06-29T14:32:16 | 2020-06-29T14:32:16 | 229,528,970 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
'''
Created by auto_sdk on 2013-06-03 16:32:57
'''
from top.api.base import RestApi
class TopatsTaskDeleteRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.task_id = None
def getapiname(self):
return 'taobao.topats.task.delete'
|
[
"[email protected]"
] | |
f901db4af23a8c26750f616948c92326dd175944
|
4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8
|
/python/decorator/6.py
|
9daaa357949d9124d267fde893e0bbd950f06d36
|
[
"MIT"
] |
permissive
|
gozeon/code-collections
|
464986c7765df5dca980ac5146b847416b750998
|
13f07176a6c7b6ac13586228cec4c1e2ed32cae4
|
refs/heads/master
| 2023-08-17T18:53:24.189958 | 2023-08-10T04:52:47 | 2023-08-10T04:52:47 | 99,432,793 | 1 | 0 |
NOASSERTION
| 2020-07-17T09:25:44 | 2017-08-05T15:56:53 |
JavaScript
|
UTF-8
|
Python
| false | false | 310 |
py
|
import logging
def user_logging(func):
def wrapper(*args, **kwargs):
        logging.warning("%s is running" % func.__name__)
return func(*args, **kwargs)
return wrapper
@user_logging
def foo(name, age=None, height=None):
print('i am %s, age %s, height %s' % (name, age, height))
foo('haha', 12, 40)
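# A common refinement (not in the original): functools.wraps keeps the decorated
# function's name and docstring intact, which matters once logging or debugging
# relies on func.__name__.
import functools
def user_logging_wrapped(func):
    @functools.wraps(func)  # preserves func.__name__, __doc__, etc.
    def wrapper(*args, **kwargs):
        logging.warning("%s is running" % func.__name__)
        return func(*args, **kwargs)
    return wrapper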
|
[
"[email protected]"
] | |
edde83793cbbb6f5ecd213edbf7171025f7c5995
|
f603b0edb36f3578b99c49aea68c09acb222b5e2
|
/exercicios/Curso_Udemy_Python/sec3_aula58.py
|
6506bd25d840427c70745b94680fc8c9fb54c13b
|
[
"MIT"
] |
permissive
|
igobarros/maratona-data-science-brasil
|
260d8160a356dfdf5876cfef03a0aacc7f20340e
|
cc07476579134a2764f00d229d415657555dcdd1
|
refs/heads/master
| 2021-10-09T23:33:25.278361 | 2019-01-04T15:08:43 | 2019-01-04T15:08:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 292 |
py
|
class MyList(list):
def append(self, *args):
self.extend(args)
m = MyList()
m.append(0)
m.append(1,2,3,4,5,6)
print(m)
class MyList1(list):
def sort(self):
return 'eae vey? ta afim de ordenar?'
l = [4,1,78,34,4,9]
'''l.sort()
print(l)'''
lista = MyList1()
print(lista.sort())
|
[
"[email protected]"
] | |
ec46ebcaaa624f2ac7abf272df486a27cd2075fe
|
b25055503a8f0de13b4f7aece4f6cf1ba5c9d3ab
|
/tests/fixtures.py
|
03ad2db59c5325385cda821694184a7a51d8a6c9
|
[
"MIT"
] |
permissive
|
mkturkcan/autobahn-sync
|
a340eb9f32c331a9b4331f0a1701e18ef78e3d9e
|
2663520c032912c0769647de8fc5e47d9234cf07
|
refs/heads/master
| 2020-03-19T12:41:23.387271 | 2018-06-12T16:54:30 | 2018-06-12T16:54:30 | 136,533,456 | 0 | 0 | null | 2018-06-07T21:34:46 | 2018-06-07T21:34:46 | null |
UTF-8
|
Python
| false | false | 1,294 |
py
|
from os import path
from time import sleep
import subprocess
import pytest
from autobahn_sync import AutobahnSync, ConnectionRefusedError
CROSSBAR_CONF_DIR = path.abspath(path.dirname(__file__)) + '/.crossbar'
START_CROSSBAR = not pytest.config.getoption("--no-router")
@pytest.fixture(scope="module")
def crossbar(request):
if START_CROSSBAR:
# Start a wamp router
subprocess.Popen(["crossbar", "start", "--cbdir", CROSSBAR_CONF_DIR])
started = False
for _ in range(20):
sleep(0.5)
# Try to engage a wamp connection with crossbar to make sure it is started
try:
test_app = AutobahnSync()
test_app.run()
# test_app.session.disconnect() # TODO: fix me
except ConnectionRefusedError:
continue
else:
started = True
break
if not started:
raise RuntimeError("Couldn't connect to crossbar router")
def finalizer():
p = subprocess.Popen(["crossbar", "stop", "--cbdir", CROSSBAR_CONF_DIR])
p.wait()
if START_CROSSBAR:
request.addfinalizer(finalizer)
@pytest.fixture
def wamp(crossbar):
wamp = AutobahnSync()
wamp.run()
return wamp
@pytest.fixture
def wamp2(crossbar):
return wamp(crossbar)
|
[
"[email protected]"
] | |
80a876d02aa0d4d1c1a901b0311bd3e3900c7ef4
|
7623386df02a52145b174700621fa70973e81d0e
|
/shakecastaebm/validation/generate.py
|
7925da9b8f3a495b9c6fce24a65e5ca2affc39d0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
dslosky-usgs/shakecast-aebm
|
f641f6a3bac3d466fb4e0f02b4913e0b63fa5ecb
|
bec1ad970989a7121096123f0b3a84c20ed0a0cc
|
refs/heads/master
| 2021-06-24T07:02:27.539492 | 2018-08-08T20:51:08 | 2018-08-08T20:51:08 | 144,181,944 | 0 | 0 | null | 2018-08-09T17:09:24 | 2018-08-09T17:09:24 | null |
UTF-8
|
Python
| false | false | 1,409 |
py
|
import os
import sys
from . import shakecast
from . import workbook
from . import damping
from . import demand
if __name__ == '__main__':
pp_fig, capacity_fig, acc_diff_fig, disp_diff_fig = workbook.run()
cap_fig, haz_fig, dsf_fig, dem_fig, sc_pp_fig, impact_fig = shakecast.run()
damp1, damp2 = damping.run()
demand1, demand2 = demand.run()
if len(sys.argv) > 1:
path = sys.argv[1]
else:
path = '.'
if not os.path.exists(path):
os.makedirs(path)
# save workbook validation figures
pp_fig.savefig(os.path.join(path, 'perf_point1'))
capacity_fig.savefig(os.path.join(path, 'capacity_comp'))
acc_diff_fig.savefig(os.path.join(path, 'acc_diff'))
disp_diff_fig.savefig(os.path.join(path, 'disp_diff'))
# save shakecast figures
cap_fig.savefig(os.path.join(path, 'sc_capacity'))
haz_fig.savefig(os.path.join(path, 'sc_hazard'))
dsf_fig.savefig(os.path.join(path, 'sc_dsf'))
dem_fig.savefig(os.path.join(path, 'sc_demand'))
sc_pp_fig.savefig(os.path.join(path, 'perf_point2'))
impact_fig.savefig(os.path.join(path, 'impact_fig'))
# save damping figures
damp1.savefig(os.path.join(path, 'damping_beta'))
damp2.savefig(os.path.join(path, 'damping_dsf'))
# save demand figures
demand1.savefig(os.path.join(path, 'hazard_expansion'))
demand2.savefig(os.path.join(path, 'damped_demand'))
|
[
"[email protected]"
] | |
220dfaaeafb0194a281d372055511fb51b1ca888
|
f7f2e8af3e9b19840396ab5da36bfa161cf03484
|
/setup.py
|
3466011a2a76c26eb5542750376251fd57f946c5
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nevinoven/bcwallet
|
2a4713f24505978f681d6d398300c144834bfbf0
|
afaef09b3c3ac87de765cd9a915f98c046084b21
|
refs/heads/master
| 2021-01-15T23:50:56.911620 | 2015-12-08T18:29:22 | 2015-12-08T18:29:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 692 |
py
|
# https://youtu.be/kNke39OZ2k0?t=65
from setuptools import setup
setup(
name='bcwallet',
version='1.2.3',
description='Simple BIP32 HD cryptocurrecy command line wallet',
author='Michael Flaxman',
author_email='[email protected]',
url='https://github.com/blockcypher/bcwallet/',
py_modules=['bcwallet'],
install_requires=[
'clint==0.4.1',
'blockcypher==1.0.53',
'bitmerchant==0.1.8',
'tzlocal==1.2',
],
entry_points='''
[console_scripts]
bcwallet=bcwallet:invoke_cli
''',
packages=['bcwallet'],
)
|
[
"[email protected]"
] | |
164272c7c197a50b02def627df3852104c8d4b26
|
656341483ae8abe8792942d26556fdd4ff5ca7a9
|
/Case/AS/Http/DocPolicyMgnt/test_AddPolicyPwdStrength201.py
|
ce8cafe542826cd32d85e60e6ce32d22c57ae029
|
[] |
no_license
|
GWenPeng/Apitest_framework
|
b57ded9be4ec896d4ba8e02e9135bc7c73d90034
|
ab922c82c2454a3397ddbf4cd0771067734e1111
|
refs/heads/master
| 2022-11-26T05:54:47.168062 | 2020-08-06T01:45:12 | 2020-08-06T01:45:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,886 |
py
|
import pytest
import allure
import sys
sys.path.append("../../../../")
from Common.readjson import JsonRead
from DB_connect.mysqlconnect import DB_connect
from Common.http_request import Http_client
@pytest.mark.ASP_344
@pytest.mark.high
@allure.severity('blocker')  # priority
@allure.feature("Document domain policy management")
class Test_AddPolicy_PwdStrengthCheck201(object):
    @allure.testcase("ID5318, case name: add policy configuration -- password strength, configured successfully -- returns 201")
    # Runs after each test case finishes, to clean up the environment
@pytest.fixture(scope="function")
def teardown(self):
pass
yield
db = DB_connect()
db.delete("delete from t_policy_tpls")
@pytest.mark.parametrize("jsondata,checkpoint", argvalues=JsonRead(
"AS\\Http\\DocPolicyMgnt\\testdata\\test_AddPolicyPwdStrength201.json").dict_value_join())
def test_AddPolicy_PwdStrengthCheck201(self, jsondata,checkpoint,teardown):
        # create the policy
add_client = Http_client()
add_client.post(url="/api/document-domain-management/v1/policy-tpl",
jsondata=jsondata,
header="{\"Content-Type\":\"application/json\"}")
        # assert on the API response status
assert add_client.status_code == checkpoint['status_code']
        # fetch the policy id from the t_policy_tpls table
db = DB_connect()
query_result = db.select_one("select f_id from t_policy_tpls")
        # the SQL query returns a tuple; take its first element, i.e. the policy id
global policyid
policyid = query_result[0]
        # build the expected Location value
location = "/api/document-domain-management/v1/policy-tpl/" + policyid
assert location == add_client.respheaders['Location']
assert add_client.elapsed <= 20.0
if __name__ == '__main__':
pytest.main(['-q', '-v', 'test_AddPolicyPwdStrength201.py'])
|
[
"[email protected]"
] | |
effe7dc25476101643ced680af1b5b329b9d4308
|
de27e6d143f40d5948244597b861d522a9a272f6
|
/fjord/heartbeat/migrations/0009_answer_country.py
|
f824482bc7c9c38b3c33cf27d7df7e8ff5aaac97
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mozilla/fjord
|
7f31af6dd80869ca856f8a02ff10e72c81685368
|
0fcb81e6a5edaf42c00c64faf001fc43b24e11c0
|
refs/heads/master
| 2023-07-03T18:20:01.651759 | 2017-01-10T20:12:33 | 2017-01-10T20:12:33 | 5,197,539 | 18 | 22 | null | 2016-08-22T14:56:11 | 2012-07-26T21:25:00 |
Python
|
UTF-8
|
Python
| false | false | 519 |
py
|
# -*- coding: utf-8 -*-
"""
Add country to heartbeat Answer table.
"""
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('heartbeat', '0008_auto_20150305_1442'),
]
operations = [
migrations.AddField(
model_name='answer',
name='country',
field=models.CharField(default='', max_length=4, null=True, blank=True),
preserve_default=True,
),
]
|
[
"[email protected]"
] | |
2ca452bcbb76a5940af2d37e15ccbd301ac908f9
|
46af8b5c7d1790ee9ddef636c7428eb5f23de5e5
|
/project/settings_local.py
|
d8b4e8f42916c8b50165d0ad17e924372de258a3
|
[] |
no_license
|
praekelt/speed-demo
|
f1370628ca9241ec5cb86ea76f6c615c1138fa9e
|
782c9d7263bed59a7d2ab9dc5d169a7a348a277e
|
refs/heads/master
| 2020-12-07T15:24:26.979572 | 2017-06-28T14:03:50 | 2017-06-28T14:03:50 | 95,519,936 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,601 |
py
|
import os
import raven
# Declare or redeclare variables here
FOOFOO = 1
# You should redefine the CACHE setting here
# Configure raven. Set "dsn" to None for your development environment. It must
# be None - anything else causes problems.
RAVEN_CONFIG = {
"dsn": None
# "dsn": "https://<key>:<secret>@sentry.io/<project>",
}
# Uncomment if you are doing performance profiling with Django Debug Toolbar
DEBUG_TOOLBAR_PANELS = [
#"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = ["127.0.0.1", "172.30.45.146"]
RESULTS_CACHE_SIZE = 20000
# If you need to access an existing variable your code must be in configure
def configure(**kwargs):
# Uncomment if you are doing performance profiling with Django Debug Toolbar
return {
"INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"],
"MIDDLEWARE_CLASSES": (
"debug_toolbar.middleware.DebugToolbarMiddleware",
) + kwargs["MIDDLEWARE_CLASSES"]
}
return {}
|
[
"[email protected]"
] | |
d08e6121ee2290536a5b41e02083249be2e73fcf
|
2ea17b7b5fe875821f05f2d148220cfe7082120f
|
/migrations/versions/59c170d304e0_.py
|
5c39be4beaf508e801753d04c3316590d69575ae
|
[] |
no_license
|
xilixjd/python_react_blog_back_end
|
b1c76759654847717846671906d9bd1a758cd8f7
|
6b88e8f9340d35988c948e7c9ca1dff74dcf75d6
|
refs/heads/master
| 2020-05-20T18:50:13.941816 | 2017-08-08T13:48:49 | 2017-08-08T13:48:49 | 88,735,190 | 19 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
"""empty message
Revision ID: 59c170d304e0
Revises: 390b63a723a6
Create Date: 2017-07-03 22:18:00.342518
"""
# revision identifiers, used by Alembic.
revision = '59c170d304e0'
down_revision = '390b63a723a6'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('chess', sa.Column('chess_board', mysql.LONGTEXT(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('chess', 'chess_board')
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
48424c58d4841f72a346c4d91fa4d737bc3caba8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2648/60678/289889.py
|
2502a253d156988e60af2cf0b3448f5e525988df
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
stringM = input()
stringS = input()
if stringM == 'whatthemomooofun' and stringS == 'moo':
print('whatthefun', end="")
if stringM == 'whatthemomooofun' and stringS == 'o':
print('whatthemmfun', end="")
if stringM == 'whatthemmfunwhatthemomooofun' and stringS == 'o':
print('whatthemmfun', end="")
else:
print(stringM)
print(stringS)
|
[
"[email protected]"
] | |
7a273b69ade85c025a308827da97ee147e75a0af
|
b26f62e1ae52df9e34c4ce27dc0f617416518e23
|
/12-python-level-one/Part9_Functions_Exercises.py
|
fa8e0bc6622df3e7cfeea4082a548c026b1c314e
|
[] |
no_license
|
Rutrle/udemy-django
|
2ba5b39f69fc526c27d074818ff372c91f3b879b
|
53502d8d87f9da907771bc044538844cf18f6895
|
refs/heads/master
| 2023-04-17T13:05:20.539842 | 2021-05-03T23:25:51 | 2021-05-03T23:25:51 | 339,551,118 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,332 |
py
|
#####################################
#### PART 9: FUNCTION EXERCISES #####
#####################################
# Complete the tasks below by writing functions! Keep in mind, these can be
# really tough, it's all about breaking the problem down into smaller, logical
# steps. If you get stuck, don't feel bad about having to peek at the solutions!
#####################
## -- PROBLEM 1 -- ##
#####################
# Given a list of integers, return True if the sequence of numbers 1, 2, 3
# appears in the list somewhere.
# For example:
# arrayCheck([1, 1, 2, 3, 1]) → True
# arrayCheck([1, 1, 2, 4, 1]) → False
# arrayCheck([1, 1, 2, 1, 2, 3]) → True
def arrayCheck_simple(nums):
return (1 in nums) and (2 in nums) and (3 in nums)
# CODE GOES HERE
def arrayCheck(nums):
for num in range(len(nums)-2):
if (nums[num]) == 1 and (nums[num+1]) == 2 and (nums[num+2]) == 3:
return True
return False
#####################
## -- PROBLEM 2 -- ##
#####################
# Given a string, return a new string made of every other character starting
# with the first, so "Hello" yields "Hlo".
# For example:
# stringBits('Hello') → 'Hlo'
# stringBits('Hi') → 'H'
# stringBits('Heeololeo') → 'Hello'
def stringBits(str_v):
return_str = ""
for i in range(0, len(str_v), 2):
return_str = return_str+str_v[i]
return return_str
print(stringBits('Heeololeo'))
print(stringBits('Hi'))
print(stringBits('Hello'))
#####################
## -- PROBLEM 3 -- ##
#####################
# Given two strings, return True if either of the strings appears at the very end
# of the other string, ignoring upper/lower case differences (in other words, the
# computation should not be "case sensitive").
#
# Note: s.lower() returns the lowercase version of a string.
#
# Examples:
#
# end_other('Hiabc', 'abc') → True
# end_other('AbC', 'HiaBc') → True
# end_other('abc', 'abXabc') → True
def end_other(a, b):
a = a.lower()
b = b.lower()
if len(a) < len(b):
a, b = b, a
for i in range(len(b)):
if a[-(len(b)-i)] != b[i]:
return False
return True
print(end_other('Hiabc', 'abc'), end_other(
'AbC', 'HiaBc'), end_other('abc', 'abXabc'))
#####################
## -- PROBLEM 4 -- ##
#####################
# Given a string, return a string where for every char in the original,
# there are two chars.
# doubleChar('The') → 'TThhee'
# doubleChar('AAbb') → 'AAAAbbbb'
# doubleChar('Hi-There') → 'HHii--TThheerree'
def doubleChar(old_str):
new_str = ""
for letter in old_str:
new_str = new_str+letter*2
return new_str
# CODE GOES HERE
print(doubleChar('The'), doubleChar('AAbb'), doubleChar('Hi-There'))
#####################
## -- PROBLEM 5 -- ##
#####################
# Read this problem statement carefully!
# Given 3 int values, a b c, return their sum. However, if any of the values is a
# teen -- in the range 13-19 inclusive -- then that value counts as 0, except 15
# and 16 do not count as a teens. Write a separate helper "def fix_teen(n):"that
# takes in an int value and returns that value fixed for the teen rule.
#
# In this way, you avoid repeating the teen code 3 times (i.e. "decomposition").
# Define the helper below and at the same indent level as the main no_teen_sum().
# Again, you will have two functions for this problem!
#
# Examples:
#
# no_teen_sum(1, 2, 3) → 6
# no_teen_sum(2, 13, 1) → 3
# no_teen_sum(2, 1, 14) → 3
def no_teen_sum(a, b, c):
return fix_teen(a)+fix_teen(b)+fix_teen(c)
def fix_teen(n):
teens = (list(range(13, 20)))
exceptions = [15, 16]
if n in teens and n not in exceptions:
return 0
else:
return n
print(no_teen_sum(1, 2, 3))
print(no_teen_sum(2, 13, 1))
print(no_teen_sum(2, 1, 14))
print(no_teen_sum(2, 1, 15))
#####################
## -- PROBLEM 6 -- ##
#####################
# Return the number of even integers in the given array.
#
# Examples:
#
# count_evens([2, 1, 2, 3, 4]) → 3
# count_evens([2, 2, 0]) → 3
# count_evens([1, 3, 5]) → 0
def count_evens(nums):
count = 0
for item in nums:
if item % 2 == 0:
count = count+1
return count
print(count_evens([2, 1, 2, 3, 4]),
count_evens([2, 2, 0]),
count_evens([1, 3, 5]))
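# For comparison, a one-line variant of count_evens using a generator expression
# (same behaviour as the loop above):
def count_evens_short(nums):
    return sum(1 for n in nums if n % 2 == 0)
print(count_evens_short([2, 1, 2, 3, 4]))  # 3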
|
[
"[email protected]"
] | |
15871357f103326ade70ff1294562689ef8c5375
|
38dc0477ba472146f4fabe109826198705144d03
|
/fastai/layer_optimizer.py
|
1791659fe6e1b601fc2ebaac8a96eab91c43d304
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
dana-kelley/DeOldify
|
ad54a3a44e4a8d90f00ef3d7ee20e56b14683f47
|
fa186f251b8a7dbc120d8a5901fdd0d065c60eec
|
refs/heads/master
| 2020-05-17T04:52:54.795176 | 2019-12-11T05:53:01 | 2019-12-11T05:53:01 | 183,519,547 | 68 | 10 |
MIT
| 2020-02-18T15:44:14 | 2019-04-25T22:41:00 |
Python
|
UTF-8
|
Python
| false | false | 3,313 |
py
|
from .imports import *
from .torch_imports import *
from .core import *
def opt_params(parm, lr, wd):
return {'params': chain_params(parm), 'lr':lr, 'weight_decay':wd}
class LayerOptimizer():
def __init__(self, opt_fn, layer_groups, lrs, wds=None):
if not isinstance(layer_groups, (list,tuple)): layer_groups=[layer_groups]
if not isinstance(lrs, Iterable): lrs=[lrs]
if len(lrs)==1: lrs=lrs*len(layer_groups)
if wds is None: wds=0.
if not isinstance(wds, Iterable): wds=[wds]
if len(wds)==1: wds=wds*len(layer_groups)
self.layer_groups,self.lrs,self.wds = layer_groups,lrs,wds
self.opt = opt_fn(self.opt_params())
def opt_params(self):
assert(len(self.layer_groups) == len(self.lrs))
assert(len(self.layer_groups) == len(self.wds))
params = list(zip(self.layer_groups,self.lrs,self.wds))
return [opt_params(*p) for p in params]
@property
def lr(self): return self.lrs[-1]
@property
def mom(self):
if 'betas' in self.opt.param_groups[0]:
return self.opt.param_groups[0]['betas'][0]
else:
return self.opt.param_groups[0]['momentum']
def set_lrs(self, lrs):
if not isinstance(lrs, Iterable): lrs=[lrs]
if len(lrs)==1: lrs=lrs*len(self.layer_groups)
set_lrs(self.opt, lrs)
self.lrs=lrs
def set_wds_out(self, wds):
if not isinstance(wds, Iterable): wds=[wds]
if len(wds)==1: wds=wds*len(self.layer_groups)
set_wds_out(self.opt, wds)
set_wds(self.opt, [0] * len(self.layer_groups))
self.wds=wds
def set_wds(self, wds):
if not isinstance(wds, Iterable): wds=[wds]
if len(wds)==1: wds=wds*len(self.layer_groups)
set_wds(self.opt, wds)
set_wds_out(self.opt, [0] * len(self.layer_groups))
self.wds=wds
def set_mom(self,momentum):
if 'betas' in self.opt.param_groups[0]:
for pg in self.opt.param_groups: pg['betas'] = (momentum, pg['betas'][1])
else:
for pg in self.opt.param_groups: pg['momentum'] = momentum
def set_beta(self,beta):
if 'betas' in self.opt.param_groups[0]:
for pg in self.opt.param_groups: pg['betas'] = (pg['betas'][0],beta)
elif 'alpha' in self.opt.param_groups[0]:
for pg in self.opt.param_groups: pg['alpha'] = beta
def set_opt_fn(self, opt_fn):
if type(self.opt) != type(opt_fn(self.opt_params())):
self.opt = opt_fn(self.opt_params())
def zip_strict_(l, r):
assert(len(l) == len(r))
return zip(l, r)
def set_lrs(opt, lrs):
if not isinstance(lrs, Iterable): lrs=[lrs]
if len(lrs)==1: lrs=lrs*len(opt.param_groups)
for pg,lr in zip_strict_(opt.param_groups,lrs): pg['lr'] = lr
def set_wds_out(opt, wds):
if not isinstance(wds, Iterable): wds=[wds]
if len(wds)==1: wds=wds*len(opt.param_groups)
assert(len(opt.param_groups) == len(wds))
for pg,wd in zip_strict_(opt.param_groups,wds): pg['wd'] = wd
def set_wds(opt, wds):
if not isinstance(wds, Iterable): wds=[wds]
if len(wds)==1: wds=wds*len(opt.param_groups)
assert(len(opt.param_groups) == len(wds))
for pg,wd in zip_strict_(opt.param_groups,wds): pg['weight_decay'] = wd
|
[
"[email protected]"
] | |
cac02f47d1ecfbce494b5b3cccba8632db18a064
|
802770deb5a98e8e644e9aaf5a6fabc851e6eae1
|
/quiz_test/migrations/0018_auto_20180704_1623.py
|
3d78f9b8dbeb5d59fae13e7a420f414c4ff58225
|
[] |
no_license
|
Subhash1998/quiz
|
1eaf7fe0338eee092f6a5af52d57718c61738930
|
da4c11c4f9271200c63970ab1f90c240f5a10598
|
refs/heads/master
| 2022-12-12T17:34:04.450562 | 2018-07-12T19:41:52 | 2018-07-12T19:41:52 | 140,757,317 | 0 | 0 | null | 2021-06-10T20:33:39 | 2018-07-12T19:39:52 |
Python
|
UTF-8
|
Python
| false | false | 802 |
py
|
# Generated by Django 2.0.5 on 2018-07-04 16:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz_test', '0017_category_test'),
]
operations = [
migrations.RenameField(
model_name='test',
old_name='question_amount',
new_name='amount',
),
migrations.RenameField(
model_name='test',
old_name='question_category',
new_name='category',
),
migrations.RenameField(
model_name='test',
old_name='question_level',
new_name='level',
),
migrations.RenameField(
model_name='test',
old_name='question_type',
new_name='q_type',
),
]
|
[
"[email protected]"
] | |
5f021c7f67037101485a78987bd462e9077c3f9a
|
45dd427ec7450d2fac6fe2454f54a130b509b634
|
/homework_3/preparation2.py
|
f45b9c53cbbbda4b2d028ec030b01ce3a6e5a699
|
[] |
no_license
|
weka511/smac
|
702fe183e3e73889ec663bc1d75bcac07ebb94b5
|
0b257092ff68058fda1d152d5ea8050feeab6fe2
|
refs/heads/master
| 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 561 |
py
|
import os, random
filename = 'disk_configuration.txt'
if os.path.isfile(filename):
f = open(filename, 'r')
L = []
for line in f:
a, b = line.split()
L.append([float(a), float(b)])
f.close()
print ('starting from file', filename)
else:
L = []
for k in range(3):
L.append([random.uniform(0.0, 1.0), random.uniform(0.0, 1.0)])
print ('starting from a new random configuration')
L[0][0] = 3.3
f = open(filename, 'w')
for a in L:
f.write(str(a[0]) + ' ' + str(a[1]) + '\n')
f.close()
|
[
"[email protected]"
] | |
dcf1b8da0e24589c36e224719499d07a0cf14ac6
|
ab11640874d7f7eb6c6c44ecadf0022368fd3d30
|
/ppm.py
|
0a2936220a56bda68cb0ba41af36762844c0711b
|
[] |
no_license
|
bsdphk/BSTJ_reformat
|
074d44d86cb0fccd25e47be5ffc2199c910640bf
|
9e72421ed110a582f67cd94727573da9b68c4ed2
|
refs/heads/master
| 2021-01-25T10:11:42.752665 | 2013-01-23T09:44:26 | 2013-01-23T09:44:26 | 7,771,692 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,280 |
py
|
from __future__ import print_function
import mmap
import os
import sys
class ppm(object):
def __init__(self, fn, a = "r", x = None, y = None):
assert a == "r" or a == "w"
if a == "w":
self.wr = True
assert type(x) == int
assert type(y) == int
assert x > 0
assert y > 0
else:
self.wr = False
if self.wr:
self.fi = open(fn, "w+b")
self.fi.truncate(0)
self.fi.truncate(19 + 3 * x * y)
self.m = mmap.mmap(self.fi.fileno(), 0 )
s = "P6\n%5d %5d\n%3d\n" % (x, y, 255)
self.m[0:len(s)] = s
self.m[len(s):] = str(bytearray((255,255,255)) * (x * y))
else:
self.fi = open(fn, "rb")
self.m = mmap.mmap(self.fi.fileno(),
0, prot=mmap.PROT_READ)
assert self.m[:2] == "P6"
o = 0
n = 0
while True:
x = self.m.find("\n", o, o + 100)
assert x >= -1
s = self.m[o:x]
o = x + 1
if s[0] == '#':
continue
if n == 0:
self.type = s
elif n == 1:
s = s.split()
self.x = int(s[0])
self.y = int(s[1])
elif n == 2:
self.d = int(s)
self.o = o
break
n += 1
self.xhis = None
self.yhis = None
self.fn = fn
def __repr__(self):
return "<P %dx%d %s>" % (self.x, self.y, self.fn)
def rdpx(self, x, y):
i = self.o + 3 * (y * self.x + x)
return bytearray(self.m[i:i+3])
def wrpx(self, x, y, r, g, b):
assert self.wr
if y >= self.y:
print("WRPX hi y", self.y, y)
return
if x >= self.x:
print("WRPX hi x", self.x, x)
return
i = self.o + 3 * (y * self.x + x)
self.m[i:i+3] = str(bytearray((r,g,b)))
def clone(self, fn):
o = ppm(fn, "w", self.x, self.y)
o.m[o.o:] = self.m[self.o:]
return o
def hist(self):
self.yhis = list()
lx = list([0] * (self.x * 3))
for y in range(0, self.y):
o = self.o + y * self.x * 3
w = self.x * 3
v = bytearray(self.m[o:o+w])
self.yhis.append(sum(v)/float(w))
#for i in range(len(v)):
# lx[i] += v[i]
self.xhis = list()
for x in range(0, self.x):
self.xhis.append(sum(lx[x * 3:x*3+3]) / (3 * self.y))
def put_rect(self, xlo, ylo, r):
for b in r:
o = self.o + ylo * self.x * 3 + xlo * 3
self.m[o:o+len(b)] = str(b)
ylo += 1
class rect(object):
def __init__(self, parent, xlo = 0, ylo = 0, xhi = None, yhi = None):
self.p= parent
self.xlo = xlo
self.ylo = ylo
if xhi == None:
xhi = parent.x
self.xhi = xhi
if yhi == None:
yhi = parent.y
self.yhi = yhi
self.typ = None
def set_typ(self, typ):
self.typ = typ
def outline(self, o, r, g, b):
for x in range(self.xlo, self.xhi - 1):
o.wrpx(x, self.ylo, r, g, b)
o.wrpx(x, self.ylo + 1, r, g, b)
o.wrpx(x, self.yhi - 2, r, g, b)
o.wrpx(x, self.yhi - 1, r, g, b)
for y in range(self.ylo, self.yhi - 1):
o.wrpx(self.xlo, y, r, g, b)
o.wrpx(self.xlo + 1, y, r, g, b)
o.wrpx(self.xhi - 2, y, r, g, b)
o.wrpx(self.xhi - 1, y, r, g, b)
def yavg(self):
l = list()
w= (self.xhi - self.xlo) * 3
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
a = sum(bytearray(self.p.m[a0:a0 + w]))
a /= float(w)
l.append(a)
return l
def ymin(self):
l = list()
w= (self.xhi - self.xlo) * 3
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
a = min(bytearray(self.p.m[a0:a0 + w]))
l.append(a)
return l
def ymax(self):
l = list()
w= (self.xhi - self.xlo) * 3
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
a = max(bytearray(self.p.m[a0:a0 + w]))
l.append(a)
return l
def xmin(self):
w= (self.xhi - self.xlo)
l = [255] * w
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
b = bytearray(self.p.m[a0:a0 + w * 3])
for i in range(w):
l[i] = min(l[i], b[i * 3])
return l
def xmax(self):
w= (self.xhi - self.xlo)
l = [0] * w
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
b = bytearray(self.p.m[a0:a0 + w * 3])
for i in range(w):
l[i] = max(l[i], b[i * 3])
return l
def xavg(self):
w= (self.xhi - self.xlo)
l = [0] * w
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
b = bytearray(self.p.m[a0:a0 + w * 3])
for i in range(w):
l[i] += b[i * 3]
for i in range(w):
l[i] /= float(self.yhi - self.ylo)
return l
def ydens(self, lo = 64, hi = 192):
w= (self.xhi - self.xlo)
h= (self.yhi - self.ylo)
dl = [0] * h
dh = [0] * h
for y in range(h):
a0 = self.p.o + (self.xlo + (self.ylo + y) * self.p.x) * 3
b = bytearray(self.p.m[a0:a0 + w * 3])
for i in range(w):
v = b[i]
if v < lo:
dl[y] += 1
elif v > hi:
dh[y] += 1
return dl, dh
def hist(self):
w= (self.xhi - self.xlo)
h= (self.yhi - self.ylo)
hh = [0] * 256
for y in range(h):
a0 = self.p.o + (self.xlo + (self.ylo + y) * self.p.x) * 3
b = bytearray(self.p.m[a0:a0 + w * 3])
for i in range(w):
v = b[i * 3]
hh[v] += 1
return hh
def __iter__(self):
w= (self.xhi - self.xlo)
for y in range(self.ylo, self.yhi):
a0 = self.p.o + (self.xlo + y * self.p.x) * 3
yield bytearray(self.p.m[a0:a0 + w * 3])
def __repr__(self):
return "<R %dx%d+%d+%d>" % (
self.xhi - self.xlo,
self.yhi - self.ylo,
self.xlo, self.ylo
)
|
[
"[email protected]"
] | |
6e7cb657f766e088b1c0fb3cbe8754948b3991a6
|
c3175f2b482691fbfcb9adc391b4d45b6f17b09d
|
/PyOhio_2019/examples/pyscript_example.py
|
0b49ed87ee1a875552f07f3411e05bb70e6a9b23
|
[
"MIT"
] |
permissive
|
python-cmd2/talks
|
27abff4566c6545c00ad59c701831694224b4ccf
|
64547778e12d8a457812bd8034d3c9d74edff407
|
refs/heads/master
| 2023-08-28T20:45:01.123085 | 2021-03-29T20:44:36 | 2021-03-29T20:44:36 | 197,960,510 | 2 | 3 |
MIT
| 2022-01-21T20:03:37 | 2019-07-20T17:14:51 |
Python
|
UTF-8
|
Python
| false | false | 3,750 |
py
|
#!/usr/bin/env python
# coding=utf-8
"""A sample application for how Python scripting can provide conditional control flow of a cmd2 application"""
import os
import cmd2
from cmd2 import style
class CmdLineApp(cmd2.Cmd):
""" Example cmd2 application to showcase conditional control flow in Python scripting within cmd2 aps. """
def __init__(self):
# Enable the optional ipy command if IPython is installed by setting use_ipython=True
super().__init__(use_ipython=True)
self._set_prompt()
self.intro = 'Built-in Python scripting is a killer feature ...'
# Add cwd accessor to Python environment used by pyscripts
self.py_locals['cwd'] = self.cwd
def _set_prompt(self):
"""Set prompt so it displays the current working directory."""
self._cwd = os.getcwd()
self.prompt = style('{!r} $ '.format(self._cwd), fg='cyan')
def postcmd(self, stop: bool, line: str) -> bool:
"""Hook method executed just after a command dispatch is finished.
:param stop: if True, the command has indicated the application should exit
:param line: the command line text for this command
:return: if this is True, the application will exit after this command and the postloop() will run
"""
"""Override this so prompt always displays cwd."""
self._set_prompt()
return stop
def cwd(self):
"""Read-only property used by the pyscript to obtain cwd"""
return self._cwd
@cmd2.with_argument_list
def do_cd(self, arglist):
"""Change directory.
Usage:
cd <new_dir>
"""
# Expect 1 argument, the directory to change to
if not arglist or len(arglist) != 1:
self.perror("cd requires exactly 1 argument")
self.do_help('cd')
return
# Convert relative paths to absolute paths
path = os.path.abspath(os.path.expanduser(arglist[0]))
# Make sure the directory exists, is a directory, and we have read access
out = ''
err = None
data = None
if not os.path.isdir(path):
err = '{!r} is not a directory'.format(path)
elif not os.access(path, os.R_OK):
err = 'You do not have read access to {!r}'.format(path)
else:
try:
os.chdir(path)
except Exception as ex:
err = '{}'.format(ex)
else:
out = 'Successfully changed directory to {!r}\n'.format(path)
self.stdout.write(out)
data = path
if err:
self.perror(err)
self.last_result = data
# Enable tab completion for cd command
def complete_cd(self, text, line, begidx, endidx):
return self.path_complete(text, line, begidx, endidx, path_filter=os.path.isdir)
dir_parser = cmd2.Cmd2ArgumentParser()
dir_parser.add_argument('-l', '--long', action='store_true', help="display in long format with one item per line")
@cmd2.with_argparser_and_unknown_args(dir_parser)
def do_dir(self, args, unknown):
"""List contents of current directory."""
# No arguments for this command
if unknown:
self.perror("dir does not take any positional arguments:")
self.do_help('dir')
return
# Get the contents as a list
contents = os.listdir(self._cwd)
fmt = '{} '
if args.long:
fmt = '{}\n'
for f in contents:
self.stdout.write(fmt.format(f))
self.stdout.write('\n')
self.last_result = contents
if __name__ == '__main__':
import sys
c = CmdLineApp()
sys.exit(c.cmdloop())
|
[
"[email protected]"
] | |
ac3e9c697e353b693c7f8c8310a98068050b8172
|
b25485391a8a2007c31cd98555855b517cc68a64
|
/examples/src/dbnd_examples/tutorial_syntax/T60_task_that_calls_other_tasks.py
|
4e531bd99f3753db413843c5526d5528de64f9e8
|
[
"Apache-2.0"
] |
permissive
|
ipattarapong/dbnd
|
5a2bcbf1752bf8f38ad83e83401226967fee1aa6
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
refs/heads/master
| 2022-12-14T06:45:40.347424 | 2020-09-17T18:12:08 | 2020-09-17T18:12:08 | 299,219,747 | 0 | 0 |
Apache-2.0
| 2020-09-28T07:07:42 | 2020-09-28T07:07:41 | null |
UTF-8
|
Python
| false | false | 536 |
py
|
import pandas as pd
from dbnd import task
@task
def func_return_df():
return pd.DataFrame(data=[[3, 1]], columns=["c1", "c2"])
@task
def func_gets(df):
return df
@task
def func_pipeline(p: int):
df = pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"])
d1 = func_gets(df)
d2 = func_gets(d1)
return d2
@task
def func_pipeline2(p: int):
df = func_return_df()
d1 = func_gets(df)
return d1
if __name__ == "__main__":
import os
os.environ["DBND__TRACKING"] = "true"
func_pipeline2(4)
|
[
"[email protected]"
] | |
1f98e74eef835ca6a17c0f6a2081205ba2b18a15
|
c41069e0cb4105c4092853e60de6bf116b332e70
|
/resaspy/__init__.py
|
a5fdbf6faf8e137b9e35b31325e2ee25d4a95bd9
|
[
"MIT"
] |
permissive
|
ar90n/resaspy
|
5a4e7789dc24f412e1f1f929fb491f349abe90f1
|
58d140ad1e61478ab8f3993676bd0c97ad43ae18
|
refs/heads/master
| 2021-01-11T10:13:13.712176 | 2017-05-10T14:17:57 | 2017-05-10T14:17:57 | 78,554,641 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 510 |
py
|
# -*- coding: utf-8 -*-
"""
resaspy is a simple utility for RESAS api(https://opendata.resas-portal.go.jp).
usage:
>>> from resaspy import Resaspy
>>> resas = Resaspy( key )
>>> r = resas.prefectures()
>>> r.result
:copyright: (c) 2016 by Masahiro Wada.
:license: MIT, see LICENSE for more details.
"""
__title__ = 'resaspy'
__version__ = '0.2.1'
__build__ = 0x021204
__author__ = 'Masahiro Wada'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Masahiro Wada'
from .resaspy import Resaspy
|
[
"[email protected]"
] | |
5376359526eb1ac0de52a283b309692922b54864
|
74a01e6a22fe7c6b552e2ffb9f92d9671c54aa20
|
/bpb/parser/pdf.py
|
fb7471eb62cbce5bdbd4260bce0c4ba579fa4d16
|
[] |
no_license
|
snagwuk/blog_post_bot_cli
|
549805ba988c3753185111575ba759566c7ea17f
|
29e6c6e9e7c48e5ad7c9b4dda26e56226c683290
|
refs/heads/master
| 2022-03-27T01:05:44.441712 | 2020-01-05T01:00:54 | 2020-01-05T01:00:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
# modules for PDF parsing and pretty-printing
import PyPDF2
import pprint
# pdf file object
# you can find the pdf file with the complete code below
pdfFileObj = open('../data/test.pdf', 'rb')
# pdf reader object
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# number of pages in pdf
print(pdfReader.numPages)
# a page object
pageObj = pdfReader.getPage(0)
# extracting text from the page.
# this will print the page object; the extracted text can also be saved into a string
pprint.pprint(pageObj)
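# Hedged addition (not in the original script): with the legacy PyPDF2 1.x API
# used above, the page text itself would come from the page object like this:
# text = pageObj.extractText()
# print(text)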
|
[
"[email protected]"
] | |
69e64077be97c782e455563333f9f0aaafc67fca
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-ims/huaweicloudsdkims/v2/model/list_image_tags_response.py
|
76a1f6343bbb44bb9fa8a53ef623e27886720b43
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,811 |
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListImageTagsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[ResourceTag]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""ListImageTagsResponse - a model defined in huaweicloud sdk"""
super(ListImageTagsResponse, self).__init__()
self._tags = None
self.discriminator = None
if tags is not None:
self.tags = tags
@property
def tags(self):
"""Gets the tags of this ListImageTagsResponse.
        Tag list
:return: The tags of this ListImageTagsResponse.
:rtype: list[ResourceTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListImageTagsResponse.
        Tag list
:param tags: The tags of this ListImageTagsResponse.
:type: list[ResourceTag]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListImageTagsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
6d93b0cd78292a61ae919edfa5a15e96fa5f6f6a
|
c9697437c292df7fefd68559fdd9636066bdb2f1
|
/dev/potentials/sinc_pulse_from_number_of_cycles.py
|
0b1d64c026d7f66d3afbc925237681fad25c3cd4
|
[] |
no_license
|
JoshKarpel/ionization
|
ebdb387483a9bc3fdb52818ab8e897e562ffcc67
|
3056df523ee90147d262b0e8bfaaef6f2678ea11
|
refs/heads/master
| 2021-03-24T13:03:57.469388 | 2020-04-06T03:37:04 | 2020-04-06T03:37:04 | 62,348,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,601 |
py
|
#!/usr/bin/env python
import logging
import os
import numpy as np
import simulacra as si
import simulacra.units as u
FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
OUT_DIR = os.path.join(os.getcwd(), "out", FILE_NAME)
LOGMAN = si.utils.LogManager("simulacra", "ionization", stdout_level=logging.DEBUG)
PLOT_KWARGS = dict(target_dir=OUT_DIR, img_format="png", fig_dpi_scale=6)
if __name__ == "__main__":
with LOGMAN as logger:
number_of_cycles = [0.51, 1, 2, 3]
nc_pulses = [
(
nc,
ion.potentials.SincPulse.from_number_of_cycles(
pulse_width=100 * u.asec, number_of_cycles=nc, phase=u.pi / 2
),
)
for nc in number_of_cycles
]
# note that you actually get twice as many carrier cycles as you specify in the "center"
# because the center of the sinc is twice as wide as a pulse width (it's double-sided)
tb = 1
for nc, pulse in nc_pulses:
print(pulse.info())
times = np.linspace(-tb * pulse.pulse_width, tb * pulse.pulse_width, 10000)
si.vis.xy_plot(
f"Nc={nc}",
times,
pulse.amplitude * np.cos((pulse.omega_carrier * times) + pulse.phase),
pulse.get_electric_field_amplitude(times),
line_labels=["carrier", "pulse"],
line_kwargs=[{"linestyle": "--"}, None],
x_unit=pulse.pulse_width,
y_unit=pulse.amplitude,
**PLOT_KWARGS,
)
|
[
"[email protected]"
] | |
151392182417b31d3dd7cb2a6d0fcfa253fee301
|
436177bf038f9941f67e351796668700ffd1cef2
|
/venv/Lib/site-packages/sklearn/linear_model/__init__.py
|
796b13e6c63d51def5a559c6a79836627fc25551
|
[] |
no_license
|
python019/matplotlib_simple
|
4359d35f174cd2946d96da4d086026661c3d1f9c
|
32e9a8e773f9423153d73811f69822f9567e6de4
|
refs/heads/main
| 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,952 |
py
|
"""
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
|
[
"[email protected]"
] | |
ccbb02c3cf0ac4b9e9da7e4142bf9b2deecd73fd
|
c7a932e28a1a1dbc70c05c62caa43ce6cb691686
|
/fabric/service/monitor/promethues/prometheus.py
|
13d3ebd36fcfa08a10fc933ae20e580484cc043f
|
[] |
no_license
|
Martians/deploy
|
9c2c9a9b0e4431e965960aada0f40df6a34b2e09
|
6fd3f892edd7a12aa69d92f357cc52932df86d9c
|
refs/heads/master
| 2022-01-09T03:29:13.948962 | 2019-04-29T05:15:40 | 2019-04-29T05:15:40 | 112,311,997 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,554 |
py
|
# coding=utf-8
from invoke import task
from common import *
import system
class LocalConfig(LocalBase):
""" 默认配置
"""
def __init__(self):
LocalBase.__init__(self, 'prometheus')
self.source = 'https://github.com/prometheus/prometheus/releases/download/v2.6.0/prometheus-2.6.0.linux-amd64.tar.gz'
self.config = 'prometheus.yml'
self.node_source = 'https://github.com/prometheus/node_exporter/releases/download/v0.17.0/node_exporter-0.17.0.linux-amd64.tar.gz'
self.node_name = 'node_exporter'
self.node_port = 9100
self.node_config = 'node.yaml'
self.client_config = 'client.yaml'
self.alert = 'https://github.com/prometheus/alertmanager/releases/download/v0.16.0-beta.0/alertmanager-0.16.0-beta.0.linux-amd64.tar.gz'
""" 提供个默认参数
该变量定义在头部,这样在函数的默认参数中,也可以使用了
"""
local = LocalConfig()
""" fab install-server
fab install-node
fab start-server
fab start-node
"""
@task
def install_server(c):
c = hosts.one()
download(c, local.name, source=local.source)
""" 安装包下载后,到master上进行解压
"""
scp(c, hosts.get(0), package(), dest=local.temp)
unpack(conn(0), local.name, path=package(local.temp))
config_server(conn(0))
def config_server(c):
sed.path(os.path.join(local.base, local.config))
""" 配置文件
"""
file_sd_node = """
- job_name: 'node'
file_sd_configs:
- files:
- '{node}'""".format(node=local.node_config)
file_sd_client = """
- job_name: 'client'
scrape_interval: 1s
file_sd_configs:
- files:
- '{client}'""".format(client=local.client_config)
sed.append(c, file_sd_node)
sed.append(c, file_sd_client)
sed.append(c, ' - "*_rules.yml"', 'rule_files:')
""" file service discovery
"""
with c.cd(local.base):
c.run("""echo '
- labels:
type: 'node'
targets:' > {node}""".format(node=local.node_config))
c.run("""echo '
- labels:
type: 'client'
targets:' > {client}""".format(client=local.client_config))
@task
def help(c):
c = conn(c, True)
system.help(c, '''
monitor node: {base}/{node}
monitor client: {base}/{client}
monitor rules; {base}/*_rules.yaml\n'''.format(base=local.base, node=local.node_config, client=local.client_config), 'config')
@task
def install_node(c):
c = hosts.one()
download(c, local.node_name, source=local.node_source)
copy_pack(c, dest=local.temp)
hosts.execute('sudo rm -rf /opt/*{}*'.format(local.node_name))
for index in hosts.lists():
unpack(hosts.conn(index), local.node_name, path=package(local.temp))
config_server_node(c)
def config_server_node(c):
c = hosts.conn(0)
append = ''
for host in hosts.lists(index=False):
append += " - '{}:{}'\n".format(host.host, local.node_port)
sed.path(os.path.join(local.base, local.node_config))
sed.append(c, append)
@task
def start_server(c):
c = hosts.conn(0)
c.run(system.nohup('cd {}; nohup ./prometheus --config.file={}'
.format(local.base, local.config), nohup=''), pty=True)
@task
def stop_server(c):
c = hosts.conn(0)
c.run('{}'.format(system.kills('prometheus', string=True)))
@task
def start_node(c):
system.start(local.node_name, system.nohup('cd {}; nohup ./node_exporter --web.listen-address=":{}"'
.format(base(local.node_name), local.node_port), nohup=''), pty=True)
@task
def stop_node(c):
system.stop(local.node_name)
@task
def clean(c):
stop_server(c)
stop_node(c)
system.clean('/opt/{}, /opt/{}'.format(local.name, local.node_name))
@task
def install_alert(c):
pass
# hosts.execute('sudo rm -rf /opt/*kafka*')
#
# for index in hosts.lists():
# unpack(hosts.conn(index), local.name, path=package(local.temp))
@task
def help(c):
c = conn(c, True)
system.help(c, '''
http://192.168.0.81:9090
fab install-server
fab start-server
node:
http://192.168.0.81:9100
fab install-node
fab start-node
''', 'server')
# install_server(conn(0))
# install_node(conn(0))
# start_server(conn(0))
# stop(conn(0))
# clean(conn(0))
# start_node(conn(0))
|
[
"[email protected]"
] | |
797987fe548a6f7c7c46884932412b3e90e8bc1a
|
119437adb7830659307c18b79a9cc3f6bfc6fe40
|
/onnx_model_serving/onnx_model_predict.py
|
95a0f36ce1f8192ebe4a598455dc1cc4eb833cee
|
[] |
no_license
|
percent4/PyTorch_Learning
|
478bec35422cdc66bf41b4258e29fbcb6d24f60c
|
24184d49032c9c9a68142aff89dabe33adc17b52
|
refs/heads/master
| 2023-03-31T03:01:19.372830 | 2023-03-17T17:02:39 | 2023-03-17T17:02:39 | 171,400,828 | 16 | 7 | null | 2023-09-02T08:53:26 | 2019-02-19T03:47:41 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 730 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/2/3 20:09
# @Author : Jclian91
# @File : onnx_model_predict.py
# @Place : Yangpu, Shanghai
import onnxruntime
import torch
import numpy as np
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
ort_session = onnxruntime.InferenceSession("iris.onnx")
# compute ONNX Runtime output prediction
x = torch.Tensor([[6.4, 2.8, 5.6, 2.1]])
print("input size: ", to_numpy(x).shape)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
print(ort_outs[0])
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
|
[
"[email protected]"
] | |
c0bc193c0ca45d24c0490317457e0038ba7a2b66
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/0701-0800/0701-Insert into a Binary Search Tree/0701-Insert into a Binary Search Tree.py
|
fe9dea06abb24db8df133f5a1db2ab1c7bbf15c4
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 |
MIT
| 2020-10-02T12:47:47 | 2017-08-08T05:57:26 |
C++
|
UTF-8
|
Python
| false | false | 617 |
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
inode = TreeNode(val)
if root is None:
return inode
node = root
while node:
prev = node
if node.val < val:
node = node.right
else:
node = node.left
if prev.val < val:
prev.right = inode
else:
prev.left = inode
return root
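# Hedged usage sketch (not part of the original LeetCode stub): insert 5 into
# the BST [4,2,7,1,3] and check where it lands.
if __name__ == '__main__':
    root = TreeNode(4)
    root.left, root.right = TreeNode(2), TreeNode(7)
    root.left.left, root.left.right = TreeNode(1), TreeNode(3)
    Solution().insertIntoBST(root, 5)
    print(root.right.left.val)  # 5 becomes the left child of 7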
|
[
"[email protected]"
] | |
c0a4b1ecee5eb7705fb4d6c81545e651d56f3071
|
d36c4c882089b9b81e6e3b6323eeb9c43f5160a9
|
/7KYU/Square Area Inside Circle/solution.py
|
dead9b201402be6e5751806d9e7f0d05e24b1f5d
|
[] |
no_license
|
stuartstein777/CodeWars
|
a6fdc2fa6c4fcf209986e939698d8075345dd16f
|
d8b449a16c04a9b883c4b5e272cc90a4e6d8a2e6
|
refs/heads/master
| 2023-08-27T20:32:49.018950 | 2023-08-24T23:23:29 | 2023-08-24T23:23:29 | 233,281,814 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
import math
def square_area_to_circle(size):
radius = math.sqrt(size) / 2
return round((math.pi * (radius * radius)), 8)
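# Hedged usage sketch (not part of the kata solution): a square of area 4 has
# side 2, so the inscribed circle has radius 1 and area pi.
if __name__ == '__main__':
    print(square_area_to_circle(4))  # 3.14159265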
|
[
"[email protected]"
] | |
ef14e05b00b14f120326d7133682265e3176e41e
|
93a613f09d564a1d45ecc01b54b73745ce2850b7
|
/majora2/migrations/0023_biosampleartifact_secondary_accession.py
|
0d98165508518f2dfdfd9b53251418ed78c4a31c
|
[] |
no_license
|
pythseq/majora
|
fa17c77fa8a916c688fd2b40744d768dd851b99b
|
40b918d32b4061cddee5f7279f97e70eb894623d
|
refs/heads/master
| 2022-12-23T20:09:41.233844 | 2020-09-28T18:18:42 | 2020-09-28T18:18:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
# Generated by Django 2.2.10 on 2020-03-22 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('majora2', '0022_auto_20200322_1616'),
]
operations = [
migrations.AddField(
model_name='biosampleartifact',
name='secondary_accession',
field=models.CharField(blank=True, max_length=256, null=True),
),
]
|
[
"[email protected]"
] | |
a4192251a1f0165bc9861caa80f4688fd57d879e
|
3b81dfbacf97918d36fb5accbcef0b610378e1a8
|
/python-basic/item/shoot/02-老蒋开枪设计类,创建对象.py
|
141feaad46a116d7abd5069ed6c48ff39f865cf1
|
[] |
no_license
|
XiaoFei-97/the-way-to-python
|
11706f0845f56246ba8ea0df8ff34e622bbdad2d
|
3667a24f4f4238998e9c6ed42cdc49c68881a529
|
refs/heads/master
| 2020-03-21T06:46:36.939073 | 2018-06-23T03:51:11 | 2018-06-23T03:51:11 | 138,241,410 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
class Person(object):
    """Person class"""
    def __init__(self, name):
        super(Person, self).__init__()
        self.name = name
class Gun(object):
    """Gun class"""
    def __init__(self, name):
        super(Gun, self).__init__()
        self.name = name  # records the type of the gun
class Danjia(object):
    """Magazine class"""
    def __init__(self, max_num):
        super(Danjia, self).__init__()
        self.max_num = max_num  # records the capacity of the magazine
class Zidan(object):
    """Bullet class"""
    def __init__(self, shanghai):
        super(Zidan, self).__init__()
        self.shanghai = shanghai  # records the damage of the bullet
def main():
    '''Controls the overall flow of the program'''
    # 1. Create the laojiang (person) object
    laojiang = Person("老蒋")
    # 2. Create an enemy
    # 3. Create a bullet object
    zidan = Zidan(20)
    # 4. Create a magazine object
    danjia = Danjia(30)
    # 5. Create a gun object
    ak47 = Gun("AK47")
    # 6. Load the bullet into the magazine
    # 7. Load the magazine into the gun
    # 8. laojiang picks up the gun
    # 9. laojiang fires at the enemy
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
93fcf60be9475d9cd490935255c7a9803947da13
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/coghq/DistributedTriggerAI.py
|
374f8cd57b81b637e18ee7e8befda3be1dea203f
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null |
UTF-8
|
Python
| false | false | 422 |
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
import DistributedSwitchAI
class DistributedTriggerAI(DistributedSwitchAI.DistributedSwitchAI):
"""
DistributedTriggerAI class: The server side representation
of a Cog HQ trigger. This is the object that remembers what the
trigger is doing. The DistributedTrigger, is the client side
version.
"""
pass
|
[
"[email protected]"
] | |
94d19d1919340743e72d4ebb192343c2b15a4bb0
|
ecb7156e958d10ceb57c66406fb37e59c96c7adf
|
/Leetcode Exercise/Leetcode234_Palindrome Linked List/mySolution.py
|
dbf19308c870dfebb7d2d431d79233914dcedce8
|
[] |
no_license
|
chenshanghao/RestartJobHunting
|
b53141be1cfb8713ae7f65f02428cbe51ea741db
|
25e5e7be2d584faaf26242f4f6d6328f0a6dc4d4
|
refs/heads/master
| 2020-07-27T17:39:58.756787 | 2019-10-18T06:27:27 | 2019-10-18T06:27:27 | 209,175,165 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
if not head or not head.next:
return True
slow, fast = head, head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
slow = slow.next
slow = self.reserveLinkedList(slow)
while slow:
if slow.val != head.val:
return False
slow = slow.next
head = head.next
return True
def reserveLinkedList(self, head):
if not head or not head.next:
return head
dummy = ListNode(-1)
while(head):
tmp = head
head = head.next
tmp.next = dummy.next
dummy.next = tmp
return dummy.next
|
[
"[email protected]"
] | |
7fc045062d1d679bc74cc6bd4c75f09c7eccaacd
|
d4eec8dafdf95084189316dfbc774d0b6ae21463
|
/bcs-app/backend/apps/configuration/yaml_mode/views.py
|
b834138be79fee180860850707b420bcdb547d9f
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
dd-guo/bk-bcs-saas
|
8b9411a22cee9c7982595ff4860720e603dbfaa9
|
45d69d9a72039fbb4f05638785af7dcbc1c075e4
|
refs/heads/master
| 2020-12-01T04:03:22.626481 | 2019-12-27T06:10:51 | 2019-12-27T06:10:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,828 |
py
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import json
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.renderers import BrowsableAPIRenderer
from . import serializers, init_tpls
from .deployer import DeployController
from .release import ReleaseData, ReleaseDataProcessor
from backend.apps.datalog.utils import create_data_project, create_and_start_standard_data_flow
from backend.apps.configuration.mixins import TemplatePermission
from backend.apps.configuration.models import get_template_by_project_and_id
from backend.apps.configuration.showversion.serializers import GetShowVersionSLZ, GetLatestShowVersionSLZ
from backend.components import paas_cc
from backend.utils.error_codes import error_codes
from backend.utils.renderers import BKAPIRenderer
class InitialTemplatesViewSet(viewsets.ViewSet):
renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)
def get_initial_templates(self, request, project_id):
return Response(init_tpls.get_initial_templates())
class YamlTemplateViewSet(viewsets.ViewSet, TemplatePermission):
renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)
def _template_data(self, request, **kwargs):
template_data = request.data or {}
template_data.update(**kwargs)
return template_data
def create_template(self, request, project_id):
"""
request.data = {
'name': '',
'desc': '',
'show_version': {
'name': '',
}
'template_files': [{
'resource_name': 'Deployment',
'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'create'}]
}]
}
"""
data = self._template_data(request, project_id=project_id)
serializer = serializers.CreateTemplateSLZ(data=data, context={'request': request})
serializer.is_valid(raise_exception=True)
template = serializer.save()
return Response({'template_id': template.id})
def update_template(self, request, project_id, template_id):
"""
request.data = {
'name': '',
'desc': '',
'show_version': {
'name': '',
'show_version_id': '',
}
'template_files': [{
'resource_name': 'Deployment',
'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'update', 'id': 3}]
}]
}
"""
template = get_template_by_project_and_id(project_id, template_id)
data = self._template_data(request, project_id=project_id)
serializer = serializers.UpdateTemplateSLZ(template, data=data, context={'request': request})
serializer.is_valid(raise_exception=True)
template = serializer.save()
return Response({'template_id': template.id})
def get_template_by_show_version(self, request, project_id, template_id, show_version_id):
serializer = GetShowVersionSLZ(data=self.kwargs)
serializer.is_valid(raise_exception=True)
validated_data = serializer.validated_data
template = validated_data['template']
self.can_view_template(request, template)
with_file_content = request.query_params.get('with_file_content')
with_file_content = False if with_file_content == 'false' else True
serializer = serializers.GetTemplateFilesSLZ(
validated_data, context={'with_file_content': with_file_content}
)
return Response(serializer.data)
def get_template(self, request, project_id, template_id):
serializer = GetLatestShowVersionSLZ(data=self.kwargs)
serializer.is_valid(raise_exception=True)
validated_data = serializer.validated_data
template = validated_data['template']
self.can_view_template(request, template)
serializer = serializers.GetTemplateFilesSLZ(
validated_data, context={'with_file_content': True}
)
return Response(serializer.data)
class TemplateReleaseViewSet(viewsets.ViewSet, TemplatePermission):
renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)
def _request_data(self, request, project_id, template_id, show_version_id):
request_data = request.data or {}
show_version = {
'show_version_id': show_version_id,
'template_id': template_id,
'project_id': project_id
}
request_data['show_version'] = show_version
return request_data
# TODO use resources module function
def _get_namespace_info(self, access_token, project_id, namespace_id):
resp = paas_cc.get_namespace(access_token, project_id, namespace_id)
if resp.get('code') != 0:
raise error_codes.APIError(f"get namespace(id:{namespace_id}) info error: {resp.get('message')}")
return resp.get('data')
def _raw_release_data(self, project_id, initial_data):
show_version = initial_data['show_version']
namespace_info = self._get_namespace_info(
self.request.user.token.access_token, project_id, initial_data['namespace_id']
)
raw_release_data = ReleaseData(
project_id=project_id,
namespace_info=namespace_info,
show_version=show_version['show_version'],
template_files=initial_data['template_files']
)
return raw_release_data
def preview_or_apply(self, request, project_id, template_id, show_version_id):
"""
request.data = {
'is_preview': True,
'namespace_id': 'test',
'template_files': [{
'resource_name': 'Deployment',
'files': [{'name': 'nginx.yaml', 'id': 3}]
}]
}
"""
data = self._request_data(request, project_id, template_id, show_version_id)
serializer = serializers.TemplateReleaseSLZ(data=data)
serializer.is_valid(raise_exception=True)
validated_data = serializer.validated_data
template = validated_data['show_version']['template']
self.can_use_template(request, template)
        # Create the project information in the data platform
username = request.user.username
cc_app_id = request.project.cc_app_id
english_name = request.project.english_name
create_data_project(username, project_id, cc_app_id, english_name)
        # Create and start the standard log collection task
create_and_start_standard_data_flow(username, project_id, cc_app_id)
processor = ReleaseDataProcessor(
user=self.request.user, raw_release_data=self._raw_release_data(project_id, validated_data)
)
release_data = processor.release_data()
if validated_data['is_preview']:
return Response(release_data.template_files)
controller = DeployController(
user=self.request.user,
release_data=release_data
)
controller.apply()
return Response()
|
[
"[email protected]"
] | |
00fa4d011176e57511ded5ed70adff09c00870ef
|
162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d
|
/examples/linear_model/plot_ard.py
|
d372542275a23bab2e67a592ff0f450684f6bdcd
|
[] |
no_license
|
testsleeekGithub/trex
|
2af21fa95f9372f153dbe91941a93937480f4e2f
|
9d27a9b44d814ede3996a37365d63814214260ae
|
refs/heads/master
| 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,909 |
py
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from mrex.linear_model import ARDRegression, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], np.full(len(relevant_features), 5.),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
|
[
"[email protected]"
] | |
46f1492e0079cbd9e43a52216150bcb80318ccfe
|
2feaddc19de5490a1b55af08079d7e1d866f4c2d
|
/test/includes/common.py
|
b4fad481873ade2896fdf63bd350d326132c9932
|
[
"BSD-3-Clause"
] |
permissive
|
drakkar-lig/walt-python-packages
|
4beba93394da306550a54313800bb455b8652e81
|
2e487767c697aded22ba3e08b26964b45e154559
|
refs/heads/master
| 2023-09-04T10:53:48.768130 | 2023-09-01T08:05:11 | 2023-09-01T08:05:11 | 24,328,535 | 6 | 3 |
BSD-3-Clause
| 2023-09-01T08:12:36 | 2014-09-22T12:56:10 |
Python
|
UTF-8
|
Python
| false | false | 1,764 |
py
|
import os
import sys
from pathlib import Path
TEST_IMAGE_URL = "hub:eduble/pc-x86-64-test-suite"
def test_suite_image():
p = Path("/tmp/test_suite_image")
if not p.exists():
p.write_text(f"pc-x86-64-test-suite-{os.getpid()}\n")
return p.read_text().strip()
def test_suite_node():
p = Path("/tmp/test_suite_node")
if not p.exists():
p.write_text(f"testnode-{os.getpid()}\n")
return p.read_text().strip()
def test_create_vnode():
node_name = test_suite_node()
from walt.client import api
node = api.nodes.create_vnode(node_name)
assert node.name == node_name
assert node_name in api.nodes.get_nodes()
return node
TEST_CONTEXT = {}
def set_py_test_mode(mode, num_test=0):
TEST_CONTEXT["mode"] = mode
TEST_CONTEXT["num_test"] = int(num_test)
def define_test(s):
if TEST_CONTEXT["mode"] == "describe":
print(TEST_CONTEXT["num_test"], s)
TEST_CONTEXT["num_test"] += 1
def decorate(f):
pass
elif TEST_CONTEXT["mode"] == "run":
if TEST_CONTEXT["num_test"] == 0:
def decorate(f):
f()
else:
def decorate(f):
pass
TEST_CONTEXT["num_test"] -= 1
return decorate
def skip_test(reason):
skip_notify_file = Path(os.environ["TESTSUITE_TMP_DIR"]) / "skipped"
skip_notify_file.write_text(reason)
sys.exit(1)
def get_first_items(item_set, n_items, item_label):
it = iter(item_set)
result = []
try:
for _ in range(n_items):
result.append(next(it))
except StopIteration:
skip_test(f"requires at least two {item_label}s")
if n_items == 1:
return result[0]
else:
return tuple(result)
|
[
"[email protected]"
] | |
000c1ebab7161995ba2a7f947ebcf545cd414d7d
|
6b5431368cb046167d71c1f865506b8175127400
|
/challenges/estimando-o-valor-de-pi-1/tests.py
|
620927474107e9a20a9a8627a9b42bd69d3f8c26
|
[] |
no_license
|
Insper/design-de-software-exercicios
|
e142f4824a57c80f063d617ace0caa0be746521e
|
3b77f0fb1bc3d76bb99ea318ac6a5a423df2d310
|
refs/heads/master
| 2023-07-03T12:21:36.088136 | 2021-08-04T16:18:03 | 2021-08-04T16:18:03 | 294,813,936 | 0 | 1 | null | 2021-08-04T16:18:04 | 2020-09-11T21:17:24 |
Python
|
UTF-8
|
Python
| false | false | 617 |
py
|
from strtest import str_test
class TestCase(str_test.TestCaseWrapper):
TIMEOUT = 2
def test_1(self):
for n in [1, 2, 3, 4, 10, 100, 1000, 10000]:
s = 0
for i in range(1, n + 1):
s += 6 / (i**2)
esperado = s**0.5
obtido = self.function(n)
msg = 'Não funcionou para n={0}. Esperado={1}. Obtido={2}'.format(
n, esperado, obtido)
if abs(obtido - s) < 0.01:
msg += ' Será que você não se esqueceu da raíz quadrada?'
self.assertAlmostEqual(esperado, obtido, msg=msg)
|
[
"[email protected]"
] | |
5da15392b61fbee5433962aa065a01f22a496917
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02675/s264656425.py
|
2c982c55dd80b0a0be281ffc5f2da71e4b7ffb85
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
N = str(input())
N = N[::-1]
if N[0] == '2' or N[0] =='4' or N[0] =='5' or N[0] =='7' or N[0] =='9' :
print('hon')
elif N[0] == '0' or N[0] =='1' or N[0] =='6' or N[0] =='8' :
print('pon')
else:
print('bon')
|
[
"[email protected]"
] | |
af3e3b9bcce575f57877e191b46b2cfb11a4e81c
|
c243661d9d321b39256ad3dee4f5ce4b30a1fa93
|
/packages/compress-stringify/python-web-demo/parse_string_decompress_load_sample_inputs.py
|
e9a295edc58bd8b9dccffb1a87464cf0883016d1
|
[
"Apache-2.0"
] |
permissive
|
InsightSoftwareConsortium/itk-wasm
|
409621ea9430065c51759e4398959fe0ea3ab64a
|
63369f1439583f27c77a4534ea2ef204c63dfa39
|
refs/heads/main
| 2023-08-31T13:31:27.333792 | 2023-08-30T04:20:30 | 2023-08-30T04:20:30 | 45,812,381 | 69 | 23 |
Apache-2.0
| 2023-09-11T21:09:22 | 2015-11-09T03:20:17 |
C++
|
UTF-8
|
Python
| false | false | 761 |
py
|
import js
async def load_sample_inputs(model):
sample_input = bytes([100,97,116,97,58,97,112,112,108,105,99,97,116,105,111,110,47,105,119,105,43,99,98,111,114,43,122,115,116,100,59,98,97,115,101,54,52,44,75,76,85,118,47,83,65,69,73,81,65,65,51,113,50,43,55,119,61,61])
model.inputs["input"] = sample_input
input_element = js.document.getElementById("parse_string_decompress-input-details")
input_element.innerHTML = f"<pre>{str(sample_input)}</pre>"
input_element.disabled = False
parse_string = True
model.options["parse_string"] = parse_string
parse_string_element = js.document.querySelector("#parse_string_decompress-inputs sl-checkbox[name=parse-string]")
parse_string_element.checked = parse_string
return model
|
[
"[email protected]"
] | |
44f1e787c7b23aac5830825fe0b255ac28d4cdf4
|
9689ebc06e7c9a5c1b5b19d34dbcf0f5b5b82cb6
|
/callcenter/migrations/0081_auto_20181220_2253.py
|
1a65a3e4074be197ec3a50bab2a3e38b75af4f25
|
[] |
no_license
|
tigrezhito1/Ramas
|
94fe57dc4235616522aa50b36f5a655861ecbb9f
|
fa894fa69f6bf2a645179cadc11fb8809e82700a
|
refs/heads/master
| 2020-05-02T07:03:03.564208 | 2019-03-26T14:55:29 | 2019-03-26T14:55:29 | 177,808,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,875 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-20 22:53
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('callcenter', '0080_merge_20181220_2241'),
]
operations = [
migrations.AlterField(
model_name='agente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 433213)),
),
migrations.AlterField(
model_name='api',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 430484)),
),
migrations.AlterField(
model_name='base',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 428354)),
),
migrations.AlterField(
model_name='campania',
name='fecha',
field=models.DateTimeField(db_column='fecha cargada', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 426702)),
),
migrations.AlterField(
model_name='cliente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 424761)),
),
migrations.AlterField(
model_name='estado',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 426128)),
),
migrations.AlterField(
model_name='supervisor',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2018, 12, 20, 22, 52, 59, 423992)),
),
]
|
[
"[email protected]"
] | |
2029aeff51b31fb2e24f1e95c740deb621b4268b
|
34270cb66280545a37ec58381c9bac819d626a32
|
/tests/api/test_request.py
|
a023918ab092efa85317007b5ef389d2c5734e9e
|
[
"Apache-2.0"
] |
permissive
|
ArdanaCLM/opsconsole-server
|
ffc7320138b2635506295bf367bc9e3225a744ca
|
d98c230aad058616d6b59079842893b290332cd9
|
refs/heads/master
| 2021-05-16T12:07:23.195171 | 2018-08-03T17:18:17 | 2018-08-03T20:02:39 | 105,184,962 | 1 | 2 |
Apache-2.0
| 2018-02-13T05:40:25 | 2017-09-28T18:29:24 |
Python
|
UTF-8
|
Python
| false | false | 4,058 |
py
|
# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
from bll import api
from tests import util
from bll.api.request import BllRequest
class Test(util.TestCase):
def test_chained_creation(self):
req1 = BllRequest(target=util.randomword(),
operation=util.randomword())
req2 = BllRequest(req1)
self.assertEquals(req1, req2)
def test_creation_from_dict(self):
req1 = dict(target=util.randomword(),
operation=util.randomword())
req2 = BllRequest(req1)
req3 = BllRequest(req2)
self.assertEquals(req2, req3)
def test_overrides(self):
# Test that explicitly supplied values override those in the
# request parameter of the BllRequest constructor
req1 = BllRequest(target=util.randomword(),
auth_token=util.randomword(),
operation=util.randomword(),
action=util.randomword(),
data=util.randomdict())
target = util.randomword()
operation = util.randomword()
action = util.randomword()
auth_token = util.randomword()
req2 = BllRequest(request=req1, target=target, operation=operation,
action=action, auth_token=auth_token)
self.assertEquals(req2['action'], action)
self.assertEquals(req2['target'], target)
self.assertEquals(req2['auth_token'], auth_token)
self.assertEquals(req2['data']['operation'], operation)
def test_data_remains_gone_when_none_supplied(self):
# Verify that when neither 'operation' nor 'data' are supplied, that
# the resulting request has no 'data' key
req1 = BllRequest(target=util.randomword(), action=util.randomword())
self.assertFalse(req1.get('data'))
def test_flattening(self):
# Verify that we get the same result whether creating from a
# dictionary, individual fields, or a nested data element
txn_id = util.randomhex()
target = util.randomword()
op = util.randomword()
d = util.randomdict()
req1 = BllRequest(dict(target=target, foo="baz", txn_id=txn_id,
operation=op, bar=d))
req2 = BllRequest(target=target, foo="baz", txn_id=txn_id,
operation=op, bar=d)
req3 = BllRequest(target=target, txn_id=txn_id,
data={'operation': op, 'foo': 'baz',
'bar': d})
self.assertDictEqual(req1, req2)
self.assertDictEqual(req2, req3)
self.assertIn("operation", req1['data'])
self.assertIn("foo", req1['data'])
self.assertIn("bar", req1['data'])
self.assertNotIn("target", req1['data'])
self.assertNotIn("txn_id", req1['data'])
def test_doubly_nested_data(self):
target = util.randomword()
d = util.randomdict()
req = BllRequest(target=target, data={'data': d})
# Make sure that the doubly nested data got populated correctly
self.assertDictEqual(d, req['data']['data'])
def test_get_data(self):
# Verify that get_data returns all non reserved fields correctly
req = BllRequest(target=util.randomword(),
action="GET",
foo=util.randomword(),
txn_id=util.randomhex(),
auth_token=util.randomhex(),
operation=util.randomword(),
version="1")
data = req.get_data()
self.assertNotIn("action", data)
self.assertNotIn("target", data)
self.assertNotIn("txn_id", data)
self.assertNotIn("auth_token", data)
self.assertNotIn("region", data)
self.assertNotIn("data", data)
self.assertNotIn(api.VERSION, data)
self.assertNotIn("operation", data)
self.assertIn("foo", data)
|
[
"[email protected]"
] | |
2bb8d377c3f0f92ed567eeddc1e97303100d5013
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_winos.py
|
4e2b048338398edd14d25251a3ce0aeb08022260
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# class header
class _WINOS():
    def __init__(self,):
        self.name = "WINOS"
        self.definitions = ['wino']  # assumed: the bare name `wino` was undefined, so mirror self.basic
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['wino']
|
[
"[email protected]"
] | |
03ff69f5d49343b455e515bad16a54b876459c51
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/88/usersdata/236/58781/submittedfiles/listas.py
|
3bd37d6ae7004bd6da0f8a8243ae437e9ce52610
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 347 |
py
|
# -*- coding: utf-8 -*-
def MAIORDEGRAU(A):
    maior = 0
    for i in range(1, len(A), 1):
        degrau = abs(A[i] - A[i-1])
        if degrau > maior:
            maior = degrau
    print(maior)
N = int(input('Digite o número de termos da lista: '))
A = []
for i in range(1, N+1, 1):
    numero = int(input('n:'))
    A.append(numero)
MAIORDEGRAU(A)
|
[
"[email protected]"
] | |
02bb21280cce17cc249b2e2402eaa7c6392d9123
|
49cbc5f4735152ecd0dfff45fd719f2705c0ab30
|
/exp1.py
|
c058e12eee2b5909adc7ca1fb3b79bc6e56b65b6
|
[
"MIT"
] |
permissive
|
praveenpmin/Python
|
964cc3652bfe79be93b71094fe504e7b6b072def
|
9e2316b990a9c8c379b584339f918d23db32821a
|
refs/heads/master
| 2023-09-02T21:42:19.287328 | 2023-06-28T05:53:41 | 2023-06-28T05:53:41 | 157,956,206 | 0 | 0 |
MIT
| 2023-08-30T03:40:37 | 2018-11-17T06:08:21 |
Python
|
UTF-8
|
Python
| false | false | 166 |
py
|
a = 4
b = 9
if b % a == 0 :
print ("b is divisible by a")
elif b + 1 == 10:
print ("Increment in b produces 10")
else:
print ("You are in else statement")
|
[
"[email protected]"
] | |
45997b82d56895703ff4a3a134de0adc9dd4a8a1
|
7437ad1203ff272a482e4a7c7266afdbc7a0e619
|
/lra/models/gpu_16g/linear_transformer_exp/listops/r1/config.py
|
e995e9eec539a6581e7a43ebafb563a58630585f
|
[] |
no_license
|
maximzubkov/spe
|
4ccc59d538a2cb4e5f9b0118ef79933eed0b8d95
|
d877feb0f6b935152e5431ce374606ba72c08d65
|
refs/heads/main
| 2023-08-23T02:08:14.253693 | 2021-10-05T17:25:36 | 2021-10-05T17:25:36 | 385,636,912 | 0 | 0 | null | 2021-10-05T17:25:37 | 2021-07-13T14:42:19 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,311 |
py
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from fast_self_attention import fast_self_attention as favor
import jax
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_listops_config.get_config()
config.random_seed = 0
config.model_type = "transformer"
config.attention_fn = favor.make_fast_generalized_attention(
qkv_dim=config.qkv_dim // config.num_heads,
features_type='deterministic',
kernel_fn=jax.lax.exp,
lax_scan_unroll=16)
config.batch_size = 8
config.learning_rate = config.learning_rate / 32 * 8
config.num_train_steps = 10000
return config
def get_hyper(hyper):
return hyper.product([])
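# Worked example of the learning-rate rescaling above (the base value is an
# assumption; it lives in base_listops_config, not in this file): if the base
# config uses learning_rate = 0.05 tuned for batch size 32, the value applied
# here is 0.05 / 32 * 8 = 0.0125 to match batch_size = 8.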
|
[
"[email protected]"
] | |
6c602b72c293927fb1d528411a5844309da2a86d
|
89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04
|
/base/third_party/libevent/libevent_nacl_nonsfi.gyp
|
91e2557b765dae92c5247763243dd8bc9e04f7ce
|
[
"BSD-3-Clause"
] |
permissive
|
bino7/chromium
|
8d26f84a1b6e38a73d1b97fea6057c634eff68cb
|
4666a6bb6fdcb1114afecf77bdaa239d9787b752
|
refs/heads/master
| 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 |
BSD-3-Clause
| 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null |
UTF-8
|
Python
| false | false | 1,223 |
gyp
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../../../build/common_untrusted.gypi',
],
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0', {
'targets': [
{
'target_name': 'event_nacl_nonsfi',
'type': 'none',
'sources': [
'buffer.c',
'evbuffer.c',
'event.c',
'evutil.c',
'log.c',
'poll.c',
'strlcpy.c',
'nacl_nonsfi/config.h',
'nacl_nonsfi/event-config.h',
'nacl_nonsfi/random.c',
'nacl_nonsfi/signal_stub.c',
],
'defines': [
'HAVE_CONFIG_H',
],
'include_dirs': [
'nacl_nonsfi',
],
'variables': {
'nacl_untrusted_build': 1,
'nlib_target': 'libevent_nacl_nonsfi.a',
'build_glibc': 0,
'build_newlib': 0,
'build_irt': 0,
'build_pnacl_newlib': 0,
'build_nonsfi_helper': 1,
},
},
],
}],
],
}
|
[
"[email protected]"
] | |
2d297b6201f59e67d8543a6fc8dcc4d29204b0d0
|
3107b28d397f62fe913996fa50f099dc38fda20e
|
/qcfractal/services/gridoptimization_service.py
|
4234942c56c4e89fa84931630e001f15de75b3ca
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yudongqiu/QCFractal
|
71ccfcdd194cdf3c6b807d4317d2439aa8b37394
|
43b5b4807dfe19f78177288f204aab1066de2dea
|
refs/heads/master
| 2020-04-27T09:53:56.291827 | 2019-07-29T19:46:39 | 2019-07-29T19:46:39 | 174,233,001 | 0 | 0 |
BSD-3-Clause
| 2019-03-06T22:49:09 | 2019-03-06T22:49:09 | null |
UTF-8
|
Python
| false | false | 7,849 |
py
|
"""
Wraps grid optimization procedures
"""
import json
from typing import Dict, Set
import numpy as np
from .service_util import BaseService, expand_ndimensional_grid
from ..extras import get_information
from ..interface.models import GridOptimizationRecord, Molecule, json_encoders
__all__ = ["GridOptimizationService"]
class GridOptimizationService(BaseService):
# Index info
service: str = "gridoptimization"
program: str = "qcfractal"
procedure: str = "gridoptimization"
# Output
output: GridOptimizationRecord
# Temporaries
grid_optimizations: Dict[str, str] = {}
seeds: Set[tuple] = set()
complete: Set[tuple] = set()
dimensions: tuple
iteration: int
starting_grid: tuple
final_energies = {}
# Task helpers
task_map: Dict[str, str] = {}
# Templates
constraint_template: str
optimization_template: str
# keyword_template: KeywordSet
starting_molecule: Molecule
class Config:
json_encoders = json_encoders
@classmethod
def initialize_from_api(cls, storage_socket, logger, service_input, tag=None, priority=None):
# Build the record
output = GridOptimizationRecord(
**service_input.dict(exclude={"initial_molecule"}),
initial_molecule=service_input.initial_molecule.id,
starting_molecule=service_input.initial_molecule.id,
provenance={
"creator": "qcfractal",
"version": get_information("version"),
"routine": "qcfractal.services.gridoptimization"
},
final_energy_dict={},
grid_optimizations={},
starting_grid=[0])
meta = {"output": output}
# Build dihedral template
constraint_template = []
for scan in output.keywords.scans:
tmp = {"type": scan.type, "indices": scan.indices}
constraint_template.append(tmp)
meta["constraint_template"] = json.dumps(constraint_template)
# Build optimization template
meta["optimization_template"] = json.dumps({
"meta": {
"procedure": "optimization",
"keywords": output.optimization_spec.keywords,
"program": output.optimization_spec.program,
"qc_spec": output.qc_spec.dict(),
"tag": meta.pop("tag", None)
},
})
# Move around geometric data
meta["optimization_program"] = output.optimization_spec.program
meta["hash_index"] = output.hash_index
# Hard coded data, # TODO
meta["dimensions"] = output.get_scan_dimensions()
meta["starting_molecule"] = service_input.initial_molecule
if output.keywords.preoptimization:
meta["iteration"] = -2
meta["starting_grid"] = (0 for x in meta["dimensions"])
else:
meta["iteration"] = 0
meta["starting_grid"] = GridOptimizationService._calculate_starting_grid(
output.keywords.scans, service_input.initial_molecule)
meta["task_tag"] = tag
meta["task_priority"] = priority
return cls(**meta, storage_socket=storage_socket, logger=logger)
@staticmethod
def _calculate_starting_grid(scans, molecule):
starting_grid = []
for scan in scans:
# Find closest index
if scan.step_type == "absolute":
m = molecule.measure(scan.indices)
elif scan.step_type == "relative":
m = 0
else:
raise KeyError("'step_type' of '{}' not understood.".format(scan.step_type))
idx = np.abs(np.array(scan.steps) - m).argmin()
starting_grid.append(int(idx))
return tuple(starting_grid)
def iterate(self):
self.status = "RUNNING"
# Special pre-optimization iteration
if self.iteration == -2:
packet = json.loads(self.optimization_template)
packet["data"] = [self.output.initial_molecule]
self.task_manager.submit_tasks("optimization", {"initial_opt": packet})
self.iteration = -1
return False
elif self.iteration == -1:
if self.task_manager.done() is False:
return False
complete_tasks = self.task_manager.get_tasks()
self.starting_molecule = self.storage_socket.get_molecules(
id=[complete_tasks["initial_opt"]["final_molecule"]])["data"][0]
self.starting_grid = self._calculate_starting_grid(self.output.keywords.scans, self.starting_molecule)
self.submit_optimization_tasks({self.output.serialize_key(self.starting_grid): self.starting_molecule.id})
self.iteration = 1
return False
# Special start iteration
elif self.iteration == 0:
self.submit_optimization_tasks({self.output.serialize_key(self.starting_grid): self.starting_molecule.id})
self.iteration = 1
return False
# Check if tasks are done
if self.task_manager.done() is False:
return False
# Obtain complete tasks and figure out future tasks
complete_tasks = self.task_manager.get_tasks()
for k, v in complete_tasks.items():
self.final_energies[k] = v["energies"][-1]
self.grid_optimizations[k] = v["id"]
        # Build out the new set of seeds
complete_seeds = set(tuple(json.loads(k)) for k in complete_tasks.keys())
self.complete |= complete_seeds
self.seeds = complete_seeds
# print("Complete", self.complete)
# Compute new points
new_points_list = expand_ndimensional_grid(self.dimensions, self.seeds, self.complete)
# print(new_points_list)
# grid = np.zeros(self.dimensions, dtype=np.int)
# for x in self.complete:
# grid[x] = 1
# print(grid)
next_tasks = {}
for new_points in new_points_list:
old = self.output.serialize_key(new_points[0])
new = self.output.serialize_key(new_points[1])
next_tasks[new] = complete_tasks[old]["final_molecule"]
# All done
if len(next_tasks) == 0:
return self.finalize()
self.submit_optimization_tasks(next_tasks)
return False
def submit_optimization_tasks(self, task_dict):
new_tasks = {}
for key, mol in task_dict.items():
# Update molecule
packet = json.loads(self.optimization_template)
# Construct constraints
constraints = json.loads(self.constraint_template)
scan_indices = self.output.deserialize_key(key)
for con_num, scan in enumerate(self.output.keywords.scans):
idx = scan_indices[con_num]
if scan.step_type == "absolute":
constraints[con_num]["value"] = scan.steps[idx]
else:
constraints[con_num]["value"] = (scan.steps[idx] + self.starting_molecule.measure(scan.indices))
packet["meta"]["keywords"]["constraints"] = {"set": constraints}
# Build new molecule
packet["data"] = [mol]
new_tasks[key] = packet
self.task_manager.submit_tasks("optimization", new_tasks)
def finalize(self):
"""
Finishes adding data to the GridOptimizationRecord object
"""
self.output = self.output.copy(update={
"status": "COMPLETE",
"starting_molecule": self.starting_molecule.id,
"starting_grid": self.starting_grid,
"grid_optimizations": self.grid_optimizations,
"final_energy_dict": self.final_energies,
})
return True
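# Illustrative arithmetic for _calculate_starting_grid above (the scan values here
# are hypothetical, not taken from this file): for an "absolute" scan with
# steps = [-120, -60, 0, 60, 120] and a measured value m = 65,
# np.abs(np.array(steps) - m).argmin() is 3, so that dimension of the grid walk
# starts at index 3.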
|
[
"[email protected]"
] | |
3cd41709b1409e8e8c53ca77f1b68be35db2c15f
|
9e7483cca39a82bcc219a51e9ccfeadeb026bff3
|
/4_Flask_MySQL/6_users/server.py
|
6cfc5605456d5ca529de90fb8c461ed66f74bd8b
|
[] |
no_license
|
nramiscal/PYTHON
|
2489cbfbe8d22fb6a96b5d2beab0218d0be30fe9
|
75bd1ef9e22abb7d17b6c92196f62cfbbd749199
|
refs/heads/master
| 2021-04-29T16:53:55.591187 | 2018-02-15T17:17:56 | 2018-02-15T17:17:56 | 121,657,850 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,833 |
py
|
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
app = Flask(__name__)
mysql = MySQLConnector(app,'users')
app.secret_key = 'ThisIsSecret'
@app.route('/')
def home():
return redirect('/users')
@app.route('/users')
def index():
users = mysql.query_db("SELECT * FROM users")
return render_template('index.html', users=users)
@app.route('/users/new')
def new():
return render_template('new.html')
@app.route('/users/<id>/edit')
def edit(id):
# Write query to select specific user by id. At every point where
# we want to insert data, we write ":" and variable name.
query = "SELECT * FROM users WHERE id = :specific_id"
# Then define a dictionary with key that matches :variable_name in query.
data = {'specific_id': id}
# Run query with inserted data.
users = mysql.query_db(query, data)
    # users should be a list containing a single matching row,
    # so we pass the value at [0] to our template under the alias user.
return render_template('edit.html', user=users[0])
@app.route('/users/<id>')
def show(id):
# Write query to select specific user by id. At every point where
# we want to insert data, we write ":" and variable name.
query = "SELECT * FROM users WHERE id = :specific_id"
# Then define a dictionary with key that matches :variable_name in query.
data = {'specific_id': id}
# Run query with inserted data.
users = mysql.query_db(query, data)
    # users should be a list containing a single matching row,
    # so we pass the value at [0] to our template under the alias user.
return render_template('show.html', user=users[0])
@app.route('/users/create', methods=['POST'])
def create():
query = "INSERT INTO users (first_name, last_name, email, created_at, updated_at) VALUES (:first_name, :last_name, :email, NOW(), NOW())"
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email': request.form['email'],
}
mysql.query_db(query, data)
return redirect('/users')
@app.route('/users/<id>/destroy')
def destroy(id):
query = "DELETE FROM users WHERE id = :id"
data = {'id': id}
mysql.query_db(query, data)
return redirect('/users')
@app.route('/users/<id>', methods=['POST'])
def update(id):
    print(request.form)
query = "UPDATE users SET first_name = :first_name, last_name = :last_name, email = :email WHERE id = :id"
data = {
'first_name':request.form['first_name'],
'last_name':request.form['last_name'],
'email':request.form['email'],
'id':id
}
    print(data)
mysql.query_db(query, data)
return redirect('/users')
app.run(debug=True)
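# General form of the parameterized-query pattern used above (names in this
# sketch are illustrative only):
#   query = "SELECT * FROM users WHERE id = :specific_id"
#   data = {'specific_id': some_id}
#   rows = mysql.query_db(query, data)
# query_db binds each :placeholder to the matching dictionary key, so user input
# is never concatenated into the SQL string itself.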
|
[
"[email protected]"
] | |
cfb1d1fad72c5c899ab7cf3e94f854cfd11ddc76
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/FXJSMM/YW_FXJSMM_SZSJ_302.py
|
68de52e02daf69b9998018265070985ae3fe4143
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,023 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FXJSMM_SZSJ_302(xtp_test_case):
# YW_FXJSMM_SZSJ_302
def test_YW_FXJSMM_SZSJ_302(self):
title = '交易日本方最优卖-最后一次卖为非100的倍数'
        # Scenario (see `title` above): trading-day own-side-best ("forward best")
        # sell where the final sell quantity is not a multiple of 100.
        # Define the expected values for this test case.
        # Expected status values: initial, unfilled, partially filled, fully filled,
        # partial-cancel reported, partially cancelled, reported pending cancel,
        # cancelled, rejected, cancel rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed.
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('001061', '2', '0', '0', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails.
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 10399,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
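# Note on the order data above (an interpretation of the scenario title, not stated
# in the file): quantity 10399 = 103 * 100 + 99, so the final slice of the sell is
# an odd lot of 99 shares rather than a multiple of 100.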
|
[
"[email protected]"
] |