blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbb6268281ee09c15af62c26d0df2d1c6065e735
|
f9d5bc590bd6c6274d7a6efec0f60cac1d8286b2
|
/assets/coins/monero/moneroImportPrices.py
|
6a92df9ceca004c233c3ecc5ce2799c0931dad42
|
[] |
no_license
|
pr0logas/grepblockBEpython
|
35c83c1bf2114fc9417bedff6cf2a6e2ad2e667e
|
bbeaa290d13d80f993d843c7f1dbbfd373eee332
|
refs/heads/master
| 2022-10-03T23:35:44.600740 | 2020-03-09T08:24:53 | 2020-03-09T08:24:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 782 |
py
|
#:: By GrepBlock.com developers // pr0logas
#:: Modified date: 2019-11-30
#:: Description: This file is a workspace for Prices importation.

import sys, time
from time import gmtime, strftime

from monero import *
sys.path.append('../../../')
from mongoDB import *
from parsePrices import parseCoinGeckoPrices

# Target database/collection come from the shared mongoDB module.
db = database
col = collectionForPricesUSD

# Init Classes;
MC = mongoConnection(mongoAuth, db, col)
PP = parseCoinGeckoPrices(apiProvider, vsCurrencyUSD, assetName)

# Fetch the current USD price from CoinGecko.
result = PP.parsePrice()

# Attach a Unix timestamp to the parsed price document.
aggregatedData = PP.aggregateInsertUnixTime(result)

# Insert to MongoDB; `res` is concatenated below, so it is assumed to be a
# string (TODO confirm insertPricesData's return type).
res = MC.insertPricesData(collectionForPricesUSD, aggregatedData)

timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
# Parenthesized single-argument print works on both Python 2 and Python 3
# (the original bare `print` statement was Python-2-only); also fixes the
# "Succefully" typo in the logged message.
print(timeSet + " Successfully inserted asset price: $" + res)
|
[
"[email protected]"
] | |
f6d2ffae909f5992e8ceea3bdc223d04acc73d4b
|
2c3da6e0bddf55d64d650040bbf286c47b31811a
|
/学习路线/1.python基础/day05/02-for-else语句.py
|
ec56422c4833eede814e9a25e0dca957f39f600e
|
[
"MIT"
] |
permissive
|
Bngzifei/PythonNotes
|
76bd53db3033a9c51ab4bdd727842cd89607b584
|
01590e1b6c1bc0f04aa2d355fa2553c04cce27f2
|
refs/heads/master
| 2023-02-04T06:49:00.725463 | 2020-12-15T09:26:40 | 2020-12-15T09:26:40 | 155,154,662 | 1 | 2 |
MIT
| 2020-09-08T01:30:19 | 2018-10-29T05:02:48 |
Python
|
UTF-8
|
Python
| false | false | 670 |
py
|
# Demonstration of Python's for/else clause.
list1 = ["zhansan", "lisi1", 'ww']

# A plain if/else inside the loop runs one branch on every iteration -- a
# logic error for a search (it reports "not found" once per non-match):
# for name in list1:
#     if name == 'lisi':
#         print('找到')        # "found"
#     else:
#         print("没有找到")    # "not found"

# Translation of the note below: after a for loop completes normally, the
# else block attached to it runs once; to keep the else block from running,
# break inside the loop.
"""当for执行完成后,默认for后面的else都会执行一次,如果不想让for后面的else执行,在for里面写个break"""
for name in list1:  # Batch search: `if x in seq` only answers True/False; use for+break/else when you need the matched item and a single not-found message.
    if name == 'lisi':
        print('找到')
        break
else:
    print('没找到')
# for...else belongs to the loop: well suited to a batch search that should
# report "not found" exactly once.
|
[
"[email protected]"
] | |
e5029b3854dbaef24fb6cce6c6025ff4d71cca34
|
e8e2f3cb21e3f3c289b890dcf3cde567bb92dc32
|
/venv/bin/chardetect
|
a471d60fdc696af75d4b511e1d3b9a0af3f271c1
|
[] |
no_license
|
Timur597/Feliz
|
a0071b93a87eab015dd205e14cba88bcb5f34926
|
6f712ded791c84dee71f75934fb77d0ae101f5e6
|
refs/heads/master
| 2023-05-27T15:54:54.782528 | 2021-06-09T16:34:45 | 2021-06-09T16:34:45 | 373,058,036 | 0 | 1 | null | 2021-06-09T16:47:59 | 2021-06-02T06:07:12 |
Python
|
UTF-8
|
Python
| false | false | 262 |
#!/home/timur/PyCharmProjects/feeliz-master/venv/bin/python
# -*- coding: utf-8 -*-
"""Console-script shim for chardet's `chardetect` command.

Standard pip/setuptools entry-point wrapper: it delegates straight to
chardet.cli.chardetect.main().
"""
import re
import sys

from chardet.cli.chardetect import main

if __name__ == '__main__':
    # Strip the platform-specific launcher suffix from argv[0] so usage/help
    # text shows a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | ||
fea402ed06f40785cacbf954f34865f10e62de55
|
76dba08689db40edf2d01a98856fa2a20d98d679
|
/甲鱼python/课程代码/第11讲/第11讲课后作业.py
|
f38d6087bebb08ecebe94960c7ce4388591454c7
|
[] |
no_license
|
pangfeiyo/PythonLearn
|
ce0747d75b53eb21acb6199acfe10934778420b2
|
b514b3d7baa62fa7b801d26ff49266f02cb9cbd2
|
refs/heads/master
| 2021-05-11T10:20:14.818774 | 2020-01-16T15:47:16 | 2020-01-16T15:47:16 | 118,096,858 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 515 |
py
|
# Pop the last element off the list and insert it at the front.
member = ['一','甲鱼','玩笑']
member.insert(0,member.pop())
print(member)

# Python supports negative indices; the last element of a list is index -1.
list2 = [1,3,2,9,7,8]
print(list2[-3:-1])

# Difference between slicing and plain assignment:
# a full slice ([:]) makes a shallow copy; assignment only rebinds a name.
sy1 = [1,3,2,9,7,8]
sy2 = sy1[:]  # slice-copy sy1's contents into sy2
sy3 = sy1     # bind sy3 to the very same list object as sy1
sy1.sort()    # sort sy1 in place
print('sy1:',sy1)
print('sy2:',sy2)  # the copy keeps the original order
print('sy3:',sy3)  # same object as sy1, so it shows the sorted order
|
[
"[email protected]"
] | |
8edf7add9dd89a5a59c9d84008f56f0adbe83abc
|
b7b40fffd7d192b89a7ad3bdb791a7dbd072ac64
|
/axelrod/tests/test_memoryone.py
|
44167991b5bf6387399275371a16858e90bad540
|
[
"MIT"
] |
permissive
|
DEFALT303/Axelrod
|
f91911ad7a404c30edfef38afd02319fcd12bc15
|
e59fc40ebb705afe05cea6f30e282d1e9c621259
|
refs/heads/master
| 2020-09-24T08:39:49.107919 | 2015-04-16T16:15:42 | 2015-04-16T16:15:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,640 |
py
|
"""Test for the memoryone strategies."""
import random
import axelrod
from test_player import TestPlayer
class TestWinStayLostShift(TestPlayer):
    # NOTE(review): class name says "LostShift" while the strategy is
    # "Lose-Shift" -- likely a typo, but renaming would change test discovery.
    name = "Win-Stay Lose-Shift"
    player = axelrod.WinStayLoseShift

    def test_strategy(self):
        """Starts by cooperating"""
        P1 = self.player()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), 'C')

    def test_effect_of_strategy(self):
        """Check that switches if does not get best payoff."""
        P1 = self.player()
        P2 = axelrod.Player()
        # Mutual cooperation paid off: stay with 'C'.
        P1.history = ['C']
        P2.history = ['C']
        self.assertEqual(P1.strategy(P2), 'C')
        # Was exploited last round: shift to 'D'.
        P1.history = ['C']
        P2.history = ['D']
        self.assertEqual(P1.strategy(P2), 'D')
        # Successful defection: stay with 'D'.
        P1.history = ['D']
        P2.history = ['C']
        self.assertEqual(P1.strategy(P2), 'D')
        # Mutual defection hurt: shift back to 'C'.
        P1.history = ['D']
        P2.history = ['D']
        self.assertEqual(P1.strategy(P2), 'C')
class TestGTFT(TestPlayer):
    """Tests for Generous Tit-For-Tat, a stochastic strategy; seeds are fixed
    so the 'unlikely' branch fires on the first draw, then the typical one."""

    name = "Generous Tit-For-Tat"
    player = axelrod.GTFT
    stochastic = True

    def test_strategy(self):
        P1 = self.player()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['C']
        random.seed(2)
        # With probability .05 will defect
        self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['D']
        random.seed(31)
        # With probability .05 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        P1.history = ['D']
        P2.history = ['C']
        random.seed(2)
        # With probability .05 will defect
        self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['D']
        P2.history = ['D']
        random.seed(31)
        # With probability .05 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticCooperator(TestPlayer):
    """Tests for the Stochastic Cooperator strategy; each seed is chosen so
    the low-probability response comes first, followed by the common one."""

    name = "Stochastic Cooperator"
    player = axelrod.StochasticCooperator
    stochastic = True

    def test_strategy(self):
        P1 = self.player()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['C']
        random.seed(15)
        # With probability .065 will defect
        self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['D']
        random.seed(1)
        # With probability .229 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        P1.history = ['D']
        P2.history = ['C']
        random.seed(3)
        # With probability .266 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        P1.history = ['D']
        P2.history = ['D']
        random.seed(13)
        # With probability .42 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticWSLS(TestPlayer):
    """Tests for Stochastic Win-Stay Lose-Shift; seeded so the .05-probability
    response appears first in each history configuration."""

    name = "Stochastic WSLS"
    player = axelrod.StochasticWSLS
    stochastic = True

    def test_strategy(self):
        P1 = self.player()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['C']
        random.seed(2)
        # With probability .05 will defect
        self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['D']
        random.seed(31)
        # With probability .05 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        P1.history = ['D']
        P2.history = ['C']
        random.seed(31)
        # With probability .05 will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        # But otherwise will defect
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        P1.history = ['D']
        P2.history = ['D']
        random.seed(2)
        # With probability .05 will defect
        self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
class TestZDChi(TestPlayer):
    """Tests for the ZDChi zero-determinant strategy."""

    name = "ZDChi"
    player = axelrod.ZDChi
    stochastic = True

    def test_four_vector(self):
        # Pin the memory-one probability vector keyed by (my_move, their_move).
        P1 = self.player()
        expected_dictionary = {('C', 'D'): 0.5, ('D', 'C'): 0.75, ('D', 'D'): 0.0, ('C', 'C'): 1.1666666666666667}
        for key in sorted(expected_dictionary.keys()):
            self.assertAlmostEqual(P1._four_vector[key],
                                   expected_dictionary[key])

    def test_strategy(self):
        # Testing the expected value is difficult here so these just ensure that
        # future changes that break these tests will be examined carefully.
        P1 = self.player()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['C']
        random.seed(2)
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        P2.history = ['D']
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['D']
        P2.history = ['C']
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['D']
        P2.history = ['D']
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
        self.assertEqual(P1.strategy(P2), 'D')
|
[
"[email protected]"
] | |
46a2e88f482b70548c82568f1d10bf2234d6b0e0
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_20/models/array.py
|
b454053102b55f917520181b04db56e7ba183f91
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 |
BSD-2-Clause
| 2023-09-08T09:08:30 | 2018-12-04T17:02:51 |
Python
|
UTF-8
|
Python
| false | false | 7,115 |
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class Array(object):
    """
    Swagger-codegen model for the FlashArray `array` resource.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    # Attribute name -> swagger type string; drives to_dict() iteration.
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'banner': 'str',
        'capacity': 'int',
        'console_lock_enabled': 'bool',
        'encryption': 'ArrayEncryption',
        'eradication_config': 'EradicationConfig',
        'idle_timeout': 'int',
        'ntp_servers': 'list[str]',
        'os': 'str',
        'parity': 'float',
        'scsi_timeout': 'int',
        'space': 'Space',
        'version': 'str'
    }
    # Attribute name -> JSON key in the REST payload (identical here).
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'banner': 'banner',
        'capacity': 'capacity',
        'console_lock_enabled': 'console_lock_enabled',
        'encryption': 'encryption',
        'eradication_config': 'eradication_config',
        'idle_timeout': 'idle_timeout',
        'ntp_servers': 'ntp_servers',
        'os': 'os',
        'parity': 'parity',
        'scsi_timeout': 'scsi_timeout',
        'space': 'space',
        'version': 'version'
    }
    # No constructor arguments are required for this model.
    required_args = {
    }

    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        banner=None,  # type: str
        capacity=None,  # type: int
        console_lock_enabled=None,  # type: bool
        encryption=None,  # type: models.ArrayEncryption
        eradication_config=None,  # type: models.EradicationConfig
        idle_timeout=None,  # type: int
        ntp_servers=None,  # type: List[str]
        os=None,  # type: str
        parity=None,  # type: float
        scsi_timeout=None,  # type: int
        space=None,  # type: models.Space
        version=None,  # type: str
    ):
        """
        Keyword args:
            id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
            name (str): A user-specified name. The name must be locally unique and can be changed.
            banner (str)
            capacity (int): The usable capacity in bytes.
            console_lock_enabled (bool)
            encryption (ArrayEncryption)
            eradication_config (EradicationConfig)
            idle_timeout (int): The idle timeout in milliseconds. Valid values include `0` and any multiple of `60000` in the range of `300000` and `10800000`. Any other values are rounded down to the nearest multiple of `60000`.
            ntp_servers (list[str])
            os (str): Specifies the operating system. Valid values are `Purity`, `Purity//FA`, and `Purity//FB`.
            parity (float): A representation of data redundancy on the array. Data redundancy is rebuilt automatically by the system whenever parity is less than `1.0`.
            scsi_timeout (int): The SCSI timeout. If not specified, defaults to `60s`.
            space (Space)
            version (str)
        """
        # Only assign attributes that were explicitly passed, so unset fields
        # stay absent (hasattr() is False for them -- see __getattribute__).
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if banner is not None:
            self.banner = banner
        if capacity is not None:
            self.capacity = capacity
        if console_lock_enabled is not None:
            self.console_lock_enabled = console_lock_enabled
        if encryption is not None:
            self.encryption = encryption
        if eradication_config is not None:
            self.eradication_config = eradication_config
        if idle_timeout is not None:
            self.idle_timeout = idle_timeout
        if ntp_servers is not None:
            self.ntp_servers = ntp_servers
        if os is not None:
            self.os = os
        if parity is not None:
            self.parity = parity
        if scsi_timeout is not None:
            self.scsi_timeout = scsi_timeout
        if space is not None:
            self.space = space
        if version is not None:
            self.version = version

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the model definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `Array`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Presumably a class-level Property placeholder marking an unset
            # field; raising AttributeError here makes hasattr() return False.
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # dict-style read access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `Array`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # dict-style write access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `Array`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        # dict-style deletion, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `Array`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        # Together with __getitem__ this makes dict(model) work.
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(Array, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Array):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
ec1a5719f569715605b75d20d9dea2e9ea1a20ef
|
eee741a9d6d55357fb597e0cc3379085f47c2c13
|
/processData.py
|
85071304b5d9fe473ea285664cbd0cd5dac57f28
|
[] |
no_license
|
mbstacy/gdal_ok_mesonet_data_process
|
6505be783056eeade9664782035c284d76f29e1c
|
18fe989560d54cc0fff336462c26897778daeaef
|
refs/heads/master
| 2021-01-10T07:32:55.865328 | 2016-02-23T22:42:48 | 2016-02-23T22:42:48 | 52,396,676 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,487 |
py
|
#!/usr/bin/env python
'''
Created on Feb 2, 2016
@author: ledapsTwo
'''
from osgeo import gdal,osr
from os import path
from csv import DictReader
import shlex,sys
import pandas as pd
import numpy as np
class raster:
    """Wraps a GDAL dataset: the pixel grid as an array plus the coordinate
    transforms needed to convert between lon/lat and pixel/line positions."""

    def __init__(self,inFile):
        gf = gdal.Open(inFile)
        self.raster = gf
        self.grid = gf.ReadAsArray()
        #get number of rows and columns in the shape
        # A 3-D array means multiple bands (bands, rows, cols); 2-D is a
        # single band (rows, cols).
        self.numGrids = 1
        if len(self.grid.shape) == 3:
            self.numGrids,self.numRows,self.numCols = self.grid.shape
        else:
            self.numRows,self.numCols = self.grid.shape
        #get projection and spatial reference infomation
        srs = osr.SpatialReference()
        srs.ImportFromWkt(gf.GetProjection())
        srsLatLong = srs.CloneGeogCS()
        self.srs = srs ; self.srsLatLong = srsLatLong
        #create coordinate transform object for sample/line to lon/lat conversion
        self.ct = osr.CoordinateTransformation(srs, srsLatLong)
        #create coordinate transform object for lon/lat to sample/line conversion
        self.ctInv = osr.CoordinateTransformation(srsLatLong, srs)
        #get geographic transform information in cartesian space
        # geoMatrix = (ulx, xres, xskew, uly, yskew, yres) per GDAL convention.
        self.geoMatrix = gf.GetGeoTransform()
        #with no north correction this is equal to (pixel height * pixel width) = -900
        # `dev` is the determinant of the 2x2 affine part of the geotransform.
        dev = (self.geoMatrix[1] * self.geoMatrix[5]) - (self.geoMatrix[2] * self.geoMatrix[4])
        #divide height/width components by this -900 to get a decimal degrees value
        # Pre-computed inverse geotransform, consumed by
        # convertLatLontoPixelLine().
        self.gtinv = (self.geoMatrix[0], self.geoMatrix[5]/dev, -1 * self.geoMatrix[2]/dev, self.geoMatrix[3], -1 * self.geoMatrix[4]/dev, self.geoMatrix[1]/dev)
def parseMesonetFile():
    """Convert the global `mesoFile` (.mdf) to CSV once, then return a
    csv.DictReader over the CSV.

    Relies on the module-level global `mesoFile`. The CSV file handle is
    deliberately left open because the returned DictReader streams from it.
    """
    mesoCSV = "{0}.csv".format(mesoFile.split('.')[0]) #path.join(curDir,'%s.csv'%path.basename(mesoFile).split('.')[0])
    # Only regenerate the CSV if it does not already exist.
    if not path.exists(mesoCSV):
        with open(mesoFile,'r') as f1:
            data = f1.read()
        data_list=data.split('\n')
        table = []
        # Skip the first two header lines and the trailing empty line;
        # shlex.split handles the whitespace-delimited fields.
        for line in data_list[2:-1]:
            table.append(shlex.split(line))
        # First parsed row holds the column names.
        headers = table.pop(0)
        df = pd.DataFrame(table,columns=headers)
        outFile = path.basename(mesoFile).split('.')[0]
        # NOTE(review): written relative to the CWD while mesoCSV keeps the
        # original directory prefix -- these only match when mesoFile is in
        # the CWD; confirm.
        df.to_csv("%s.csv" % (outFile),index=False)
    f = open(mesoCSV,'r')
    aSites = DictReader(f)
    return aSites
def convertLatLontoPixelLine(inGrid,lat,lon):
    """Map a lon/lat coordinate to a fractional (row, col) on inGrid.

    Projects lon/lat into the grid's cartesian space via inGrid.ctInv, then
    applies the pre-computed inverse geotransform stored in inGrid.gtinv.
    """
    ulx, ic1, ic2, uly, ic4, ic5 = inGrid.gtinv
    # Geographic -> cartesian coordinates in the raster's projection.
    x, y, _ = inGrid.ctInv.TransformPoint(lon, lat, 0)
    # Move the origin to the grid's upper-left corner.
    dx = x - ulx
    dy = y - uly
    # Inverse affine transform: cartesian offsets -> pixel/line.
    sample = ic1 * dx + ic2 * dy
    line = ic4 * dx + ic5 * dy
    return line, sample
def convertPixelLinetoLatLong(inGrid,row,col):
    """Map a (row, col) pixel position to (lat, lon) at the pixel centre."""
    ulx, xres, xskew, uly, yskew, yres = inGrid.geoMatrix
    # Cartesian coordinates of the pixel centre (hence the half-pixel offset).
    X = ulx + xres * col + xskew * row + xres / 2.0
    Y = uly + yskew * col + yres * row + yres / 2.0
    # Back-project from the raster's cartesian space to geographic lon/lat.
    lon, lat, _height = inGrid.ct.TransformPoint(X, Y)
    return round(lat, 11), round(lon, 11)
def main():
    """Sample the raster at every Mesonet site and write summary<ext>.csv.

    Reads the module-level globals tifFile/mesoFile/ext set in the entry
    block; output columns are the fixed header fields followed by all other
    site attributes in sorted order.
    """
    #read in TIF file as a raster object
    tif = raster(tifFile)
    #read in mesonet data and break at each new line
    aSites = parseMesonetFile()
    aOut = []
    #walk through each site, pull the lat/lon and determine point on raster grid
    for mesoSite in aSites:
        siteID = mesoSite["STID"] #the site ID from the CSV
        stNum = mesoSite["STNM"] #station number
        stTime = mesoSite["TIME"] #station time
        lat = float(mesoSite["LATT"]) #the latitude from the CSV
        lon = float(mesoSite["LONG"]) #the longitude from the CSV
        #the (fractional) row and column on the raster above this mesonet site
        rasterRow,rasterColumn = convertLatLontoPixelLine(tif, lat, lon)
        #the value on the raster at this grid point; numpy requires integer
        #indices (float indexing raises on modern numpy), so truncate to the
        #containing cell
        rasterValue = tif.grid[int(rasterRow),int(rasterColumn)]
        #build skeleton for header and station lines
        header = "STID,STNM,TIME,LATT,LONG,RASTERVAL"
        strOut = "%s,%s,%s,%s,%s,%s"%(siteID,stNum,stTime,lat,lon,rasterValue)
        #walk through all attributes and place into above strings
        for param in sorted(mesoSite.keys()):
            #skip any of these as they have already been defined above
            if param in ["STID","STNM","TIME","LATT","LONG"]: continue
            header += ",%s"%param
            strOut += ",%s"%mesoSite[param]
        #add header first so it will be at the top of the output file
        if header not in aOut: aOut.append(header)
        #append station attributes to list
        aOut.append(strOut)
    #convert list to block of text and write to file
    outFile = open("summary%s.csv"%ext,'w')
    outFile.write("\n".join(aOut))
    outFile.close()
    print ("DONE")
if __name__ == "__main__":
    # `global` is a no-op at module scope -- plain assignments here already
    # create module-level names visible to main() and its helpers.
    #curDir = path.dirname(path.realpath(__file__))
    tifFile = sys.argv[1]   # input GeoTIFF path, e.g. y12.modisSSEBopET.tif
    mesoFile = sys.argv[2]  # input Mesonet .mdf path, e.g. 2012_annual.mdf
    ext = ""                # suffix for the summary CSV filename
    main()
|
[
"[email protected]"
] | |
044ef7733d33340e7cf093fa5b1b04a826c31548
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_summary.py
|
18d09be192ac1b4023f64ab173806411d3dcea87
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
#calss header
class _SUMMARY():
def __init__(self,):
self.name = "SUMMARY"
self.definitions = [u'done suddenly, without discussion or legal arrangements: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
a0d550e2fdb493ba6c99d7490c06e07da09bcdde
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/reservoir.pyi
|
322d1d38c3d821602e3e08cb5f590e0f85608dd7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 337 |
pyi
|
from enum import Enum
class Reservoir:
    # Sampling-quota reservoir (type stub: signatures only).
    # Presumably tracks a borrowable per-interval quota for the X-Ray
    # sampler -- confirm against the implementation module.
    def __init__(self) -> None: ...
    def borrow_or_take(self, now, can_borrow): ...
    def load_quota(self, quota, TTL, interval) -> None: ...
    @property
    def quota(self): ...
    @property
    def TTL(self): ...
class ReservoirDecision(Enum):
    # Outcome of asking the reservoir for permission to sample.
    TAKE: str
    BORROW: str
    NO: str
|
[
"[email protected]"
] | |
60a71622737aa6e8a866253cebae37379422f533
|
7d84000f2977def7118b4c93a47b9d71c4ee38f8
|
/app/src/utils/log_streamer.py
|
ad37f010c1610fdbb84800feadcdb0afd9627020
|
[] |
no_license
|
tensorci/core
|
d405d17099987163dfc589711345ce414ace406e
|
50d18bb43f73b1d5d47fefad543c2554e87a6520
|
refs/heads/master
| 2021-03-19T13:27:26.219591 | 2020-12-03T01:14:57 | 2020-12-03T01:14:57 | 110,917,313 | 0 | 0 | null | 2020-12-03T01:15:26 | 2017-11-16T03:20:09 |
Python
|
UTF-8
|
Python
| false | false | 3,800 |
py
|
import log_formatter
from src import logger, dbi
from pyredis import redis
from src.helpers.definitions import tci_keep_alive
# TODO: This file is disgusting -- make it less disgusting
def should_complete_stream(data, deployment):
    """Decide whether a deploy-log entry terminates the stream.

    Returns True when the entry is flagged as the final one
    (last_entry == 'True'), or when it is an error entry -- in which case
    the deployment is also marked failed and the failure logged.
    """
    if data.get('level') == 'error':
        # Fail the deployment and record the failure internally.
        logger.error('DEPLOYMENT FAILED: uid={}'.format(deployment.uid))
        deployment.fail()
        return True
    return data.get('last_entry') == 'True'
def stream_deploy_logs(deployment, stream_key=None, block=30000):
    """Generator yielding formatted deploy-log lines tailed from a redis stream.

    Each redis read blocks up to `block` ms; a keep-alive line is yielded on
    timeout so the downstream HTTP connection stays open. The stream ends
    once should_complete_stream() reports a final (or error) entry.
    """
    complete = False
    # Peek at the earliest entry (if any) so it can be replayed before tailing.
    first_log = redis.xrange(stream_key, count=1)
    # If logs already exist, yield the first one and then
    # iterate over timestamps to continue yielding
    if first_log:
        ts, data = first_log[0]
        first_log_yielded = False
        while not complete:
            try:
                # yield the first log and continue
                if not first_log_yielded:
                    first_log_yielded = True
                    complete = should_complete_stream(data, deployment)
                    yield log_formatter.deploy_log(data)
                    continue
                # Get all logs since timestamp=ts
                result = redis.xread(block=block, **{stream_key: ts})
                if not result:
                    # Timed out with nothing new -- keep the client alive.
                    yield tci_keep_alive + '\n'
                    continue
                items = result.get(stream_key)
                if not items:
                    yield tci_keep_alive + '\n'
                    continue
                for item in items:
                    # `ts` advances so the next xread resumes after this entry.
                    ts, data = item
                    complete = should_complete_stream(data, deployment)
                    yield log_formatter.deploy_log(data)
            except:
                # NOTE(review): bare except ends the stream on ANY error,
                # including GeneratorExit when the client disconnects --
                # presumably intentional best-effort behavior; confirm.
                break
    else:
        # No entries yet: tail from the beginning of the stream.
        ts = '0-0'
        while not complete:
            try:
                # Get all logs since timestamp=ts
                result = redis.xread(block=block, **{stream_key: ts})
                if not result:
                    yield tci_keep_alive + '\n'
                    continue
                items = result.get(stream_key)
                if not items:
                    yield tci_keep_alive + '\n'
                    continue
                for item in items:
                    ts, data = item
                    complete = should_complete_stream(data, deployment)
                    yield log_formatter.deploy_log(data)
            except:
                break
def stream_train_logs(deployment, block=30000):
    """Generator yielding formatted training-log lines tailed from redis.

    Mirrors stream_deploy_logs() but derives the stream key from the
    deployment and never terminates on its own (no completion check) --
    it runs until a redis error or the consumer closes the generator.
    """
    stream_key = deployment.train_log()
    # Peek at the earliest entry (if any) so it can be replayed before tailing.
    first_log = redis.xrange(stream_key, count=1)
    # If logs already exist, yield the first one and then
    # iterate over timestamps to continue yielding
    if first_log:
        ts, data = first_log[0]
        first_log_yielded = False
        while True:
            try:
                # yield the first log and continue
                if not first_log_yielded:
                    first_log_yielded = True
                    yield log_formatter.training_log(data, with_color=True)
                    continue
                # Get all logs since timestamp=ts
                result = redis.xread(block=block, **{stream_key: ts})
                if not result:
                    # Timed out with nothing new -- keep the client alive.
                    yield tci_keep_alive + '\n'
                    continue
                items = result.get(stream_key)
                if not items:
                    yield tci_keep_alive + '\n'
                    continue
                for item in items:
                    # `ts` advances so the next xread resumes after this entry.
                    ts, data = item
                    yield log_formatter.training_log(data, with_color=True)
            except:
                # NOTE(review): bare except ends the stream on ANY error,
                # including GeneratorExit when the client disconnects --
                # presumably intentional best-effort behavior; confirm.
                break
    else:
        # No entries yet: tail from the beginning of the stream.
        ts = '0-0'
        while True:
            try:
                # Get all logs since timestamp=ts
                result = redis.xread(block=block, **{stream_key: ts})
                if not result:
                    yield tci_keep_alive + '\n'
                    continue
                items = result.get(stream_key)
                if not items:
                    yield tci_keep_alive + '\n'
                    continue
                for item in items:
                    ts, data = item
                    yield log_formatter.training_log(data, with_color=True)
            except:
                break
|
[
"[email protected]"
] | |
67d8405dae494c985db55a7991291fe6a81e390a
|
38c10c01007624cd2056884f25e0d6ab85442194
|
/third_party/chromite/cbuildbot/autotest_rpc_errors.py
|
1ee19f4a5238f93886962b5f9968b1f009275cf6
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zenoalbisser/chromium
|
6ecf37b6c030c84f1b26282bc4ef95769c62a9b2
|
e71f21b9b4b9b839f5093301974a45545dad2691
|
refs/heads/master
| 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 |
BSD-3-Clause
| 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null |
UTF-8
|
Python
| false | false | 670 |
py
|
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Error codes used for the Autotest RPC Client, Proxy, and Server.

This is a copy of scripts/slave-internal/autotest_rpc/autotest_rpc_errors.py
from https://chrome-internal.googlesource.com/chrome/tools/build.
"""

# Client-side failures (1-4): problems reaching or reading the RPC server.
CLIENT_CANNOT_CONNECT = 1
CLIENT_HTTP_CODE = 2
CLIENT_EMPTY_RESPONSE = 3
CLIENT_NO_RETURN_CODE = 4

# Proxy-side failures (11-13): transport problems between proxy and server.
PROXY_CANNOT_SEND_REQUEST = 11
PROXY_CONNECTION_LOST = 12
PROXY_TIMED_OUT = 13

# Server-side failures (21-24): malformed or unrecognized requests.
SERVER_NO_COMMAND = 21
SERVER_NO_ARGUMENTS = 22
SERVER_UNKNOWN_COMMAND = 23
SERVER_BAD_ARGUMENT_COUNT = 24
|
[
"[email protected]"
] | |
46b142b96d6ec205f215bf65fe76cf618722fad6
|
7236d1d4873faa9735fd5e2d4598b211a370f731
|
/project/n/projects/projects/ecommapp/users/migrations/0007_myuser_date_join.py
|
d2f2c4be22f4cc171f14f93f40710f105bb9009e
|
[] |
no_license
|
Dreambigxz/my_first_django_app
|
05f5a5d330d72084489f9306fca9ca232af13999
|
9e21ebcbe63c7394280558d2977ef8a796960e0d
|
refs/heads/main
| 2023-01-03T18:45:20.712074 | 2020-10-23T09:05:47 | 2020-10-23T09:05:47 | 306,180,592 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
# Generated by Django 3.0.8 on 2020-09-03 16:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20200903_1752'),
]
operations = [
migrations.AddField(
model_name='myuser',
name='date_join',
field=models.DateField(default=django.utils.timezone.now),
),
]
|
[
"[email protected]"
] | |
116d387dd717fabe096b4ea161ad403d2870e88a
|
33976fddb32feae0b6b5d38b0a8994490fc4b1db
|
/src/ar6/constants/gases.py
|
4df95e992cbd6ed95181fc2ed1bf4bafd19e54c8
|
[
"MIT"
] |
permissive
|
chrisroadmap/ar6
|
e72e4bad8d1c1fa2751513dbecddb8508711859c
|
2f948c862dbc158182ba47b863395ec1a4aa7998
|
refs/heads/main
| 2023-04-16T22:57:02.280787 | 2022-09-27T13:31:38 | 2022-09-27T13:31:38 | 305,981,969 | 27 | 20 |
MIT
| 2022-09-27T13:31:38 | 2020-10-21T10:02:03 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 6,315 |
py
|
"""
Gas properties
"""
# Number of bromine atoms
br_atoms = {
'CCl4': 0,
'CFC11': 0,
'CFC113': 0,
'CFC114': 0,
'CFC115': 0,
'CFC12': 0,
'CH2Cl2': 0,
'CH3Br': 1,
'CH3CCl3': 0,
'CH3Cl': 0,
'CHCl3': 0,
'HCFC141b': 0,
'HCFC142b': 0,
'HCFC22': 0,
'Halon1211': 1,
'Halon1301': 1,
'Halon2402': 2,
}
# Number of chlorine atoms
cl_atoms = {
'CCl4': 4,
'CFC11': 3,
'CFC113': 3,
'CFC114': 2,
'CFC115': 1,
'CFC12': 2,
'CH2Cl2': 2,
'CH3Br': 0,
'CH3CCl3': 3,
'CH3Cl': 1,
'CHCl3': 3,
'HCFC141b': 2,
'HCFC142b': 1,
'HCFC22': 1,
'Halon1211': 0,
'Halon1301': 0,
'Halon2402': 0,
}
# Fractional release (for ozone depletion)
# References:
# Daniel, J. and Velders, G.: A focus on information and options for
# policymakers, in: Scientific Assessment of Ozone Depletion, WMO, 2011
# Newman et al., 2007: A new formulation of equivalent effective stratospheric
# chlorine (EESC)
fracrel = {
'CCl4': 0.56,
'CFC11': 0.47,
'CFC113': 0.29,
'CFC114': 0.12,
'CFC115': 0.04,
'CFC12': 0.23,
'CH2Cl2': 0, # no literature value available
'CH3Br': 0.60,
'CH3CCl3': 0.67,
'CH3Cl': 0.44,
'CHCl3': 0, # no literature value available
'HCFC141b': 0.34,
'HCFC142b': 0.17,
'HCFC22': 0.13,
'Halon1211': 0.62,
'Halon1301': 0.28,
'Halon2402': 0.65,
}
# Conversion between GHG names in GHG spreadsheet and RCMIP.
ghg_to_rcmip_names={
'HFC-125': 'HFC125',
'HFC-134a': 'HFC134a',
'HFC-143a': 'HFC143a',
'HFC-152a': 'HFC152a',
'HFC-227ea': 'HFC227ea',
'HFC-23': 'HFC23',
'HFC-236fa': 'HFC236fa',
'HFC-245fa': 'HFC245fa',
'HFC-32': 'HFC32',
'HFC-365mfc': 'HFC365mfc',
'HFC-43-10mee': 'HFC4310mee',
'NF3': 'NF3',
'C2F6': 'C2F6',
'C3F8': 'C3F8',
'n-C4F10': 'C4F10',
'n-C5F12': 'C5F12',
'n-C6F14': 'C6F14',
'i-C6F14': None,
'C7F16': 'C7F16',
'C8F18': 'C8F18',
'CF4': 'CF4',
'c-C4F8': 'cC4F8',
'SF6': 'SF6',
'SO2F2': 'SO2F2',
'CCl4': 'CCl4',
'CFC-11': 'CFC11',
'CFC-112': 'CFC112',
'CFC-112a': None,
'CFC-113': 'CFC113',
'CFC-113a': None,
'CFC-114': 'CFC114',
'CFC-114a': None,
'CFC-115': 'CFC115',
'CFC-12': 'CFC12',
'CFC-13': None,
'CH2Cl2': 'CH2Cl2',
'CH3Br': 'CH3Br',
'CH3CCl3': 'CH3CCl3',
'CH3Cl': 'CH3Cl',
'CHCl3': 'CHCl3',
'HCFC-124': None,
'HCFC-133a': None,
'HCFC-141b': 'HCFC141b',
'HCFC-142b': 'HCFC142b',
'HCFC-22': 'HCFC22',
'HCFC-31': None,
'Halon-1211': 'Halon1211',
'Halon-1301': 'Halon1301',
'Halon-2402': 'Halon2402',
}
# Hodnebrog et al., 2020: https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
# unless stated
lifetimes = {
'CH4': 11.8, # chapter 6
'N2O': 109, # AR6 SOD
'HFC-125': 30,
'HFC-134a': 14,
'HFC-143a': 51,
'HFC-152a': 1.6,
'HFC-227ea': 36,
'HFC-23': 228,
'HFC-236fa': 213,
'HFC-245fa': 7.9,
'HFC-32': 5.4,
'HFC-365mfc': 8.9,
'HFC-43-10mee': 17,
'NF3': 569,
'C2F6': 10000,
'C3F8': 2600,
'n-C4F10': 2600,
'n-C5F12': 4100,
'n-C6F14': 3100,
'i-C6F14': 3100, # assumed
'C7F16': 3000,
'C8F18': 3000,
'CF4': 50000,
'c-C4F8': 3200,
'SF6': 3200,
'SO2F2': 36,
'CCl4': 32,
'CFC-11': 52,
'CFC-112': 63.6,
'CFC-112a': 52,
'CFC-113': 93,
'CFC-113a': 55,
'CFC-114': 189,
'CFC-114a': 105,
'CFC-115': 540,
'CFC-12': 102,
'CFC-13': 640,
'CH2Cl2': 0.4932,
'CH3Br': 0.8,
'CH3CCl3': 5,
'CH3Cl': 0.9,
'CHCl3': 0.5014,
'HCFC-124': 5.9,
'HCFC-133a': 4.6,
'HCFC-141b': 9.4,
'HCFC-142b': 18,
'HCFC-22': 11.9,
'HCFC-31': 1.2,
'Halon-1211': 16,
'Halon-1301': 72,
'Halon-2402': 28,
}
# Ozone depleting substances
ods_species = [
'CCl4',
'CFC11',
'CFC113',
'CFC114',
'CFC115',
'CFC12',
'CH2Cl2',
'CH3Br',
'CH3CCl3',
'CH3Cl',
'CHCl3',
'HCFC141b',
'HCFC142b',
'HCFC22',
'Halon1211',
'Halon1301',
'Halon2402',
]
# radiative efficiencies
# source: Hodnebrog et al 2020 https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
radeff = {
'HFC-125': 0.23378,
'HFC-134a': 0.16714,
'HFC-143a': 0.168,
'HFC-152a': 0.10174,
'HFC-227ea': 0.27325,
'HFC-23': 0.19111,
'HFC-236fa': 0.25069,
'HFC-245fa': 0.24498,
'HFC-32': 0.11144,
'HFC-365mfc': 0.22813,
'HFC-43-10mee': 0.35731,
'NF3': 0.20448,
'C2F6': 0.26105,
'C3F8': 0.26999,
'n-C4F10': 0.36874,
'n-C5F12': 0.4076,
'n-C6F14': 0.44888,
'i-C6F14': 0.44888,
'C7F16': 0.50312,
'C8F18': 0.55787,
'CF4': 0.09859,
'c-C4F8': 0.31392,
'SF6': 0.56657,
'SO2F2': 0.21074,
'CCl4': 0.16616,
'CFC-11': 0.25941,
'CFC-112': 0.28192,
'CFC-112a': 0.24564,
'CFC-113': 0.30142,
'CFC-113a': 0.24094,
'CFC-114': 0.31433,
'CFC-114a': 0.29747,
'CFC-115': 0.24625,
'CFC-12': 0.31998,
'CFC-13': 0.27752,
'CH2Cl2': 0.02882,
'CH3Br': 0.00432,
'CH3CCl3': 0.06454,
'CH3Cl': 0.00466,
'CHCl3': 0.07357,
'HCFC-124': 0.20721,
'HCFC-133a': 0.14995,
'HCFC-141b': 0.16065,
'HCFC-142b': 0.19329,
'HCFC-22': 0.21385,
'HCFC-31': 0.068,
'Halon-1211': 0.30014,
'Halon-1301': 0.29943,
'Halon-2402': 0.31169,
}
rcmip_to_ghg_names = {v: k for k, v in ghg_to_rcmip_names.items()}
|
[
"[email protected]"
] | |
2592cd0cd2bea747a58634eb2386b2e46bdca291
|
a0015a3dc121c8fcdd5d2eadd522ece03b4ceec8
|
/docs/cornell CS class/Lesson 29. Coroutines/demos/read2.py
|
fe3ecc58b16f7d80b45c890599a931e740dcc82a
|
[
"MIT"
] |
permissive
|
LizzieDeng/kalman_fliter_analysis
|
fc40d475797dbddba5f9f2dfb224fbf68d77865f
|
50e728f32c496c3fcbb8ca3ee00857b999b88d99
|
refs/heads/main
| 2023-03-03T02:46:19.020078 | 2021-02-05T07:53:10 | 2021-02-05T07:53:10 | 329,243,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,137 |
py
|
"""
A module to show off a long-running function as a coroutine.
This module shows another advantage of a coroutine. We can
interleave two functions as they load from a file. There are
reasons we might want to do this when working with large data,
but they are beyond the scope of this course.
Author: Walker M. White
Date: November 2, 2020
"""
def merge(dict1,dict2):
"""
Returns a new dictionary merging (joining keys) dict1
and dict2.
If a key appears in only one of dict1 or dict2, the
value is the value from that dictionary. If it is in
both, the value is the sum of values.
Example: merge({'a':1,'b':2},{'b':3,'c':4}) returns
{'a':1,'b':5,'c':4}
Parameter dict1: The first dictionary to merge
Precondition: dict1 a dictionary with int or float values
Parameter dict2: The second dictionary to merge
Precondition: dict2 a dictionary with int or float values
"""
result = dict(dict1) # Makes a (shallow) copy
for k in dict2:
if k in dict1:
result[k] = result[k]+1
else:
result[k] = 1
return result
def add_word(word,counts):
"""
Adds a word to a word-count dictionary.
The keys of the dictionaries are strings, and the values
are integers. If the word is already in the dictionary,
adding it will increase the value by 1. Otherwise it
will add the key and assign it a value for 1.
Example: If count = ['a':1,'b':1}, add_word('a',count)
alters count to be {'a':2,'b':1}
Parameter word: The word to add
Precondition: word is a string
Parameter counts: The word-count dictionary
Precondition: count is a dictionary with string keys
and integer values
"""
if word in counts:
counts[word] = counts[word]+1
else:
counts[word] = 1
def wordcount(fname):
"""
Returns a dictionary with the individual word count of
fname
The is function opens the specified text file and creates
a dictionary from it. The keys of the dictionaries are
words (i.e. adjacent letters with no spaces or
punctuation). For example, in the string 'Who are you?',
the words are 'Who', 'are', and 'you'. The values are
the number of times that word (paying attention to
capitalization) appears in the file.
This function is a generator-based coroutine that stops
at every 10% of the file to return its amount of progress
to the original caller (the function that calls next()).
Parameter fname: The file name
Precondition: fname is a string and the name of a text
file
"""
# Load the entire file into a single string
file = open(fname)
text = file.read()
file.close()
counts = {}
word = '' # Accumulator to build a word
for pos in range(len(text)):
# Yield every 10%
if pos % (len(text)//10) == 0:
# Indicate the amount of progress we made
yield round(100*pos/len(text))
# Build up the word, one letter at a time
x = text[pos]
if x.isalpha():
word = word+x
else: # Word ends
# Add it if not empty
if word != '':
add_word(word,counts)
word = '' # Reset the accumulator
# Add the last word
if word != '':
add_word(word,counts)
return counts
def loadfiles(fname1,fname2):
"""
Creates a word-count dictionary for fname1, fname2 and
prints the combined size
The size of the word-count dictionary is the number of
distinct words in the file.
This function is the parent of wordcount, pushing it
forward with the next() function until it is done
reading the file. This function creates two wordcount
coroutines and interleaves them.
Parameter fname1: The first file name
Precondition: fname1 is a string and the name of a text file
Parameter fname2: The second file name
Precondition: fname2 is a string and the name of a text file
"""
loader1 = wordcount(fname1)
loader2 = wordcount(fname2)
result = {}
# We keep going as long as either loader is working
while (not loader1 is None) or (not loader2 is None):
# Load the next batch from fname1
if not loader1 is None:
try:
amount = next(loader1)
print('Loaded '+str(amount)+'% of '+repr(fname1))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader1 = None # We are done
# Load the next batch from fname2
if not loader2 is None:
try:
amount = next(loader2)
print('Loaded '+str(amount)+'% of '+repr(fname2))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader2 = None # We are done
print('Read a total of '+str(len(result))+' words.')
if __name__ == '__main__':
loadfiles('warpeace10.txt','kingjames10.txt')
|
[
"[email protected]"
] | |
2cb33275754ec783f5f546a411cf0fe226a579eb
|
f7982a468b6f76dc72c53e7c3644ae4e7e6f2f49
|
/pyEX/refdata/ric.py
|
6e4ab19987f1f4ec33a268a2b177446c705a78b6
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/pyEX
|
55002c3718214c6e207976ab3661a47108c6c114
|
f678c791d05bc28911e25807241c392a9ee8134f
|
refs/heads/main
| 2023-08-20T00:17:53.162803 | 2022-11-22T02:51:13 | 2022-11-22T02:51:13 | 109,551,372 | 350 | 95 |
Apache-2.0
| 2023-09-11T12:26:54 | 2017-11-05T04:21:16 |
Python
|
UTF-8
|
Python
| false | false | 1,129 |
py
|
# *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import _get
def ricLookup(ric, token="", version="stable", filter="", format="json"):
"""This call converts a RIC to an iex symbol
https://iexcloud.io/docs/api/#ric-mapping
8am, 9am, 12pm, 1pm UTC daily
Args:
ric (str): ric to lookup
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame or list: result
"""
return _get(
"ref-data/ric?ric={}".format(ric),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(ricLookup)
def ricLookupDF(*args, **kwargs):
return pd.DataFrame(ricLookup(*args, **kwargs))
|
[
"[email protected]"
] | |
0f7d8ae5196b70d080e081d05be8478206494a1d
|
82cd10c024f284555845f006e518924fed3254c7
|
/Day-06[09-10-2021]/EmployeeProject/EmployeeProject/urls.py
|
256d1ab7ebea77beebcb3a9ed2b40858b129c6a2
|
[] |
no_license
|
AP-Skill-Development-Corporation/APSSDC-Workshop2021
|
61acba18eb55ec2e4bb96ded95d339c73c8ea1ac
|
fe1f5517f99b17bd0ebcf07c70ee26bd23f262ea
|
refs/heads/main
| 2023-08-12T16:29:53.208949 | 2021-10-16T15:47:22 | 2021-10-16T15:47:22 | 413,299,596 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,549 |
py
|
"""EmployeeProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Employee import views
urlpatterns = [
path('admin/', admin.site.urls),
path('demo/',views.sample),
path('de/<str:name>/',views.dsname),
path('fe/<str:name>/<int:age>/',views.fname),
path('g/<str:fname>/<str:lname>/<int:sal>/',views.emp),
path('gt/<str:empname>/<int:sal>/',views.empdetials),
path('fy/<str:sname>/',views.dname),
path('sty/<str:stname>/<int:year>/<str:branch>/',views.stname),
path('reg/',views.regis),
path('se/',views.index,name="ind"),
path('about/',views.about,name="ab"),
path('contact/',views.contact,name="cnt"),
path('sam/',views.sample),
path('re/',views.register,name="rg"),
path('',views.operations,name="op"),
path('emv/<int:t>/',views.eview,name="ev"),
path('eup/<int:p>/',views.emup,name="ep"),
path('ed/<int:f>/',views.emdl,name="edl"),
]
|
[
"[email protected]"
] | |
b53fb27016d732c08a7869d38d13162383b30b32
|
1e09bc56bf2904b349df1873e11da3d527437880
|
/lecture-27/AdjListGraph.py
|
8a03efffe1f2ce76c121133adbb645df489cf2d6
|
[] |
no_license
|
codeforcauseorg-archive/DSA-Live-Python-Jun-0621
|
f3444f5671cb4985644c7432517477c3585c70fb
|
e4fe544178d7851c24755242390f39675b99fabe
|
refs/heads/main
| 2023-08-09T08:31:41.449120 | 2021-09-14T16:44:39 | 2021-09-14T16:44:39 | 384,725,085 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,095 |
py
|
class Vertex:
def __init__(self, value):
self.value = value
self.neighbours = {}
class AdjListGraph:
def __init__(self):
self.vertices = dict()
def add_vertex(self, value):
if value not in self.vertices:
self.vertices[value] = Vertex(value)
def add_edge(self, first, second, weight):
if (first in self.vertices) and (second in self.vertices):
vfirst = self.vertices[first]
vsecond = self.vertices[second]
vfirst.neighbours[vsecond] = weight
vsecond.neighbours[vfirst] = weight
def min_spanning_tree(self):
edges = []
for vertex in self.vertices.values():
# print(vertex.neighbours.items())
for neighbour, weight in vertex.neighbours.items():
edges.append([weight, vertex.value, neighbour.value])
sorted_edges = sorted(edges)
acc = 0
for [weight, source, dest] in sorted_edges:
if self.union(source, dest):
acc += weight
return acc
# parents = {}
# for vertex in self.vertices:
# parents[vertex.value] = None
def union(self, parents, first, second):
first = self.find(parents, first)
second = self.find(parents, second)
if first == second:
return False
else:
parents[first] = second
def find(self, parents, item):
while parents[item] != None:
item = parents[item]
return item
def represent(self):
for vertex in self.vertices.values():
print(vertex.value, end="-> ")
for neighbour in vertex.neighbours:
print("[{} : {}]".format(neighbour.value, vertex.neighbours[neighbour]), end=", ")
print()
graph = AdjListGraph()
graph.add_vertex("A")
graph.add_vertex("B")
graph.add_vertex("C")
graph.add_vertex("D")
graph.add_edge("A", "B", 10)
graph.add_edge("B", "C", 15)
graph.add_edge("D", "C", 10)
graph.add_edge("A", "D", 20)
graph.min_spanning_tree()
|
[
"[email protected]"
] | |
cd3d97d846876037d74f4ccc46eefb915c555830
|
823b69dffc4a6e28b9e4c27ec176f8ce54d2e586
|
/args/arg_parser.py
|
c2cea4c5d26614670271806fddc28b28fb6b4b19
|
[] |
no_license
|
potlee/pbt
|
1f5af632aa100561da1c284b522a6ca181ea21c1
|
05160eca9f3a557a25d043502f90aca1a7b76b46
|
refs/heads/master
| 2020-03-25T23:48:47.867151 | 2018-06-23T19:40:16 | 2018-06-23T19:40:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,599 |
py
|
import argparse
import util
class ArgParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser(description='Population-Based Training')
self.parser.add_argument('--gpu_ids', type=str, default='0',
help='Comma-separated list of GPUs to use.')
self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size.')
self.parser.add_argument('--num_workers', type=int, default=4, help='Number of workers per data loader.')
self.parser.add_argument('--num_epochs', type=int, default=30,
help='Number of epochs to train for. If 0, train forever.')
self.parser.add_argument('--population_size', type=int, default=3,
help='Number of models in a population.')
self.parser.add_argument('--dataset', type=str, default='CIFAR10', choices=('CIFAR10',),
help='Dataset to train on.')
self.parser.add_argument('--ckpt_dir', type=str, default='ckpts/',
help='Directory to save checkpoints and population info.')
self.parser.add_argument('--name', type=str, required=True, help='Experiment name.')
self.parser.add_argument('--model', type=str, default='resnet50', help='Model name.')
self.parser.add_argument('--metric_name', type=str, default='val_loss',
help='Metric to optimize during PBT. Make sure to also set --maximize_metric')
self.parser.add_argument('--maximize_metric', type=util.str_to_bool, default=False,
help='If true, maximize the metric. Else minimize.')
self.parser.add_argument('--max_eval', type=int, default=1000,
help='Max number of examples to evaluate from the training set.')
self.parser.add_argument('--max_ckpts', type=int, default=3,
help='Max number of recent checkpoints to keep per model.')
self.parser.add_argument('--save_dir', type=str, default='logs', help='Directory for saving logs.')
self.parser.add_argument('--learning_rate', type=float, default=1e-1, help='Initial learning rate.')
self.parser.add_argument('--optimizer', type=str, default='sgd', choices=('sgd', 'adam'), help='Optimizer.')
self.parser.add_argument('--sgd_momentum', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--sgd_dampening', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--adam_beta_1', type=float, default=0.9, help='Adam beta 1 (Adam only).')
self.parser.add_argument('--adam_beta_2', type=float, default=0.999, help='Adam beta 2 (Adam only).')
self.parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (i.e., L2 regularization factor).')
self.parser.add_argument('--iters_per_print', type=int, default=4,
help='Number of iterations between printing loss to the console and TensorBoard.')
self.parser.add_argument('--search_space', type=str, default='lr,momentum,weight_decay')
def parse_args(self):
args = self.parser.parse_args()
args.gpu_ids = [int(i) for i in str(args.gpu_ids).split(',') if int(i) >= 0]
args.device = 'cpu' if len(args.gpu_ids) == 0 else 'cuda:{}'.format(args.gpu_ids[0])
args.search_space = str(args.search_space).split(',')
return args
|
[
"[email protected]"
] | |
1a5cc4dd4b02297aa61785f8fe17d28cdf7bae2c
|
99e494d9ca83ebafdbe6fbebc554ab229edcbacc
|
/.history/Day 1/Test/Answers/NegativeMarking_20210304211811.py
|
d220b7261e6beb16198606a036f3688522eaee56
|
[] |
no_license
|
Datta2901/CCC
|
c0364caa1e4937bc7bce68e4847c8d599aef0f59
|
4debb2c1c70df693d0e5f68b5798bd9c7a7ef3dc
|
refs/heads/master
| 2023-04-19T10:05:12.372578 | 2021-04-23T12:50:08 | 2021-04-23T12:50:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
t = int(input())
for i in range(t):
questions,requiredscore = map(int,input().split())
if questions * 4 < requiredscore:
print(-1)
continue
attempt = (requiredscore/questions) + 3
accuracy = attempt / 7
print(format(accuracy*100,'.2f')
#
# Here Accuracy can be find by using two linear equations
# They are Total Score(Required Score) = 4 * x - 3 * y
# Total Questions = x + y
# Here x is the total number of crct answers
# y is the total number of wrong answers
|
[
"[email protected]"
] | |
39764d8d79f6697d5e9b2cffeb3f3e9487f9ea0a
|
2eee2da886d2574f030b22771e707e32f56cbaed
|
/chaospy/distributions/collection/chi.py
|
cb04231c2d16b7f21de4aa90574562d6e927b4fc
|
[
"MIT"
] |
permissive
|
lblonk/chaospy
|
1759f050229d1365802320d9b13f6195ec55a72c
|
1759a4307c6134b74ce63ff44973195f1e185f94
|
refs/heads/master
| 2022-11-12T19:50:15.108219 | 2020-07-03T11:13:42 | 2020-07-03T11:13:42 | 276,879,282 | 0 | 0 |
MIT
| 2020-07-03T11:03:14 | 2020-07-03T11:03:13 | null |
UTF-8
|
Python
| false | false | 3,779 |
py
|
"""Chi distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class chi(Dist):
"""Chi distribution."""
def __init__(self, df=1):
Dist.__init__(self, df=df)
def _pdf(self, x, df):
return x**(df-1.)*numpy.exp(-x*x*0.5)/(2.0)**(df*0.5-1)\
/special.gamma(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, q))
def _lower(self, df):
return 0.
def _mom(self, k, df):
return 2**(.5*k)*special.gamma(.5*(df+k))\
/special.gamma(.5*df)
class Chi(Add):
"""
Chi distribution.
Args:
df (float, Dist):
Degrees of freedom
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Chi(2, 4, 1)
>>> distribution
Chi(df=2, scale=4, shift=1)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 1. , 4.0341, 5.7096, 7.6604, 28.1446])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.1422, 0.1472, 0.1041, 0. ])
>>> distribution.sample(4).round(4)
array([ 6.8244, 2.9773, 10.8003, 5.5892])
>>> distribution.mom(1).round(4)
6.0133
"""
def __init__(self, df=1, scale=1, shift=0):
self._repr = {"df": df, "scale": scale, "shift": shift}
Add.__init__(self, left=chi(df)*scale, right=shift)
class Maxwell(Add):
"""
Maxwell-Boltzmann distribution
Chi distribution with 3 degrees of freedom
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Maxwell(2, 3)
>>> distribution
Maxwell(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 5.2023, 6.0763, 7.0538, 17.0772])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2638, 0.2892, 0.2101, 0. ])
>>> distribution.sample(4).round(4)
array([6.6381, 4.6119, 8.5955, 6.015 ])
>>> distribution.mom(1).round(4)
6.1915
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(3)*scale, right=shift)
class Rayleigh(Add):
"""
Rayleigh distribution
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Rayleigh(2, 3)
>>> distribution
Rayleigh(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 4.5171, 5.3548, 6.3302, 16.5723])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2844, 0.2944, 0.2081, 0. ])
>>> distribution.sample(4).round(4)
array([5.9122, 3.9886, 7.9001, 5.2946])
>>> distribution.mom(1).round(4)
5.5066
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(2)*scale, right=shift)
|
[
"[email protected]"
] | |
c0593805d9fcc7d217660376fbb2688f706642e2
|
0798277f2706998ab80442ac931579eb47f676e5
|
/boundary/property_handler.py
|
45635669e8b5a3731f321b2d7a0d6eb87f6a6557
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulse-api-cli
|
49ed38b0694ab289802f69ee6df4911cf3378e3f
|
b01ca65b442eed19faac309c9d62bbc3cb2c098f
|
refs/heads/master
| 2023-03-18T00:23:15.295727 | 2016-05-13T15:44:08 | 2016-05-13T15:44:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PropertyHandler(object):
def __init__(self):
self._properties = None
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
d = p.split('=')
self._properties[d[0]] = d[1]
def _add_property_argument(self, parser, help_text):
parser.add_argument('-p', '--property', dest='properties', action='append',
required=False, metavar='property=value', help=help_text)
|
[
"[email protected]"
] | |
87339e4385a890dc9a46c6e5efc4674cb85aefa2
|
4073f351551c2f73c5659cb3038a68360cc5b369
|
/Arbetsbok/kap 14/övn 14.1, sid. 36 - söka tal.py
|
9b318176e080635b41a000e7aeb4734430c42602
|
[
"MIT"
] |
permissive
|
Pharou/programmering1python
|
b9a5aca72354d3e7e91a5023a621d22a962ecd7c
|
9b689027db1f7fbf06925f3094fcb126880453e4
|
refs/heads/master
| 2022-11-28T06:33:17.295157 | 2020-07-25T11:02:07 | 2020-07-25T11:02:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,437 |
py
|
#!/usr/bin/python3.8
# Filnamn: övn 14.1, sid. 36 - söka tal.py
# Sökning
# Programmeringsövningar till kapitel 14
# Programmet slumpar först fram 20 tal mellan 1 och 100 och lagrar alla talen i
# en lista och sedan skrivs listan ut på skärmen. Därefter frågar programmet
# användaren efter ett tal som ska eftersökas. Slutligen undersöker programmet
# om talet finns i listan och om det finns, skriva ut på indexet det finns på.
# Om inte talet finns så ska användaren informeras om att det inte finns.
# Sökmetod: Linjär sökning
# Import av modul
from random import randint
# Funktionsdefinitioner
# Huvudprogram
def main():
lista = []
# Slumpa 20 st heltal mellan 1 och 100 och lägg dem eftervarandra i listan
for c in range(20):
lista.append(randint(1,100))
# Skriv ut listan
print(lista)
# Fråga användaren efte tal som eftersöks
tal = int(input('Anget tal som eftersöks: '))
# Utför en linjär sökning i hela listan
# Utgå ifrån att talet inte finns
index = -1
for i in range(len(lista)):
if tal == lista[i]:
# Om talet hittas sätt index till det och avbryt loopen
index = i
break
if index >= 0:
print('Talet ' + str(tal) + ' finns på index ' + str(index) + ' i listan.')
else:
print('Talet ' + str(tal) + ' finns inte i listan.')
## Huvudprogram anropas
main()
|
[
"[email protected]"
] | |
4ad42cd6418d521ed2f275b7b73aaa4e7036fbea
|
964b063c2461aad267ddd991fefaf7ab53b1ca94
|
/6-kyu/iq-test/python/solution.py
|
2221bc57375308dc79e1d3f085e299509f860057
|
[] |
no_license
|
lucasbflopes/codewars-solutions
|
26c4e2cd1be19db50cc8c1d9fc117c51c82a2259
|
72ef2c02dde7bd0d5a691e04e3b2a383e892f84b
|
refs/heads/master
| 2022-03-14T01:26:41.816498 | 2019-11-23T17:17:19 | 2019-11-23T17:17:19 | 114,834,447 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
def iq_test(numbers):
arr = [i % 2 == 0 for i in [int(j) for j in numbers.split()]]
if arr.count(True) > 1:
return arr.index(False)+1
else:
return arr.index(True)+1
|
[
"[email protected]"
] | |
5be34879011c0f4d0308e93c05824f2a437ec963
|
44b87d9faad99d542914c35410ba7d354d5ba9cd
|
/1/collection/list/divisible by 8 using compre.py
|
857a0b6ada0c2d9dc98bd9180ec1370a09173462
|
[] |
no_license
|
append-knowledge/pythondjango
|
586292d1c7d0ddace3630f0d77ca53f442667e54
|
0e5dab580e8cc48e9940fb93a71bcd36e8e6a84e
|
refs/heads/master
| 2023-06-24T07:24:53.374998 | 2021-07-13T05:55:25 | 2021-07-13T05:55:25 | 385,247,677 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
num=[i for i in range(1,1000) if i%8==0]
print(num)
print("length of num is",len(num))
# odd=[i for i in range(1000) if i%2!=0]
# print(odd)
|
[
"[email protected]"
] | |
00f065d20644809c36a60a0fbfe0ad0aa3cd6ef9
|
4a0f2cc27cd39b8b8901ade728f3b1dc20c2a2ee
|
/controller/qt_classes/UbNewDocumentViewDelegate.py
|
096e2b7becda90dbfcb58540466702c64771dd6f
|
[] |
no_license
|
teamlm2/lm2_mgis
|
2c016423983a31fcdf15e34508401acf48177f35
|
9144b1234b25665737986995bd1da7492871151c
|
refs/heads/master
| 2021-11-11T23:43:12.647749 | 2021-10-26T07:55:58 | 2021-10-26T07:55:58 | 155,568,182 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,303 |
py
|
# coding=utf8
__author__ = 'B.Ankhbold'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy.exc import SQLAlchemyError
from ...model import SettingsConstants
from ...model.SetOfficialDocument import SetOfficialDocument
from ...utils.FileUtils import FileUtils
from ...utils.PluginUtils import PluginUtils
from ...utils.SessionHandler import SessionHandler
from ...utils.DatabaseUtils import *
from ...utils.FilePath import *
from ftplib import *
import shutil
import codecs
NAME_COLUMN = 0
DESCRIPTION_COLUMN = 1
VIEW_COLUMN = 2
FILE_PDF = 'pdf'
FILE_IMAGE = 'png'
class UbNewDocumentViewDelegate(QStyledItemDelegate):
def __init__(self, widget, parent):
super(UbNewDocumentViewDelegate, self).__init__(parent)
self.widget = widget
self.parent = parent
self.session = SessionHandler().session_instance()
self.button = QPushButton("", parent)
self.button.hide()
self.viewIcon = QIcon(":/plugins/lm2/file.png")
def paint(self, painter, option, index):
if index.column() == VIEW_COLUMN:
self.button.setIcon(self.viewIcon)
else:
super(UbNewDocumentViewDelegate, self).paint(painter, option, index)
return
self.button.setGeometry(option.rect)
button_picture = QPixmap.grabWidget(self.button)
painter.drawPixmap(option.rect.x(), option.rect.y(), button_picture)
def editorEvent(self, event, model, option, index):
if index is not None:
if index.isValid() and event.type() == QEvent.MouseButtonRelease:
if event.button() == Qt.RightButton:
return False
if index.column() == VIEW_COLUMN:
ftp = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole)
file_name = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 1)
file_type = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 2)
# print file_name
# print file_type
# print ftp.pwd()
# print ftp.nlst()
view_pdf = open(FilePath.view_file_path(), 'wb')
view_png = open(FilePath.view_file_png_path(), 'wb')
if file_type == FILE_IMAGE:
ftp.retrbinary('RETR ' + file_name, view_png.write)
else:
ftp.retrbinary('RETR ' + file_name, view_pdf.write)
try:
if file_type == FILE_IMAGE:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_png_path()))
else:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_path()))
except SQLAlchemyError, e:
PluginUtils.show_error(self.parent, self.tr("File Error"), self.tr("Could not execute: {0}").format(e.message))
return True
elif index.column() == DESCRIPTION_COLUMN or index.column() == NAME_COLUMN:
return True
else:
index.model().setData(index, 0, Qt.EditRole)
return False
|
[
"[email protected]"
] | |
116f6963b88edfdb0db9fda927ba4e4947b376fa
|
5ec7d0bad8a77c79843a2813f5effcb3a2b7e288
|
/lean/models/brokerages/cloud/tradier.py
|
fd5e10b9f48bced5ac4faae3e74d4fac7886ec50
|
[
"Apache-2.0"
] |
permissive
|
xdpknx/lean-cli
|
aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0
|
c1051bd3e8851ae96f6e84f608a7116b1689c9e9
|
refs/heads/master
| 2023-08-08T02:30:09.827647 | 2021-09-21T21:36:24 | 2021-09-21T21:36:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,163 |
py
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import click
from lean.components.util.logger import Logger
from lean.models.brokerages.cloud.base import CloudBrokerage
class TradierBrokerage(CloudBrokerage):
    """Cloud brokerage adapter for Tradier."""

    def __init__(self, account_id: str, access_token: str, environment: str) -> None:
        """Store the Tradier credentials and the target environment ("demo" or "real")."""
        self._account_id = account_id
        self._access_token = access_token
        self._environment = environment

    @classmethod
    def get_id(cls) -> str:
        """Return the brokerage's unique identifier."""
        return "TradierBrokerage"

    @classmethod
    def get_name(cls) -> str:
        """Return the brokerage's display name."""
        return "Tradier"

    @classmethod
    def build(cls, logger: Logger) -> CloudBrokerage:
        """Prompt the user for Tradier credentials and build a configured instance."""
        logger.info("""
Your Tradier account id and API token can be found on your Settings/API Access page (https://dash.tradier.com/settings/api).
The account id is the alpha-numeric code in a dropdown box on that page.
Your account details are not saved on QuantConnect.
""".strip())

        account_id = click.prompt("Account id")
        access_token = logger.prompt_password("Access token")
        environment = click.prompt("Environment", type=click.Choice(["demo", "real"], case_sensitive=False))

        return TradierBrokerage(account_id, access_token, environment)

    def _get_settings(self) -> Dict[str, str]:
        """Return the live-trading settings payload expected by the cloud API."""
        env_name = "live" if self._environment == "real" else "paper"
        return {
            "account": self._account_id,
            "token": self._access_token,
            "environment": env_name,
        }
|
[
"[email protected]"
] | |
19f3c8b7d94aae6549e86646e36334cb826a906e
|
6e820756b82ffbe9837348937e53f1a0ce0e6cca
|
/Lib/site-packages/pandas_datareader/io/jsdmx.py
|
d602ca88beb058636aceaac714662ee2f457a6c4
|
[] |
no_license
|
AndreasPatsimas/pms_papei
|
c2afd941de6ae234dd37784d746e794183ebb8d3
|
da10220ea468304c1066bed55b8f92ba9e5ada8a
|
refs/heads/master
| 2023-02-01T23:33:39.221747 | 2020-12-19T12:17:59 | 2020-12-19T12:17:59 | 321,115,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,167 |
py
|
# pylint: disable-msg=E1101,W0613,W0603
from __future__ import unicode_literals
from collections import OrderedDict
import itertools
import re
import sys
import numpy as np
import pandas as pd
from pandas_datareader.io.util import _read_content
def read_jsdmx(path_or_buf):
    """
    Convert a SDMX-JSON string to a pandas object.

    Parameters
    ----------
    path_or_buf : a valid SDMX-JSON string or file-like
        https://github.com/sdmx-twg/sdmx-json

    Returns
    -------
    results : Series, DataFrame, or dictionary of Series or DataFrame.
    """
    raw = _read_content(path_or_buf)

    # Prefer simplejson when available; on Python 2.6 it is mandatory.
    try:
        import simplejson as json
    except ImportError:
        if sys.version_info[:2] < (2, 7):
            raise ImportError("simplejson is required in python 2.6")
        import json

    if isinstance(raw, dict):
        data = raw
    else:
        # Preserve key order so dimension parsing stays stable.
        data = json.loads(raw, object_pairs_hook=OrderedDict)

    structure = data["structure"]
    index = _parse_dimensions(structure["dimensions"]["observation"])
    columns = _parse_dimensions(structure["dimensions"]["series"])

    datasets = data["dataSets"]
    if len(datasets) != 1:
        raise ValueError("length of 'dataSets' must be 1")
    values = _parse_values(datasets[0], index=index, columns=columns)
    return pd.DataFrame(values, columns=columns, index=index)
def _get_indexer(index):
if index.nlevels == 1:
return [str(i) for i in range(len(index))]
else:
it = itertools.product(*[range(len(level)) for level in index.levels])
return [":".join(map(str, i)) for i in it]
def _fix_quarter_values(value):
"""Make raw quarter values Pandas-friendly (e.g. 'Q4-2018' -> '2018Q4')."""
m = re.match(r"Q([1-4])-(\d\d\d\d)", value)
if not m:
return value
quarter, year = m.groups()
value = "%sQ%s" % (quarter, year)
return value
def _parse_values(dataset, index, columns):
    """Extract the observation matrix from *dataset*.

    Returns an array shaped (len(index), len(columns)); missing series or
    missing observations become NaN.
    """
    n_rows = len(index)
    series = dataset["series"]
    row_keys = _get_indexer(index)

    columns_data = []
    for col_key in _get_indexer(columns):
        try:
            observations = series[col_key]["observations"]
        except KeyError:
            # Whole series absent: fill the column with NaN.
            columns_data.append([np.nan] * n_rows)
            continue
        column = []
        for row_key in row_keys:
            try:
                column.append(observations[row_key][0])
            except KeyError:
                column.append(np.nan)
        columns_data.append(column)
    # Data was collected column-major; transpose to (rows, columns).
    return np.transpose(np.array(columns_data))
def _parse_dimensions(dimensions):
arrays = []
names = []
for key in dimensions:
values = [v["name"] for v in key["values"]]
role = key.get("role", None)
if role in ("time", "TIME_PERIOD"):
values = [_fix_quarter_values(v) for v in values]
values = pd.DatetimeIndex(values)
arrays.append(values)
names.append(key["name"])
midx = pd.MultiIndex.from_product(arrays, names=names)
if len(arrays) == 1 and isinstance(midx, pd.MultiIndex):
# Fix for panda >= 0.21
midx = midx.levels[0]
return midx
|
[
"[email protected]"
] | |
42242438bea8875d7471ea2ddf09291f67a15799
|
30a34b3503decf1b4516039df3106cd152631819
|
/4AL17IS050_T_K_HARSHITH_PRASAD/19_05_2020/2.py
|
90236ef15cb59e0d27deb74598351d1745cafda7
|
[] |
no_license
|
alvas-education-foundation/ISE_3rd_Year_Coding_challenge
|
8ddb6c325bf6ab63e2f73d16573fa0b6e2484136
|
b4074cab4a47aad07ed0fa426eacccbfafdef7f8
|
refs/heads/master
| 2022-11-23T20:52:19.204693 | 2020-07-23T11:28:15 | 2020-07-23T11:28:15 | 265,195,514 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
# This program adds two numbers

num1 = 1.5
num2 = 6.3

# Add two numbers.
# BUGFIX: the result was previously bound to `sum`, shadowing the builtin
# sum() for the rest of the module; use a neutral name instead.
total = float(num1) + float(num2)

# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, total))
|
[
"[email protected]"
] | |
190c0b7174e3ee074dcee7447dd6149444d96d20
|
9030481ef925278a174cbbf58c74bc5058e8d302
|
/contrib/testgen/base58.py
|
0b6e6e1ae339c3c25f894b09b621c4777509d655
|
[
"MIT"
] |
permissive
|
hideoussquid/aureus-13-gui
|
1b8f85f262cbc1970c3d8072b064956073bc4182
|
8865c958ba1680d4615128dabcc3cc4d47a24c51
|
refs/heads/master
| 2021-01-19T08:22:45.795165 | 2017-04-26T07:34:19 | 2017-04-26T07:34:19 | 87,622,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,999 |
py
|
# Copyright (c) 2012 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Aureus base58 encoding and decoding.
Based on https://aureustalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Minimal PyCrypto-style wrapper: SHA256.new(data) -> hashlib object."""
    new = hashlib.sha256

if str != bytes:
    # Python 3.x: emulate Python 2 byte-string semantics.  Indexing/iterating
    # bytes yields ints, so ord() becomes a no-op and chr() produces a
    # one-byte bytes object.
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )

# Base58 alphabet: alphanumerics minus the visually ambiguous 0, O, I, l.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars

def b58encode(v):
    """Encode v, a string of bytes, to base58; returns a str."""
    # Interpret v as one big-endian integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Aureus does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s.
    # BUGFIX: on Python 3 the elements of bytes are ints, so the old
    # `c == '\0'` test never matched and leading zero bytes were dropped.
    nPad = 0
    for c in v:
        if c == '\0' or c == 0: nPad += 1
        else: break

    return (__b58chars[0]*nPad) + result

def b58decode(v, length = None):
    """Decode base58 string v into bytes.

    Returns None when `length` is given and the decoded payload is not
    exactly that many bytes long.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    result = bytes()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    # Restore the leading zero bytes that were compressed into leading '1's.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break

    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None

    return result

def checksum(v):
    """Return 32-bit checksum based on SHA256"""
    return SHA256.new(SHA256.new(v).digest()).digest()[0:4]

def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    return b58encode(v + checksum(v))

def b58decode_chk(v):
    """decode a base58 string, check and remove checksum"""
    result = b58decode(v)
    if result is None:
        return None
    payload, check = result[:-4], result[-4:]
    # Compute the checksum once (previously it was computed twice, and the
    # first result was discarded).
    if check == checksum(payload):
        return payload
    else:
        return None

def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid.  Otherwise returns integer version of address. """
    addr = b58decode_chk(strAddress)
    if addr is None or len(addr)!=21: return None
    version = addr[0]
    return ord(version)

if __name__ == '__main__':
    # Test case (from http://gitorious.org/aureus/python-base58.git)
    # BUGFIX: compare values with ==; `is 0` relied on CPython small-int
    # interning and is a SyntaxWarning on modern Pythons.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
[
"[email protected]"
] | |
58bb40f95b996bb5aaf4c9706c5271c0c5978cc2
|
25d8bac5635ac1cc3577a3593a4512e042ea7ecd
|
/scripts/asyncore-example-2.py
|
27a4738c22e98525faf3534d4f880e283ad582e0
|
[] |
no_license
|
mtslong/demo
|
2333fa571d6d9def7bdffc90f7bcb623b15e6e4b
|
a78b74e0eea7f84df489f5c70969b9b4797a4873
|
refs/heads/master
| 2020-05-18T18:28:48.237100 | 2013-11-11T16:10:11 | 2013-11-11T16:10:11 | 4,136,487 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
import asyncore
import socket, time
# reference time
TIME1970 = 2208988800L
class TimeChannel(asyncore.dispatcher):
    """Serves one RFC 868 TIME request: sends the current time as a 4-byte
    big-endian count of seconds since 1900, then closes (Python 2 example)."""
    def handle_write(self):
        # Seconds since 1900-01-01 (Unix epoch shifted by TIME1970).
        t = int(time.time()) + TIME1970
        # Pack the 32-bit value big-endian, one byte at a time.
        t = chr(t>>24&255) + chr(t>>16&255) + chr(t>>8&255) + chr(t&255)
        self.send(t)
        self.close()
class TimeServer(asyncore.dispatcher):
    """Asyncore TCP server that hands each accepted connection to a
    TimeChannel (Python 2 example; asyncore is removed in Python 3.12)."""
    def __init__(self, port=37):
        # NOTE(review): asyncore.dispatcher.__init__ is never called here;
        # presumably the class-level defaults suffice for this demo -- confirm.
        self.port = port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind(("", port))
        self.listen(5)
        print "listening on port", self.port
    def handle_accept(self):
        # Wrap the accepted socket; TimeChannel replies and closes it.
        channel, addr = self.accept()
        TimeChannel(channel)
server = TimeServer(8037)
asyncore.loop()
## log: adding channel <TimeServer at 8cb940>
## listening on port 8037
## log: adding channel <TimeChannel at 8b2fd0>
## log: closing channel 52:<TimeChannel connected at 8b2fd0>
|
[
"[email protected]"
] | |
deece369baf689aed3e350790563652c99e1df4c
|
ca0d710ed0469beb7f87ae53f5efdef7bac19a27
|
/MainView/migrations/0001_initial.py
|
c421c7915ab1a3ced242749c9b05288a7231a3c2
|
[
"MIT"
] |
permissive
|
CiganOliviu/wedding_invitation
|
5d441d786f742d6a4baf5ff418370c0cfbb1b81e
|
8b243b287b6577b4f5f899e33ade1fec651152f0
|
refs/heads/main
| 2023-03-03T08:12:36.345173 | 2021-02-08T15:37:04 | 2021-02-08T15:37:04 | 333,568,503 | 0 | 0 |
MIT
| 2021-02-08T15:37:05 | 2021-01-27T21:43:34 | null |
UTF-8
|
Python
| false | false | 646 |
py
|
# Generated by Django 3.0.8 on 2020-08-10 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ConfirmAnswer table.
    # Do not hand-edit field definitions; generate follow-up migrations instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ConfirmAnswer',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('submitted', models.BooleanField(default=True)),
                # Set once, when the answer row is created.
                ('answer_sent', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
86a20d0a802a3b77e91c16b62fb4c5702450b991
|
dc69872f21492d34d7da6eee9f0d03f7c09a8a8d
|
/libraries/edge/opensearch/granuleisoresponse.py
|
fd3ed16eb03bd91778c8ff34354a963de13a58c8
|
[
"Apache-2.0"
] |
permissive
|
isabella232/incubator-sdap-edge
|
125e9ba8cb1738d8407222f9d21f5452fc5fa840
|
c725dad1098096048faed9a42a56f3cfc5c25bc5
|
refs/heads/master
| 2022-03-19T18:49:03.752184 | 2019-12-02T23:40:12 | 2019-12-02T23:40:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,127 |
py
|
import datetime
from edge.opensearch.isoresponsebysolr import IsoResponseBySolr
class GranuleIsoResponse(IsoResponseBySolr):
    """ISO response for granules that resolves a preferred download link."""

    def __init__(self, linkToGranule):
        """`linkToGranule` is a comma-separated list of granule reference
        types, ordered by preference."""
        super(GranuleIsoResponse, self).__init__()
        self.linkToGranule = linkToGranule.split(',')

    def _populateChannel(self, solrResponse):
        # Granule responses add nothing at the channel level.
        pass

    def _populateItem(self, solrResponse, doc, item):
        link = self._getLinkToGranule(doc)
        if link is not None:
            doc['link'] = link

    def _getLinkToGranule(self, doc):
        """Return the path of the first preferred reference whose status is
        ONLINE, or None when no suitable reference exists."""
        if 'GranuleReference-Type' not in doc or len(self.linkToGranule) == 0:
            return None
        # Map reference type -> (path, status).
        refs = dict(zip(doc['GranuleReference-Type'],
                        zip(doc['GranuleReference-Path'],
                            doc['GranuleReference-Status'])))
        for refType in self.linkToGranule:
            entry = refs.get(refType)
            # Skip types that are absent or not currently online.
            if entry is not None and entry[1] == 'ONLINE':
                return entry[0]
        return None
|
[
"[email protected]"
] | |
22cce56ad1cf624ac9db09d203ea57c2bd8a72fe
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/sprint/SquaresOfSortedArray.py
|
a58ff6bd16baa33b009ff18fbabf44af40766e9e
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344 | 2020-01-13T02:29:02 | 2020-01-13T02:29:02 | 233,494,907 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 618 |
py
|
class Solution:
    def sortedSquares(self, A):
        """Given ints A sorted ascending, return their squares in ascending
        order using a two-pointer merge from both ends (O(n))."""
        n = len(A)
        out = [0] * n
        lo, hi = 0, n - 1
        # Fill the output from the largest square down to the smallest.
        for pos in range(n - 1, -1, -1):
            left_sq = A[lo] * A[lo]
            right_sq = A[hi] * A[hi]
            if left_sq > right_sq:
                out[pos] = left_sq
                lo += 1
            else:
                out[pos] = right_sq
                hi -= 1
        return out


if __name__ == '__main__':
    solver = Solution()
    ans = solver.sortedSquares([-3, -3, -2, 1])
    print(ans)
|
[
"[email protected]"
] | |
81286eab7404c79ae264329c873fd324031b3ce5
|
b7054c7dc39eeb79aa4aecb77a8de222400b19a7
|
/object.py
|
deee2a4715df5ac355f73bac61921bfff028351c
|
[] |
no_license
|
csuxh/python_fullstack
|
89027133c7f9585931455a6a85a24faf41792379
|
f78571976b3bef104309e95304892fdb89739d9e
|
refs/heads/master
| 2023-05-11T09:36:40.482788 | 2019-06-12T14:21:26 | 2019-06-12T14:21:26 | 145,090,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 988 |
py
|
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
#!@Auther : jack.xia
#!@Time : 2018/5/29 21:56
#!@File : object.py
class Stuf(object):
    """A staff member restricted to a fixed attribute set via __slots__.

    BUGFIX: the original assigned double-underscore attributes
    (``self.__name`` -> mangled ``_Stuf__name``) that were NOT listed in
    ``__slots__``, so every instantiation raised AttributeError.  The
    attributes now match the declared slots exactly.
    """
    count = 0  # class-level counter shared by all instances (never auto-incremented)
    __slots__ = ('name', 'id', 'position')
    def __init__(self, name, id, position):
        self.name = name
        self.id = id
        self.position = position
    def print_obj(self):
        """Print a one-line summary of this staff member."""
        print('name: %s ;id: %d ;position %s ' %(self.name, self.id, self.position))
class Account(Stuf):
    # Accounting staff; no behavior beyond the Stuf base class yet.
    pass
class IT(Stuf):
    # IT staff; no behavior beyond the Stuf base class yet.
    pass
# Self-test of the class-level counter.  NOTE(review): Stuf.__init__ never
# increments Stuf.count, so the nested count checks below cannot all pass as
# written -- confirm whether the counter update was meant to live in __init__.
if Stuf.count != 0:
    print('测试失败!')
else:
    bart = Stuf('Bart', 12, '2-4')
    if Stuf.count != 1:
        # Manual bump followed by a debug print of count + 1.
        print('测试失败!')
        Stuf.count +=1
        print('%d' %(Stuf.count + 1) )
    else:
        lisa = Stuf('lisa', 11, '2-5')
        if Stuf.count != 2:
            print('测试失败!')
        else:
            print('Stuf:', Stuf.count)
            print('测试通过!')
#stu1 = Stuf('jack', 13, '1-2')
#stu1.print_obj()
#print(stu1.id)
#print(stu1.__name)
|
[
"[email protected]"
] | |
08f4aced36fe56bcec48deaa99f0e5ad628d5792
|
b978cf7f47c5cd6295f3c0c104752d3e1e9d89d6
|
/test.py
|
f88b6b9a5b2b21a543c221161f595e2588fd53b5
|
[] |
no_license
|
sepidmnorozy/backup-crawler
|
1e4cd62d5a48b6e3bf974f89d1d513765e5d9c5b
|
73beddd2febd0dec3a0d1f5706557de073035a06
|
refs/heads/master
| 2022-11-18T19:56:43.507394 | 2020-07-22T13:11:53 | 2020-07-22T13:11:53 | 281,674,079 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from pymongo import MongoClient
from rss import rss_reader
import json
# Refresh links.json from the RSS feed; fall back to an empty URL list when
# the fetch does not succeed.
if rss_reader('https://www.khabaronline.ir/rss') == 'Success':
    with open("links.json", 'r') as f:
        urls = json.load(f)
else:
    urls = []
# Connect to the weekly news archive (default local MongoDB).
client = MongoClient()
db = client['newsdb_week']
articles = db.weekarticles
# Keep only URLs that have not been stored yet (absent from the collection).
start_urls = []
for url in urls:
    if articles.find_one({"url": url}) is None:
        start_urls.append(url)
print(start_urls)
print(len(start_urls))
|
[
"[email protected]"
] | |
e286247caef6608e64d3f83668b0e57d5c35c469
|
07e6fc323f657d1fbfc24f861a278ab57338b80a
|
/python/test_chem_reaction.py
|
a45fb01f6793461a249921c48059b569c7d781b2
|
[
"MIT"
] |
permissive
|
ProkopHapala/SimpleSimulationEngine
|
99cf2532501698ee8a03b2e40d1e4bedd9a12609
|
47543f24f106419697e82771289172d7773c7810
|
refs/heads/master
| 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 462 |
py
|
#!/usr/bin/python
import re
import numpy as np
import sys
from pySimE import chemistry as ch
# Scratch/test script (Python 2): balance a chemical equation with the pySimE
# chemistry helpers.  The commented lines are earlier experiments kept for
# reference.
#print ch.str2composition( sys.argv[1] )
#sides = ch.parseReaction( 'Fe+O2=Fe2O3' )
#sides = ch.parseReaction( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print sides
#print ch.reaction2string( sides )
#print ch.balanceReactionString( 'Fe+O2=Fe2O3' )
# Balance the sucrose + saltpeter reaction and print the result.
print ch.balanceReactionString( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print atomicBalance( reaction[0], reaction[1] )
|
[
"[email protected]"
] | |
fb4d6144389ec8eb93a016186bb5908c2683cdc8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_clattering.py
|
3893e7f6289447dca25d947171005c4f61ce3729
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
# class header
class _CLATTERING():
    """Word-base entry for the other-form "clattering"."""
    def __init__(self,):
        self.name = "CLATTERING"
        # BUGFIX: `clatter` was an undefined bare name, so instantiation
        # raised NameError; the definitions list holds the base-word string,
        # mirroring `basic` below.
        self.definitions = ['clatter']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['clatter']
|
[
"[email protected]"
] | |
a0c529fe9ac1114d4ea620a3a09ab644868c12c2
|
7c59bbd4ff413a95dc9d25fbfccd11c6db60202a
|
/python_stack/full_stack_django/test_orm/apps/test_orm_app/migrations/0001_initial.py
|
ff84e3ca46db76c12c5baaeb018a42283bcbe193
|
[] |
no_license
|
soikatesc/DojoAssignments
|
9a185a1164e42a985aea5e49d0ee270fd476d42a
|
c5c84bc9bd4aedd0fe6aa26bf75793e284edb248
|
refs/heads/master
| 2021-01-23T04:34:19.617679 | 2017-05-16T03:52:58 | 2017-05-16T03:52:58 | 86,211,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,310 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-19 00:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Blog and Comment tables.
    # Do not hand-edit field definitions; generate follow-up migrations instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('blog', models.TextField(max_length=1000)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField(max_length=1000)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                # Each comment belongs to one blog; deleting the blog cascades.
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_orm_app.Blog')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
b6cd32dd7c58e44b484925d0981c527b8eb6d61f
|
ddd09683d9cbd681db5dae4e2d036d28bd4d24c1
|
/PA3/BAL3.py
|
f82978400cd729be26ca286631abcea6caa2356a
|
[] |
no_license
|
nivedn3/DL4CV-EE6132-
|
41f9cd877a4c43db0a2f511a57df8b624fbc0a07
|
2cd97c7d2170a8e4fe36b6ccc8443c009e3d003a
|
refs/heads/master
| 2021-01-20T05:41:37.019460 | 2017-11-22T10:17:16 | 2017-11-22T10:17:16 | 101,465,640 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,240 |
py
|
import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
def data(number, size):
    """Generate `number` random binary-addition training examples.

    Returns (inputs, targets): each input interleaves the two random
    `size`-bit operands bit-by-bit as [a0, b0, a1, b1, ...]; each target is
    the (size + 1)-bit sum as a numpy array, zero-padded on the left.
    """
    inputs = []
    targets = []
    for _ in range(number):
        # Draw the two operands (same RNG call order as before).
        a_bits = np.random.choice([0, 1], size).tolist()
        b_bits = np.random.choice([0, 1], size).tolist()

        # Binary addition via an int round-trip.
        a_val = int(''.join(str(bit) for bit in a_bits), 2)
        b_val = int(''.join(str(bit) for bit in b_bits), 2)
        sum_bits = np.array([int(ch) for ch in bin(a_val + b_val)[2:]])

        # Left-pad the sum to exactly size + 1 bits.
        while len(sum_bits) < size + 1:
            sum_bits = np.insert(sum_bits, 0, 0)

        # Interleave the operand bits: [a0, b0, a1, b1, ...].
        pair = []
        for a_bit, b_bit in zip(a_bits, b_bits):
            pair.append(a_bit)
            pair.append(b_bit)

        inputs.append(pair)
        targets.append(sum_bits)
    return inputs, targets
size = 3
hs = 5
x = tf.placeholder(tf.float32,shape = [None,size,2])
y = tf.placeholder(tf.float32,shape = [None,size+1])
w = tf.Variable(tf.random_normal([hs,size+1]))
b = tf.Variable(tf.random_normal([size+1]))
rnn_inp = tf.unstack(x,size,1)
lstm = tf.contrib.rnn.BasicRNNCell(hs)
outputs, states = tf.contrib.rnn.static_rnn(lstm, rnn_inp, dtype=tf.float32)
logits = tf.sigmoid(tf.matmul(outputs[-1], w) + b)
logitst = tf.add(logits,tf.scalar_mul(-0.5,tf.ones_like(logits)))
logitst = tf.nn.relu(logits)
logitst = tf.scalar_mul(1000000,logits)
logitst = tf.clip_by_value(logits,0,1)
logitsc = tf.cast(logitst,tf.int32)
yc = tf.cast(y,tf.int32)
with tf.name_scope("cross_entropy"):
#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
cross_entropy = tf.losses.mean_squared_error(labels = y, predictions = logits)
tf.summary.scalar('cross entropy',cross_entropy)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(logitsc,yc)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
tf.summary.scalar('accuracy',accuracy)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("/home/psycholearner/projects//DL4CV-EE6132-/PA3/2035")
writer.add_graph(sess.graph)
writer2 = tf.summary.FileWriter("/home/psycholearner/projects/DL4CV-EE6132-/PA3/20351")
writer2.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(20000):
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
#batch_y.astype(float)
if i % 25 == 0:
s = sess.run(merged_summary,feed_dict = {x: batch_x,y: batch_y})
writer.add_summary(s,i)
at,batch_yt = data(500,size)
batch_xt = np.array(at)
batch_xt = batch_xt.reshape(500,size,2)
batch_xt = [j[::-1] for j in batch_xt]
batch_xt = np.array(batch_xt)
batch_xt.astype(float)
batch_yt = np.array(batch_yt)
k = sess.run(merged_summary,feed_dict = {x: batch_xt,y: batch_yt})
writer2.add_summary(k,i)
#train_accuracy = sess.run(accuracy.eval(feed_dict={x: batch[0], y: batch[1]}))
#[train_accuracy] = sess.run([cross_entropy],feed_dict = {x: batch_x, y:batch_y})
#[test] = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#logits = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#print('step %d, training accuracy %g %g' % (i, train_accuracy,test))
#[test_acc] = sess.run([test_accuracy],feed_dict = {x: mnist.test.images, y:mnist.test.labels})
#print('step %d, test accuracy %g' % (i, test_acc))
#saver.restore(sess, "/home/psycholearner/projects//DL4CV-EE6132-/PA2/model.ckpt")
sess.run(train_step,feed_dict = {x:batch_x,y:batch_y})
'''
test_data = mnist.test.images[:128].reshape((-1, 28, 28))
test_label = mnist.test.labels[:128]
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: test_data, y: test_label}))
'''
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: batch_x, y: batch_y}))
|
[
"[email protected]"
] | |
f0921f29f3f682945a8f671213dc391d565db088
|
9d41570295cc05b66fd52584a90fe87f29155943
|
/src/crawler/delay.py
|
649fb6282c26a77936487a5bcd18eeda56ff6aa7
|
[
"MIT"
] |
permissive
|
diegojromerolopez/relwrac
|
ed56feeb2a5e455e0fa58f6bc130445e5a0831bd
|
23ee278ab4019b98269419c53feed2194f079c25
|
refs/heads/master
| 2022-12-11T08:06:19.888698 | 2019-11-16T12:35:34 | 2019-11-16T12:35:34 | 219,372,323 | 0 | 0 |
MIT
| 2022-12-08T06:49:05 | 2019-11-03T22:09:35 |
Python
|
UTF-8
|
Python
| false | false | 294 |
py
|
import random
class Delay(object):
    """Factory for delay strategies used between crawler requests."""

    @classmethod
    def none(cls):
        """No delay strategy at all."""
        return None

    @classmethod
    def uniform(cls, lower_bound: float, upper_bound: float):
        """Return a zero-argument callable that draws a delay uniformly
        from [lower_bound, upper_bound] each time it is invoked."""
        def _draw():
            return random.uniform(lower_bound, upper_bound)
        return _draw
|
[
"[email protected]"
] | |
5f1c2a99593a7553184a6e88dacd5cfddfa94dc2
|
11286e7989264134a8a8d610e0f609e6fbff9140
|
/ch06/ch06_6.py
|
611bb36abeda2b0457a21b95c8675ec3d9cc42ed
|
[] |
no_license
|
p2c2e/machine_learning_with_python_cookbook
|
04eeed2e00e0a3e9c0681d4b2f4125aa85485a1d
|
b176323a02f5b5722e312a579ad764a0276ec9c6
|
refs/heads/main
| 2023-01-30T06:54:34.138786 | 2020-12-13T05:02:07 | 2020-12-13T05:02:07 | 320,987,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
# Load libraries
import unicodedata
import sys

# Create text
text_data = ['Hi!!!! I. Love. This. Song....',
             '10000% Agree!!!! #LoveIT',
             'Right?!?!']

# Translation table mapping every Unicode punctuation character
# (category 'P*') to None, i.e. deletion.
punctuation = dict.fromkeys(i for i in range(sys.maxunicode)
                            if unicodedata.category(chr(i)).startswith('P'))

# For each string, remove any punctuation characters.
# BUGFIX: the comprehension's result was previously a bare expression and
# was discarded; bind it so the cleaned strings are actually usable.
cleaned = [string.translate(punctuation) for string in text_data]
|
[
"[email protected]"
] | |
2b59d2bc871b13882aa71629e364e5ee5cde3a00
|
186736f265fa7954e95198955546305ab1b9b981
|
/notesApi/settings.py
|
d3fd465d97e808c8f69bde9fd61320c402413ffb
|
[] |
no_license
|
nova-sangeeth/notes-api
|
6449669870dfb69a72e1aad71c8859ca9de8bfbb
|
d5d15a4df615b0b276ccf8f49efc9e21eb177b65
|
refs/heads/master
| 2022-12-22T11:38:03.065884 | 2020-09-23T19:58:14 | 2020-09-23T19:58:14 | 298,022,798 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,607 |
py
|
"""
Django settings for notesApi project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "v1jk=4%^w9@)42-xumnuc3ho+7!&ug#q3*^y)x^@rlu#-96o*d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# crispy forms
"crispy_forms",
# all auth apps
"django.contrib.sites",
"allauth",
"allauth.account",
"allauth.socialaccount",
# apps
"rest_framework",
"api_notes",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "notesApi.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "notesApi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
ACCOUNT_EMAIL_VERIFICATION = "required"
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
ACCOUNT_EMAIL_REQUIRED = False
|
[
"[email protected]"
] | |
4c92871a9b092599b369eba37b5e69ca438f451d
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week4/Practice/Trace1.py
|
6db80027484d73a47f843382e033603034f1470c
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 267 |
py
|
def onesDigit(n):
    """Return the ones (last decimal) digit of n, via modulo 10."""
    return n % 10
def ct1(L):
    """Mutate L in place -- each slot gains the *current* sum + max of the
    partially-updated list -- then return a copy sorted by ones digit."""
    for idx, _ in enumerate(L):
        # sum/max are recomputed against the list as updated so far.
        L[idx] = L[idx] + sum(L) + max(L)
    # onesDigit keys the comparison: elements are ordered by last digit.
    return sorted(L, key=onesDigit)
a = [2,1,0]
print(ct1(a))
print(a)
|
[
"[email protected]"
] | |
67c8f6e68f42cf14fa5dda19c602fbd7976c47fc
|
b61efe2686feb44c5b0d2fb3094dd2ea94e6ca93
|
/src/control_decision_4.py
|
be6dc49f088a3f399c8bf5df9b0a6c7de0b509ca
|
[] |
no_license
|
idrissahil/bat_wifi_exploration
|
888f0f7243cc4bedeba6fe8d702762e6e2ad5da9
|
5a1bc74c1b35360d21d01e5e2a721b38fb380ac8
|
refs/heads/master
| 2020-05-31T16:38:49.118742 | 2019-06-29T14:03:28 | 2019-06-29T14:03:28 | 190,386,321 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,239 |
py
|
#! /usr/bin/env python
import rospy
import math
from sensor_msgs.msg import BatteryState
from geometry_msgs.msg import Twist, PoseArray, Pose, PoseStamped
rospy.init_node('control_decision_drone')
control_decision_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=1)
state=1
curr_pos = [0,0,0]
rrt_list=[]
index=0
def callback_gps(gps):
    """Pose callback: track the drone position and, once an RRT path has
    arrived, publish the current RRT waypoint as the position setpoint."""
    global curr_pos
    global rrt_list
    global state
    global index
    curr_pos[0] = gps.pose.position.x
    curr_pos[1] = gps.pose.position.y
    curr_pos[2] = gps.pose.position.z
    if state==1:
        print(state)
        #curr_pos[0]=gps.pose.position.x
        #curr_pos[1]=gps.pose.position.y
        #curr_pos[2]=gps.pose.position.z
        if len(rrt_list)>1:
            # An RRT path arrived: switch from exploration to path following.
            state=2
            print(state)
    # Euclidean distance from the drone to the current RRT waypoint.
    # NOTE(review): rrt_list is indexed unconditionally here, so this raises
    # IndexError until a path has been received -- confirm intended ordering.
    dist_point = math.sqrt(math.pow(rrt_list[index].position.x - curr_pos[0], 2)+math.pow(rrt_list[index].position.y - curr_pos[1], 2)+math.pow(rrt_list[index].position.z - curr_pos[2], 2))
    if dist_point<0.3:
        # Within 0.3 m of the waypoint: advance to the next one, clamping at
        # the final waypoint.
        index=index+1
        if index==len(rrt_list):
            index=index-1
    curr_position=PoseStamped()
    #hold_position.pose.position.x= 0
    #hold_position.pose.position.y = 14
    #hold_position.pose.position.z= 1
    curr_position.pose.position.x= rrt_list[index].position.x
    curr_position.pose.position.y= rrt_list[index].position.y
    curr_position.pose.position.z= rrt_list[index].position.z
    curr_position.header.frame_id = "map"
    control_decision_pub.publish(curr_position)
def callback_battery(rrt):
    """Store the received RRT waypoint list.  NOTE(review): despite the
    name, main() subscribes this to the 'visual_marker_rrt' PoseArray topic,
    not to battery state -- presumably a leftover name; confirm."""
    global state
    global curr_pos
    global rrt_list
    rrt_list=rrt.poses
def callback_exploration(explore):
    """Forward exploration setpoints to the controller while still in
    state 1 (exploration mode); ignored once path following starts."""
    global state
    global exploration_point_x
    exploration_point_x = explore.pose.position.x
    print(state)
    if state ==1:
        control_decision_pub.publish(explore)
def main():
    """Wire up the three subscribers and hand control to the rospy loop."""
    exploration_sub = rospy.Subscriber('/mavros/setpoint_position/local1', PoseStamped, callback_exploration)
    battery_sub = rospy.Subscriber('visual_marker_rrt', PoseArray, callback_battery)
    gps_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback_gps)
    rospy.spin()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
59944bb8fa971396a0f7e49931ba6f9bf8ed1091
|
4b29c3e3c8a2cad5071a3fb2ea674253c6f0ef21
|
/pycharm/digiin/case/TestLogin.py
|
70e3880684b38a0a5d5a1bb7b50cd59768931663
|
[] |
no_license
|
yz9527-1/1YZ
|
a0303b00fd1c7f782b7e4219c52f9589dd3b27b7
|
5f843531d413202f4f4e48ed0c3d510db21f4396
|
refs/heads/master
| 2022-11-30T23:50:56.682852 | 2020-08-10T02:11:13 | 2020-08-10T02:11:13 | 286,354,211 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,586 |
py
|
#coding=utf-8
import ddt,data
from common.ExcelUtil import ExcelUtil
import time
import unittest
from selenium import webdriver
def self(args):
pass
class Case(object):
def __init__(self):
pass
def get_case(self):
"""
获取数据
得到有用的数据,并且使数据以邮箱地址、密码、预期结果定位、预期结果的顺序返回
:return:
"""
#获取Excel中的文件数据
sheet='Login'
file=ExcelUtil(sheet_name=sheet)
data=file.get_data()
#得到所需要数据的索引,然后根据索引获取相应顺序的数据
email_index=data[0].index("邮箱地址")
password_index=data[1].index("密码")
expected_element_index=data[2].index("预期结果定位")
expected_index=data[3].index("预期结果")
data_length=data.__len__()
all_cass=[]
#去除header行,和其他无用的数据
for i in range(1,data_length):
case=[]
case.append(data[i][email_index])
case.append(data[i][password_index])
case.append(data[i][expected_element_index])
case.append(data[i][expected_index])
all_cass.append(case)
return all_cass
class Login(object):
def __init__(self,driver):
self.driver=driver
def login(self,email,password):
"""登录步骤"""
#driver=webdriver.Chrome()
#self.driver=driver
#邮箱地址、密码、点击登录按钮操作
time.sleep(1)
if email!=None:
email_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input')
email_element.send_keys(email)
time.sleep(1)
if password!=None:
password_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input')
password_element.send_keys(password)
time.sleep(1)
login_btn=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input')
login_btn.click()
def login_assert(self,assert_type,assert_message):
"""登录断言"""
time.sleep(1)
if assert_type=='email error':
email_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input').text
assert email_message==assert_message
elif assert_type=='password error':
password_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input').text
assert password_message==assert_message
elif assert_type=='login sucess'or assert_type=='login fail':
login_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input').text
assert login_message==assert_message
else:
print("输入的断言类型不正确")
@ddt
class TextLogin(unittest.TestCase):
"""测试登录"""
def setUp(self):
self.driver=webdriver.Chrome()
url="http://192.168.0.30:18069"
self.driver.implicitly_wait(20)
self.driver.maximize_window()
self.driver.get(url=url)
def tearDown(self):
self.driver.quit()
case=Case().get_case()
@data(*case)
@unpack
def test_login(self,password,assert_type,assert_message):
login=Login(driver=self.driver)
login.login(email=email,password=password)
login.login_assert(assert_type=assert_type,assert_message=assert_message)
if __name__=='__main__':
unittest.main
|
[
"[email protected]"
] | |
2d24c2b1849fbb578426985672e634ca4e13e282
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/baiduads/keyword/api/__init__.py
|
d86d7640ef2ab230105e5b576757bc5d81a011fe
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 |
Apache-2.0
| 2023-06-02T05:19:40 | 2022-01-11T07:23:17 |
Python
|
UTF-8
|
Python
| false | false | 151 |
py
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from baiduads.keyword.api.keyword_service import KeywordService
|
[
"[email protected]"
] | |
6bb7357e4c3c78a71da4398592fc78ff38a7ab5c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_09_14_14_36_41_642784.py
|
986bff292e3d397ff9a597fd31a1ee3912e49175
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
import math
def calcula_gaussiana (x,mu,sigma) :
f1 = 1/(sigma*math.sqrt(2*math.pi))
f2 = math.exp((-0.5*((x-mu)/(sigma)**2))
y = f1*f2
return y
|
[
"[email protected]"
] | |
91b306ecb2af69f0d6d781d57251266678f159f2
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/models/research/syntaxnet/dragnn/python/file_diff_test.py
|
9e9f1daa40a64ff9595724e30dbc95591ae299c2
|
[
"Apache-2.0"
] |
permissive
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 |
Python
|
UTF-8
|
Python
| false | false | 1,631 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diff test that compares two files are identical."""
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('actual_file', None, 'File to test.')
flags.DEFINE_string('expected_file', None, 'File with expected contents.')
class DiffTest(tf.test.TestCase):
def testEqualFiles(self):
content_actual = None
content_expected = None
try:
with open(FLAGS.actual_file) as actual:
content_actual = actual.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.actual_file, e.strerror))
try:
with open(FLAGS.expected_file) as expected:
content_expected = expected.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.expected_file, e.strerror))
self.assertTrue(content_actual == content_expected)
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
22e70becf6b691016982f2b828b13d8eeaf45564
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02571/s663642129.py
|
60a84cc30f58c36b037db16bb95f49473b02d187
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 314 |
py
|
def main():
s = input()
t = input()
min = int(1e9)
for i in range(len(s)-len(t)+1):
cnt = 0
for j in range(len(t)):
if s[i+j] != t[j]:
cnt += 1
if min > cnt:
min = cnt
print(min)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
200e9917ea1a71489173315c12ac6c736aac3a7c
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/PyBox/pybox2d/library/Box2D/examples/chain.py
|
c1f19e55dbac3e2fa63532f8b24c48d5d1e22b19
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e79af0d06dbe1710b8ba767355096adc26f63f6435e754284e2a3caa01b35291
size 2366
|
[
"[email protected]"
] | |
cf37aac9d227dfbd4c7430df7abe6facb7d78387
|
9bb01fa882e713aa59345051fec07f4e3d3478b0
|
/tests/cysparse_/sparse/memory/test_copy_CSCSparseMatrix_INT32_t_COMPLEX64_t.py
|
647b1079524c4d905c0e53d370b23d6cd9d3eca0
|
[] |
no_license
|
syarra/cysparse
|
f1169c496b54d61761fdecbde716328fd0fb131b
|
7654f7267ab139d0564d3aa3b21c75b364bcfe72
|
refs/heads/master
| 2020-05-25T16:15:38.160443 | 2017-03-14T21:17:39 | 2017-03-14T21:17:39 | 84,944,993 | 0 | 0 | null | 2017-03-14T12:11:48 | 2017-03-14T12:11:48 | null |
UTF-8
|
Python
| false | false | 4,646 |
py
|
#!/usr/bin/env python
"""
This file tests ``copy()`` for all sparse-likes objects.
"""
import unittest
from cysparse.sparse.ll_mat import *
from cysparse.common_types.cysparse_types import *
########################################################################################################################
# Tests
########################################################################################################################
#######################################################################
# Case: store_symmetry == False, Store_zero==False
#######################################################################
class CySparseCopyNoSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==False
#######################################################################
class CySparseCopyWithSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == False, Store_zero==True
#######################################################################
class CySparseCopyNoSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==True
#######################################################################
class CySparseCopyWithSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
4e554d1fb9a88ed2d04b9397feb311493507f223
|
289da5146b8991942ba22eefe948289ee024d3ff
|
/sheng/tutorial/L3函数/8 global.py
|
380ea400f5deb82c17c96c689facbc7d471efff3
|
[] |
no_license
|
a1424186319/tutorial
|
263585961ab40e7a9a55405263d80057a88298d4
|
909bfc9f850118af7892a7ba4b0f7e3d0798db8a
|
refs/heads/master
| 2022-12-09T01:05:36.063099 | 2019-02-18T12:12:52 | 2019-02-18T12:12:52 | 166,967,437 | 0 | 0 | null | 2021-06-01T23:20:20 | 2019-01-22T09:38:51 |
Python
|
UTF-8
|
Python
| false | false | 500 |
py
|
#
#(老写法 a是全局变量) 从1 加到 100的和
# a = 0
# for i in range(1,101):
# a = a + i
# print(a)
## global(全局) 显示声明变量为全局变量
# total = 0
# def add1(n):
# global total
# total = total + 1
# add1()
# add1()
# add1()
# print(total)
## nonlocal(局部的)https://www.cnblogs.com/saintdingspage/p/7788958.html
def outer():
num = 10
def inner():
nonlocal num
num = 100
print(num)
inner()
print(num)
outer()
|
[
"[email protected]"
] | |
86dce18c7b5d76d01f32df22306412f7ca2feb73
|
d7d19d6918029de88bcf060cea23d5b4a1f7efb1
|
/xiab/apps/subjects/models.py
|
85c54cc05e21150cfe80e2ddb9d412d7c622452e
|
[] |
no_license
|
petercollingridge/xiab
|
8abe2b2b7124eeb0cfa06d2f21ce858a4ffbd975
|
ae84d3d228f3fe9392d0fd894652e290b219b1d2
|
refs/heads/master
| 2020-03-26T04:25:28.163381 | 2019-09-29T16:20:25 | 2019-09-29T16:20:25 | 144,503,055 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class SubjectPage(Page):
summary = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('summary'),
]
def get_context(self, request):
# Update context to include only published posts
context = super().get_context(request)
context['children'] = self.get_children().live()
return context
|
[
"[email protected]"
] | |
8aa017b49485a93529f5842ebd6c1605b6019aba
|
e63c45db069ea20b41fb850c5940e6f99db94914
|
/TranskribusDU/tasks/TablePrototypes/DU_Table_Row.py
|
c69734cdcc09f2b14bb86df4a56c86e3b895773d
|
[
"BSD-3-Clause"
] |
permissive
|
Transkribus/TranskribusDU
|
669607cc32af98efe7380831d15b087b3fc326c9
|
9f2fed81672dc222ca52ee4329eac3126b500d21
|
refs/heads/master
| 2021-12-29T10:14:49.153914 | 2021-12-22T10:53:10 | 2021-12-22T10:53:10 | 72,862,342 | 24 | 6 |
BSD-3-Clause
| 2019-07-22T08:49:02 | 2016-11-04T15:52:04 |
Python
|
UTF-8
|
Python
| false | false | 5,449 |
py
|
# -*- coding: utf-8 -*-
"""
*** Same as its parent apart that text baselines are reflected as a LineString (instead of its centroid)
DU task for ABP Table:
doing jointly row BIO and near horizontal cuts SIO
block2line edges do not cross another block.
The cut are based on baselines of text blocks, with some positive or negative inclination.
- the labels of cuts are SIO
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import math
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
TranskribusDU_version
from common.trace import traceln
from tasks import _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_Table.DU_ABPTableSkewed import GraphSkewedCut, main
from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import SkewedCutAnnotator
from tasks.DU_Table.DU_ABPTableSkewed_txtBIO_sepSIO_line import DU_ABPTableSkewedRowCutLine
from tasks.DU_Table.DU_ABPTableSkewed_txtBIOH_sepSIO_line import DU_ABPTableSkewedRowCutLine_BIOH
# ----------------------------------------------------------------------------
if __name__ == "__main__":
version = "v.01"
usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
# parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels")
#FOR GCN
# parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)")
parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
parser.add_option("--line_see_line", dest='iLineVisibility', action="store",
type=int, default=GraphSkewedCut.iLineVisibility,
help="seeline2line: how far in pixel can a line see another cut line?")
parser.add_option("--block_see_line", dest='iBlockVisibility', action="store",
type=int, default=GraphSkewedCut.iBlockVisibility,
help="seeblock2line: how far in pixel can a block see a cut line?")
parser.add_option("--height", dest="fCutHeight", default=GraphSkewedCut.fCutHeight
, action="store", type=float, help="Minimal height of a cut")
# parser.add_option("--cut-above", dest='bCutAbove', action="store_true", default=False
# ,help="Each object defines one or several cuts above it (instead of below as by default)")
parser.add_option("--angle", dest='lsAngle'
, action="store", type="string", default="-1,0,+1"
,help="Allowed cutting angles, in degree, comma-separated")
parser.add_option("--graph", dest='bGraph', action="store_true", help="Store the graph in the XML for displaying it")
parser.add_option("--bioh", "--BIOH", dest='bBIOH', action="store_true", help="Text are categorised along BIOH instead of BIO")
parser.add_option("--text", "--txt", dest='bTxt', action="store_true", help="Use textual features.")
# ---
#parse the command line
(options, args) = parser.parse_args()
options.bCutAbove = True # Forcing this!
if options.bBIOH:
DU_CLASS = DU_ABPTableSkewedRowCutLine_BIOH
else:
DU_CLASS = DU_ABPTableSkewedRowCutLine
if options.bGraph:
import os.path
# hack
DU_CLASS.bCutAbove = options.bCutAbove
traceln("\t%s.bCutAbove=" % DU_CLASS.__name__, DU_CLASS.bCutAbove)
DU_CLASS.lRadAngle = [math.radians(v) for v in [float(s) for s in options.lsAngle.split(",")]]
traceln("\t%s.lRadAngle=" % DU_CLASS.__name__, DU_CLASS.lRadAngle)
for sInputFilename in args:
sp, sf = os.path.split(sInputFilename)
sOutFilename = os.path.join(sp, "graph-" + sf)
doer = DU_CLASS("debug", "."
, iBlockVisibility=options.iBlockVisibility
, iLineVisibility=options.iLineVisibility
, fCutHeight=options.fCutHeight
, bCutAbove=options.bCutAbove
, lRadAngle=[math.radians(float(s)) for s in options.lsAngle.split(",")]
, bTxt=options.bTxt)
o = doer.cGraphClass()
o.parseDocFile(sInputFilename, 9)
o.addEdgeToDoc()
print('Graph edges added to %s'%sOutFilename)
o.doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True)
SkewedCutAnnotator.gtStatReport()
exit(0)
# ---
try:
sModelDir, sModelName = args
except Exception as e:
traceln("Specify a model folder and a model name!")
_exit(usage, 1, e)
main(DU_CLASS, sModelDir, sModelName, options)
|
[
"[email protected]"
] | |
5fb152a03b97239720932a800dcb93ed2841278e
|
fd6fab64e64031b319b7dc88b66ad960d30fdfc7
|
/assignment02_ModelQueryProcess/run_assignment.py
|
12b99e32a4e8faed2c013945d46efacf258c313c
|
[] |
no_license
|
mkadhirvel/DSC650
|
297fa63da3668f91d9ce17c6195522dc21d8b5f2
|
75556e3a11a3b5801cad7df124dcc19df219934d
|
refs/heads/master
| 2023-03-17T12:19:34.332707 | 2021-02-11T00:29:11 | 2021-02-11T00:29:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
"""
Author: Alan Danque
Date: 20201205
Class: DSC 650
Exercise: Week 2 Assignment - Run all assignments
"""
import os
os.system('python ./kvdb.py')
os.system('python ./documentdb.py')
os.system('python ./objectdb.py')
os.system('python ./rdbms.py')
|
[
"[email protected]"
] | |
a26ec63f56bad3f7991ace4eb345ea52f222d5e9
|
44032f82bcb767175cf86aeccee623eb6cfbd40e
|
/deploy/compose/gpu/__init__.py
|
2303c0b0cf1621e03ddbbda08853f070befb4247
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
veyselkoparal/DeepVideoAnalytics
|
3628d41f8e06547e177a7badd20b399bd7f9028a
|
013f7e1efcc11f9ed5762192a91589aa6b4df359
|
refs/heads/master
| 2020-03-16T04:22:46.603989 | 2018-05-07T06:55:47 | 2018-05-07T06:55:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,462 |
py
|
"""
Code in this file assumes that it is being run via dvactl and git repo root as current directory
"""
CONFIG = {
"deploy/gpu/docker-compose-2-gpus.yml": {"global_model_gpu_id": 0,
"global_model_memory_fraction": 0.1,
"workers":
[(0, 0.25, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(0, 0.5, "LAUNCH_BY_NAME_detector_coco", "coco"),
(1, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(1, 0.19, "LAUNCH_BY_NAME_detector_face", "face"),
(1, 0.15, "LAUNCH_BY_NAME_indexer_facenet", "facenet"),
(1, 0.15, "LAUNCH_BY_NAME_analyzer_tagger", "tagger")]
},
"deploy/gpu/docker-compose-4-gpus.yml": {"global_model_gpu_id": 2,
"global_model_memory_fraction": 0.29,
"workers":
[(0, 0.3, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.4, "LAUNCH_BY_NAME_analyzer_tagger", "tagger"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(1, 1.0, "LAUNCH_BY_NAME_detector_coco", "coco"),
(2, 0.7, "LAUNCH_BY_NAME_detector_face", "face"),
(3, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(3, 0.45, "LAUNCH_BY_NAME_indexer_facenet", "facenet")
]
},
}
SKELETON = """ version: '3'
services:
db:
image: postgres:9.6.6
container_name: dva-pg
volumes:
- dvapgdata:/var/lib/postgresql/data
env_file:
- ../../../custom.env
rabbit:
image: rabbitmq
container_name: dva-rmq
env_file:
- ../../../custom.env
volumes:
- dvarabbit:/var/lib/rabbitmq
redis:
image: bitnami/redis:latest
container_name: dva-redis
env_file:
- ../../../custom.env
volumes:
- dvaredis:/bitnami
webserver:
image: akshayubhat/dva-auto:gpu
container_name: webserver
env_file:
- ../../../custom.env
environment:
- LAUNCH_SERVER_NGINX=1
- LAUNCH_NOTEBOOK=1
command: bash -c "git reset --hard && git pull && sleep 10 && ./start_container.py"
ports:
- "127.0.0.1:8000:80"
- "127.0.0.1:8888:8888"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
non-gpu-workers:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- LAUNCH_BY_NAME_retriever_inception=1
- LAUNCH_BY_NAME_retriever_facenet=1
- LAUNCH_Q_qextract=1
- LAUNCH_Q_qstreamer=1
- LAUNCH_SCHEDULER=1
- LAUNCH_Q_GLOBAL_RETRIEVER=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
{gpu_workers}
global-model:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={global_model_gpu_id}
- GPU_MEMORY={global_model_memory_fraction}
- LAUNCH_Q_GLOBAL_MODEL=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
volumes:
dvapgdata:
dvadata:
dvarabbit:
dvaredis:
"""
BLOCK = """ {worker_name}:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={gpu_id}
- GPU_MEMORY={memory_fraction}
- {env_key}={env_value}
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media"""
def generate_multi_gpu_compose():
for fname in CONFIG:
blocks = []
worker_specs = CONFIG[fname]['workers']
for gpu_id, fraction, env_key, worker_name, in worker_specs:
blocks.append(
BLOCK.format(worker_name=worker_name, gpu_id=gpu_id, memory_fraction=fraction, env_key=env_key,
env_value=1))
with open(fname, 'w') as out:
out.write(SKELETON.format(gpu_workers="\n".join(blocks),
global_model_gpu_id=CONFIG[fname]['global_model_gpu_id'],
global_model_memory_fraction=CONFIG[fname]['global_model_memory_fraction']))
|
[
"[email protected]"
] | |
7266db340ad3c001b2476e0d9677e9d1a795cf48
|
46a5df524f1d96baf94f6eb0f6222f2b856235f3
|
/src/data/image/sliced_grid.py
|
7612a11c9ffd5b6b038a1658df956563308349f9
|
[
"MIT"
] |
permissive
|
PhilHarnish/forge
|
5dfbb0aa2afdb91e55d85187bd86fbeb9b6b2888
|
c544fb8b499e1e13793c94159f4c35bce187311e
|
refs/heads/master
| 2023-03-11T17:23:46.569359 | 2023-02-25T15:09:01 | 2023-02-25T15:09:01 | 1,818,598 | 2 | 0 |
MIT
| 2023-02-25T15:09:02 | 2011-05-29T19:36:53 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,215 |
py
|
import math
from typing import Iterable
import cv2
import numpy as np
from data.image import coloring, image, model
from puzzle.constraints.image import sliced_grid_constraints
from util.geometry import np2d
class SlicedGrid(model.LineSpecification):
_source: image.Image
_constraints: sliced_grid_constraints.SlicedGridConstraints
def __init__(
self,
source: image.Image,
constraints: sliced_grid_constraints) -> None:
self._source = source
self._constraints = constraints
def set_source(self, source: image.Image) -> None:
self._source = source
self._constraints.set_source(source)
def get_debug_data(self) -> np.ndarray:
data = cv2.cvtColor(self._source.get_debug_data(), cv2.COLOR_GRAY2RGB)
c = self._constraints.center
cv2.circle(data, c, 3, coloring.WHITE, thickness=3)
for (theta, distances, divisions), color in zip(
self._constraints.get_specs(),
coloring.colors(self._constraints.slices)):
for distance in distances:
x, y = np2d.move_from(c, theta, distance)
cv2.circle(data, (round(x), round(y)), 3, color, thickness=3)
return data
def __iter__(self) -> Iterable[model.Divisions]:
c = self._constraints.center
max_distance = sum(self._source.shape)
for theta, distances, divisions in self._constraints.get_specs():
endpoints = []
total_distance = 0
for distance in distances:
moved = np2d.move_from(c, theta, distance)
endpoints.append(moved)
total_distance += abs(distance)
start, end = endpoints
division_distance = math.copysign(
total_distance / divisions, -distances[0])
right_angle = theta + math.pi / 2
dx = round(math.cos(right_angle) * max_distance)
dy = round(math.sin(right_angle) * max_distance)
result = []
for i in range(0, divisions + 1): # n_divisions requires n+1 iterations.
x, y = np2d.move_from(start, theta, division_distance * i)
result.append((
theta,
(round(x - dx), round(y - dy)), (round(x + dx), round(y + dy)),
i / divisions))
yield result
def __len__(self) -> int:
return self._constraints.slices
|
[
"[email protected]"
] | |
f1979087cd1398a523b893f6bdb223fc4f3c142e
|
65585dce782bb50d92caa69be2431e094ac36a1f
|
/examples/recursive_dirtree_generator.py
|
50307af4a1c3021c3703469a8d1c6028f5d8ab66
|
[
"Apache-2.0"
] |
permissive
|
vishalbelsare/treelib
|
6e52f594cecb69210332b7092abcf1456be14666
|
12d7efd50829a5a18edaab01911b1e546bff2ede
|
refs/heads/master
| 2023-08-31T07:38:06.461212 | 2022-04-13T15:07:52 | 2022-04-13T15:07:52 | 153,905,842 | 0 | 0 |
NOASSERTION
| 2023-03-27T15:17:00 | 2018-10-20T12:59:18 |
Python
|
UTF-8
|
Python
| false | false | 1,691 |
py
|
#!/usr/bin/env python
"""
Example of treelib usage to generate recursive tree of directories.
It could be useful to implement Directory Tree data structure
2016 samuelsh
"""
import treelib
import random
import hashlib
from string import digits, letters
MAX_FILES_PER_DIR = 10
def get_random_string(length):
return ''.join(random.choice(digits + letters) for _ in range(length))
def build_recursive_tree(tree, base, depth, width):
"""
Args:
tree: Tree
base: Node
depth: int
width: int
Returns:
"""
if depth >= 0:
depth -= 1
for i in xrange(width):
directory = Directory()
tree.create_node("{0}".format(directory.name), "{0}".format(hashlib.md5(directory.name)),
parent=base.identifier, data=directory) # node identifier is md5 hash of it's name
dirs_nodes = tree.children(base.identifier)
for dir in dirs_nodes:
newbase = tree.get_node(dir.identifier)
build_recursive_tree(tree, newbase, depth, width)
else:
return
class Directory(object):
def __init__(self):
self._name = get_random_string(64)
self._files = [File() for _ in xrange(MAX_FILES_PER_DIR)] # Each directory contains 1000 files
@property
def name(self):
return self._name
@property
def files(self):
return self._files
class File(object):
def __init__(self):
self._name = get_random_string(64)
@property
def name(self):
return self._name
tree = treelib.Tree()
base = tree.create_node('Root', 'root')
build_recursive_tree(tree, base, 2, 10)
tree.show()
|
[
"[email protected]"
] | |
4425e109b0efe53b2e51a04bcddab969c531489c
|
d27bf22683710ff090642c05c1df2d13b18c2509
|
/allauth/openid/admin.py
|
0967c5c39ae1d4e1a60416bffb65e3f68ea3ecd1
|
[
"MIT"
] |
permissive
|
snswa/django-allauth
|
b8db554519111e5d022fb137d259e272db9998f4
|
0b58191f5d954d7f5a7c4e5bc8c33cf6fdf0c416
|
refs/heads/master
| 2021-01-18T10:29:31.434368 | 2010-10-21T18:24:56 | 2010-10-21T18:24:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
from django.contrib import admin
from models import OpenIDAccount
class OpenIDAccountAdmin(admin.ModelAdmin):
raw_id_fields = ('user',)
admin.site.register(OpenIDAccount, OpenIDAccountAdmin)
|
[
"[email protected]"
] | |
88199abd4462b61b8c1e468a0166393a1ea355c4
|
699cad5fee497cce94463decf1bf2b811e3fd244
|
/06프로그램의 흐름 제어하기/if.py
|
95d092e7f3d31f5adce1aa2a57ab88f03995c7b0
|
[] |
no_license
|
Jeonghwan-Yoo/brain_python3
|
91974019a29013abe8c9f9ed132c48b404259e2f
|
a22e870515e760aaa497cbc99305977cf2f01a3d
|
refs/heads/master
| 2020-07-27T00:02:29.604848 | 2019-09-16T13:16:09 | 2019-09-16T13:16:09 | 208,802,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
py
|
import sys #파이썬 프로그램을 종료하는 exit()을 사용하기 위해
print('수를 입력하세요 : ')
a=int(input())
if a==0:
print('0은 나눗셈에 이용할 수 없습니다.') #경고 메시지를 출력한 뒤
sys.exit(0) #프로그램을 종료시킵니다.
print('3 /', a, '=', 3/a)
|
[
"[email protected]"
] | |
42e9fe3ab57bd3c1e296f665413fc82fba5070e3
|
21e6a09131ac76d734102c829260c3b8e3a0094b
|
/solutions/21_textfsm/task_21_4.py
|
9986cf1ad1531aef03cb29f28f968dc09e18cec7
|
[] |
no_license
|
Egor-Ozhmegoff/Python-for-network-engineers
|
5fbe8f3a754263ab65c28093fed667684ae76ded
|
6b70f4f9df658698ea0d770a064ee0e12b4e4de2
|
refs/heads/master
| 2023-08-11T20:52:12.999495 | 2021-09-09T14:42:14 | 2021-09-09T14:42:14 | 306,354,285 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,879 |
py
|
# -*- coding: utf-8 -*-
"""
Задание 21.4
Создать функцию send_and_parse_show_command.
Параметры функции:
* device_dict - словарь с параметрами подключения к одному устройству
* command - команда, которую надо выполнить
* templates_path - путь к каталогу с шаблонами TextFSM
* index - имя индекс файла, значение по умолчанию "index"
Функция должна подключаться к одному устройству, отправлять команду show с помощью netmiko,
а затем парсить вывод команды с помощью TextFSM.
Функция должна возвращать список словарей с результатами обработки вывода команды (как в задании 21.1a):
* ключи - имена переменных в шаблоне TextFSM
* значения - части вывода, которые соответствуют переменным
Проверить работу функции на примере вывода команды sh ip int br и устройствах из devices.yaml.
"""
import os
from pprint import pprint
from netmiko import ConnectHandler
import yaml
def send_and_parse_show_command(device_dict, command, templates_path):
if "NET_TEXTFSM" not in os.environ:
os.environ["NET_TEXTFSM"] = templates_path
with ConnectHandler(**device_dict) as ssh:
ssh.enable()
output = ssh.send_command(command, use_textfsm=True)
return output
if __name__ == "__main__":
full_pth = os.path.join(os.getcwd(), "templates")
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
for dev in devices:
result = send_and_parse_show_command(
dev, "sh ip int br", templates_path=full_pth
)
pprint(result, width=120)
# Второй вариант без использования use_textfsm в netmiko
from task_21_3 import parse_command_dynamic
def send_and_parse_show_command(device_dict, command, templates_path, index="index"):
    """Run *command* on one device and parse the raw output with TextFSM.

    Unlike the netmiko use_textfsm variant, this collects the raw output and
    delegates parsing to parse_command_dynamic() from task_21_3, using the
    given TextFSM *index* file in *templates_path*.
    """
    lookup_attributes = {"Command": command, "Vendor": device_dict["device_type"]}
    with ConnectHandler(**device_dict) as connection:
        connection.enable()
        raw_output = connection.send_command(command)
    return parse_command_dynamic(
        raw_output, lookup_attributes, templ_path=templates_path, index_file=index
    )
if __name__ == "__main__":
    # Demo for the parse_command_dynamic-based variant: same devices and
    # command as above, parsed through the TextFSM index file.
    full_pth = os.path.join(os.getcwd(), "templates")
    with open("devices.yaml") as f:
        devices = yaml.safe_load(f)
    for dev in devices:
        result = send_and_parse_show_command(
            dev, "sh ip int br", templates_path=full_pth
        )
        pprint(result, width=120)
|
[
"[email protected]"
] | |
aba8fcd3ea58d7fe66b3bbe8099f8f60d5f4097d
|
b64fcb9da80d12c52bd24a7a1b046ed9952b0026
|
/client_sdk_python/providers/eth_tester/main.py
|
68fdf1d3a68dcfcbb67e83434e4836cccf5581b6
|
[
"MIT"
] |
permissive
|
PlatONnetwork/client-sdk-python
|
e59f44a77690806c8763ed6db938ed8447d42417
|
94ad57bb34b5ee7bb314ac858071686382c55402
|
refs/heads/master
| 2022-07-09T08:49:07.312759 | 2021-12-24T08:15:46 | 2021-12-24T08:15:46 | 173,032,954 | 7 | 16 |
MIT
| 2022-08-31T02:19:42 | 2019-02-28T03:18:03 |
Python
|
UTF-8
|
Python
| false | false | 1,773 |
py
|
from client_sdk_python.providers import (
BaseProvider,
)
from .middleware import (
default_transaction_fields_middleware,
ethereum_tester_fixture_middleware,
ethereum_tester_middleware,
)
class EthereumTesterProvider(BaseProvider):
    """Web3 provider backed by an in-process eth-tester chain.

    RPC calls are dispatched through ``api_endpoints``, a nested mapping of
    ``{namespace: {endpoint: callable(ethereum_tester, params)}}`` keyed by
    the two halves of the RPC method name (e.g. ``eth`` / ``blockNumber``).
    """

    middlewares = [
        default_transaction_fields_middleware,
        ethereum_tester_fixture_middleware,
        ethereum_tester_middleware,
    ]
    ethereum_tester = None
    api_endpoints = None

    def __init__(self, ethereum_tester=None, api_endpoints=None):
        # eth-tester is an optional dependency, so its import is deferred
        # to construction time rather than module import time.
        if ethereum_tester is not None:
            self.ethereum_tester = ethereum_tester
        else:
            from eth_tester import EthereumTester
            self.ethereum_tester = EthereumTester()
        if api_endpoints is not None:
            self.api_endpoints = api_endpoints
        else:
            from .defaults import API_ENDPOINTS
            self.api_endpoints = API_ENDPOINTS

    def make_request(self, method, params):
        """Dispatch an RPC *method* like ``eth_blockNumber`` with *params*."""
        namespace, _, endpoint = method.partition('_')
        try:
            handler = self.api_endpoints[namespace][endpoint]
        except KeyError:
            return {
                "error": "Unknown RPC Endpoint: {0}".format(method),
            }
        try:
            response = handler(self.ethereum_tester, params)
        except NotImplementedError:
            return {
                "error": "RPC Endpoint has not been implemented: {0}".format(method),
            }
        return {
            'result': response,
        }

    def isConnected(self):
        # The backing chain lives in-process, so it is always reachable.
        return True
|
[
"[email protected]"
] | |
0915102cfa0343f989eef246184cd916f8cc46c4
|
4bdbec7ad33b31c392b9d1f88ddf84e4b9230467
|
/cross_origin_test/cross_origin_test/wsgi.py
|
5bf61a3cc71d9dc0d96e87531d460711a5070d70
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
mohawkhq/django-cross-origin
|
4aa775b15612e505404a9eb6cfe24a568561d265
|
f73f5c9a49d4044c34e443153c071b6bb0acda31
|
refs/heads/master
| 2020-06-08T20:13:02.690261 | 2013-11-19T15:33:34 | 2013-11-19T15:33:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
"""
WSGI config for cross_origin_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cross_origin_test.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"[email protected]"
] | |
1c07e950336bf700663363367fa33ecf43c0f407
|
0cb1ff9d0be4387e33f1003ab5cc72bab0345e7a
|
/wildcard/dashboards/settings/password/tests.py
|
3372ec782591fc679b4e3a892d89731e3b8335cc
|
[
"Apache-2.0"
] |
permissive
|
kickstandproject/wildcard
|
65995fb0090c4cfcad34f8373cfc912199ecf5da
|
0ef2a15d8ac6b1d37db964d0baa7e40f9f771bc9
|
refs/heads/master
| 2020-05-17T00:41:09.908059 | 2015-01-27T20:25:33 | 2015-01-28T03:30:22 | 14,288,349 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,365 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
# TODO(mrunge): remove, when keystone v3 supports
# change_own_password, incl. password validation
kver = api.keystone.VERSIONS.active
if kver == 2:
    # INDEX_URL is only defined under keystone v2; the tests below call
    # skipTest() before referencing it when kver == 3.
    INDEX_URL = reverse('horizon:settings:password:index')
class ChangePasswordTests(test.TestCase):
    """Tests for the settings/password panel (effective only on keystone v2)."""

    @test.create_stubs({api.keystone: ('user_update_own_password', )})
    def test_change_password(self):
        """Posting a matching old/new password pair succeeds without form errors."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        api.keystone.user_update_own_password(IsA(http.HttpRequest),
                                              'oldpwd',
                                              'normalpwd',).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'PasswordForm',
                    'current_password': 'oldpwd',
                    'new_password': 'normalpwd',
                    'confirm_password': 'normalpwd'}
        res = self.client.post(INDEX_URL, formData)
        self.assertNoFormErrors(res)

    def test_change_validation_passwords_not_matching(self):
        """Mismatched new/confirm passwords must produce a form error."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        formData = {'method': 'PasswordForm',
                    'current_password': 'currpasswd',
                    'new_password': 'testpassword',
                    'confirm_password': 'doesnotmatch'}
        res = self.client.post(INDEX_URL, formData)
        self.assertFormError(res, "form", None, ['Passwords do not match.'])

    @test.create_stubs({api.keystone: ('user_update_own_password', )})
    def test_change_password_shows_message_on_login_page(self):
        """A successful change redirects to login with an info message."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        api.keystone.user_update_own_password(IsA(http.HttpRequest),
                                              'oldpwd',
                                              'normalpwd').AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'PasswordForm',
                    'current_password': 'oldpwd',
                    'new_password': 'normalpwd',
                    'confirm_password': 'normalpwd'}
        res = self.client.post(INDEX_URL, formData, follow=True)
        info_msg = "Password changed. Please log in again to continue."
        self.assertContains(res, info_msg)

    def test_on_keystone_v3_disabled(self):
        # NOTE(review): this passes whether or not the URL resolves — it only
        # tolerates NoReverseMatch. Presumably a smoke test for the v3 case;
        # confirm the intent before tightening it.
        try:
            reverse('horizon:settings:password:index')
        except NoReverseMatch:
            pass
|
[
"[email protected]"
] | |
ba497dd3afdf87eae4b1e1d9fa84bbe788335f77
|
385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28
|
/source/hall/src/hall/entity/hallfree.py
|
63e7e839d8986e8730bf43df1ef165e4c0acc70a
|
[] |
no_license
|
csirui/hall37
|
17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db
|
5c4eb4b2bf57bbbee4731470c830d8d81915d603
|
refs/heads/master
| 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,420 |
py
|
# -*- coding=utf-8 -*-
from sre_compile import isstring
from datetime import datetime
import freetime.util.log as ftlog
import poker.entity.events.tyeventbus as pkeventbus
from hall.entity import hallconf, hallpopwnd, datachangenotify
from hall.entity.hallconf import HALL_GAMEID
from hall.entity.hallusercond import UserConditionRegister
from poker.entity.biz.exceptions import TYBizConfException
from poker.entity.events.tyevent import EventConfigure, ItemCountChangeEvent
class HallFree(object):
    """One entry of the lobby "free" list, loaded from config."""

    def __init__(self):
        self.freeItemId = None  # unique id of this free item
        self.iconRes = None  # client icon resource
        self.itemName = None  # caption drawn over the client image
        self.states = []  # list of HallFreeState

    def decodeFromDict(self, d):
        """Populate this item from a config dict and return self."""
        self.freeItemId = d.get('freeItemId')
        self.iconRes = d.get('iconRes')
        self.itemName = d.get("itemName", "")
        self.states = [HallFreeState().decodeFromDict(stateDict)
                       for stateDict in d.get('states', [])]
        return self
class HallFreeState(object):
    """Display/behaviour state of a free item for the current user."""

    def __init__(self):
        self.desc = ''  # description text (str)
        self.btnText = ''  # button caption (str)
        self.hasMark = False  # whether to draw a red-dot mark
        self.enable = True  # whether the button is clickable
        self.visible = True  # whether the entry is shown at all
        self.conditionList = None  # user conditions gating this state
        self.todotaskList = None  # todo-task factories triggered on click

    def decodeFromDict(self, d):
        """Populate this state from a config dict and return self."""
        self.desc = d.get('desc', '')
        self.btnText = d.get('btnText', '')
        self.hasMark = d.get('hasMark', False)
        self.enable = d.get('enable', True)
        self.visible = d.get('visible', True)
        self.conditionList = UserConditionRegister.decodeList(d.get('conditions', []))
        self.todotaskList = [hallpopwnd.decodeTodotaskFactoryByDict(todotaskDict)
                             for todotaskDict in d.get('todotasks', [])]
        return self
class HallFreeTemplate(object):
    """Named group of free items that is bound to a set of clients."""

    def __init__(self):
        self.name = None  # template name (non-empty string after decode)
        self.freeItems = None  # resolved HallFree instances

    def decodeFromDict(self, d, freeItemMap):
        """Populate from config; item ids missing from freeItemMap are dropped."""
        self.name = d.get('name')
        if not isstring(self.name) or not self.name:
            raise TYBizConfException(d, 'HallFreeTemplate.name must be not empty string')
        self.freeItems = [freeItemMap[itemId]
                          for itemId in d.get('freeItems', [])
                          if itemId in freeItemMap]
        return self
# Set to True once _initialize() has run.
_inited = False
# key=freeItemId, value=HallFree (filled by _reloadConf)
_freeItemMap = {}
# key=templateName, value=HallFreeTemplate (filled by _reloadConf)
_templateMap = {}
def _reloadConf():
    """Rebuild the module-level free-item and template maps from config.

    Raises TYBizConfException on duplicate freeItemId or template name.
    """
    global _freeItemMap
    global _templateMap
    freeItemMap = {}
    templateMap = {}
    conf = hallconf.getFreeConf()
    for freeDict in conf.get('freeItems', []):
        freeItem = HallFree().decodeFromDict(freeDict)
        if freeItem.freeItemId in freeItemMap:
            raise TYBizConfException(freeDict, 'Duplicate freeId %s' % (freeItem.freeItemId))
        freeItemMap[freeItem.freeItemId] = freeItem
    if ftlog.is_debug():
        ftlog.debug('hallfree._reloadConf freeIds=', freeItemMap.keys())
    for templateDict in conf.get('templates', []):
        template = HallFreeTemplate().decodeFromDict(templateDict, freeItemMap)
        if template.name in templateMap:
            raise TYBizConfException(templateDict, 'Duplicate templateName %s' % (template.name))
        templateMap[template.name] = template
    # Swap the fully-built maps in only after parsing succeeded, so a bad
    # config leaves the previous state intact.
    _freeItemMap = freeItemMap
    _templateMap = templateMap
    ftlog.debug('hallfree._reloadConf successed freeIds=', _freeItemMap.keys(),
                'templateNames=', _templateMap.keys())
def _onConfChanged(event):
    """Reload the free config when the 'game:9999:free:tc' entry changes."""
    if not _inited:
        return
    if not event.isChanged('game:9999:free:tc'):
        return
    ftlog.debug('hallfree._onConfChanged')
    _reloadConf()
def _onItemCountChanged(event):
    """Push a data-change notify so the client refreshes its free list."""
    if not _inited:
        return
    ftlog.debug('hallfree._onItemCountChanged', event.userId)
    datachangenotify.sendDataChangeNotify(HALL_GAMEID, event.userId, ['free', 'promotion_loc'])
def _initialize():
    """One-time module init: load config and subscribe to global events."""
    ftlog.debug('hallfree._initialize begin')
    global _inited
    if not _inited:
        _inited = True
        _reloadConf()
        # Reload on config change; notify clients when item counts change.
        pkeventbus.globalEventBus.subscribe(EventConfigure, _onConfChanged)
        pkeventbus.globalEventBus.subscribe(ItemCountChangeEvent, _onItemCountChanged)
    ftlog.debug('hallfree._initialize end')
# Get the free-list config data for the given user.
def getFree(gameId, userId, clientId, timestamp):
    """Return the list of HallFree items for the client's template.

    Falls back to the 'default' template when the client has no mapping;
    returns an empty list when neither template exists.
    """
    ret = []
    templateName = hallconf.getFreeTemplateName(clientId)
    template = _templateMap.get(templateName)
    if ftlog.is_debug():
        ftlog.debug('hallfree.getFree gameId=', gameId,
                    'userId=', userId,
                    'clientId=', clientId,
                    'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
                    'templateName=', templateName)
    if not template:
        template = _templateMap.get('default')
    if ftlog.is_debug():
        ftlog.debug('hallfree.getFree gameId=', gameId,
                    'userId=', userId,
                    'clientId=', clientId,
                    'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
                    'freeItems=', [fi.freeItemId for fi in template.freeItems] if template else [])
    if template:
        for freeItem in template.freeItems:
            ret.append(freeItem)
    return ret
|
[
"[email protected]"
] | |
2892ca34dda7c6bac350599fac9f051e71e64ce2
|
f0c6b43e325064511c4e2d7ce9c59e88a12d81d5
|
/Assignment/DataTypes/problem10.py
|
0565ed8531943f1e8764d0ac461c28ed26bea342
|
[] |
no_license
|
kendraregmi/Assignment1
|
bda8402fa216bf54789c4d3b5092a5540d4ee68d
|
83a8365e508f5b83cee71fc14155b7838103b3ba
|
refs/heads/main
| 2023-03-26T17:42:54.255731 | 2021-03-08T07:29:04 | 2021-03-08T07:29:04 | 344,406,856 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 245 |
py
|
# 10. Write a Python program to remove the characters which have odd index
# values of a given string (i.e. keep only the characters at even indexes).
my_string = "Kathmandu"
result = "".join(ch for i, ch in enumerate(my_string) if i % 2 == 0)
print(result)
|
[
"[email protected]"
] | |
eb2c8258f0156a186c1b5525851bf8627d0ebad7
|
d7f43ee7b91c216b1740dead4cc348f3704d2f5a
|
/src/beginner_tutorials/scripts/add_two_ints_server.py~
|
ef69b404916f90b0f5cf43bc27b89200b6fda426
|
[] |
no_license
|
capslockqq/catkin_ws
|
26f734cf45cb5fe15301f5448a6005f2b21073b5
|
a0989427e42988f36ae9e4d83ba7eb871a56b64e
|
refs/heads/master
| 2021-08-24T07:04:07.551220 | 2017-12-08T14:42:19 | 2017-12-08T14:42:19 | 113,569,359 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 465 |
#!/usr/bin/env python
"""ROS service server for the beginner_tutorials AddTwoInts service."""
# Bug fix: the original import line was truncated ("from ... import" with no
# names), which is a SyntaxError; both service classes used below must be
# imported explicitly.
from beginner_tutorials.srv import AddTwoInts, AddTwoIntsResponse
import rospy

def handle_add_two_ints(req):
    """Service callback: return the sum of the two request integers."""
    # Parenthesized single-argument print works on both Python 2 and 3.
    print("Returning [%s + %s = %s]" % (req.a, req.b, (req.a + req.b)))
    return AddTwoIntsResponse(req.a + req.b)

def add_two_ints_server():
    """Register the 'add_two_ints' service and block until shutdown."""
    rospy.init_node('add_two_ints_server')
    s = rospy.Service('add_two_ints', AddTwoInts, handle_add_two_ints)
    print("Ready to add two ints.")
    rospy.spin()

if __name__ == "__main__":
    add_two_ints_server()
|
[
"ubuntu@ubuntu.(none)"
] |
ubuntu@ubuntu.(none)
|
|
58d0a8905b5a6546432140bf05e9ab8f06dfb857
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/77/usersdata/216/42255/submittedfiles/exercicio24.py
|
f70202f0601ef08a2d7723413c6c64658abd3963
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
# -*- coding: utf-8 -*-
# Prints every common divisor of a and b that is smaller than a.
import math  # NOTE(review): imported but never used in this script
a=int(input('Digite um número:'))
b=int(input('Digite um número:'))
i=0  # NOTE(review): redundant — the for loop rebinds i immediately
# NOTE(review): range(1, a) stops before a, so a itself is never printed
# even when it divides b — confirm this is the intended behaviour.
for i in range(1,a,1):
    # i divides both a and b -> common divisor.
    if a%i==0 and b%i==0:
        print(i)
|
[
"[email protected]"
] | |
8ca9fad8cd78573c8d3ca2e9a76b0d607134371b
|
ce214c2cbecb3591665b2748c1c777dd83625f96
|
/lesson_13/api/routers.py
|
ff242ade61e4fcebb7697a8a760da6bb173b9707
|
[] |
no_license
|
antonplkv/itea_advanced_june
|
e35af2f10d93d8ffb43664cd0cf7dfd46b969aef
|
c20e81167bfd87b7e16f340210b246a4cbc1751e
|
refs/heads/master
| 2022-12-04T20:27:21.908624 | 2020-08-19T18:19:49 | 2020-08-19T18:19:49 | 272,512,423 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
from flask import Flask
from flask_restful import Api
from .resources import AuthorResource
# Flask app exposing a single RESTful resource.
app = Flask(__name__)
api = Api(app)
# /authors -> collection endpoint, /authors/<author_id> -> single author.
api.add_resource(AuthorResource, '/authors', '/authors/<author_id>')
|
[
"[email protected]"
] | |
91ff95988bce1d58997328ad6d6def469c878d07
|
452c33c0622ec36e93e6ff6637533a15a067a8a4
|
/samples/client/petstore/python-experimental/petstore_api/models/outer_composite.py
|
f3887c8a3267c6a6532d498e3de2a32c135c4da3
|
[
"Apache-2.0"
] |
permissive
|
eric-erki/openapi-generator
|
40c4294433bada9f693aca0c32326609e2234f9c
|
0ea1ead59e41e4e8a959235dc8234d44447a9658
|
refs/heads/master
| 2023-01-07T03:33:36.315459 | 2019-09-20T18:13:33 | 2019-09-20T18:13:33 | 209,955,560 | 1 | 3 |
Apache-2.0
| 2023-01-04T10:58:25 | 2019-09-21T09:09:49 |
Java
|
UTF-8
|
Python
| false | false | 4,876 |
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OuterComposite(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'my_number': 'float',
        'my_string': 'str',
        'my_boolean': 'bool',
    }

    attribute_map = {
        'my_number': 'my_number',  # noqa: E501
        'my_string': 'my_string',  # noqa: E501
        'my_boolean': 'my_boolean',  # noqa: E501
    }

    def __init__(self, my_number=None, my_string=None, my_boolean=None):  # noqa: E501
        """OuterComposite - a model defined in OpenAPI

        Keyword Args:
            my_number (float): [optional]  # noqa: E501
            my_string (str): [optional]  # noqa: E501
            my_boolean (bool): [optional]  # noqa: E501
        """
        self._my_number = None
        self._my_string = None
        self._my_boolean = None
        self.discriminator = None

        # Only assign attributes the caller actually supplied.
        if my_number is not None:
            self.my_number = my_number  # noqa: E501
        if my_string is not None:
            self.my_string = my_string  # noqa: E501
        if my_boolean is not None:
            self.my_boolean = my_boolean  # noqa: E501

    @property
    def my_number(self):
        """Gets the my_number of this OuterComposite.  # noqa: E501

        :return: The my_number of this OuterComposite.  # noqa: E501
        :rtype: float
        """
        return self._my_number

    @my_number.setter
    def my_number(self, my_number):
        """Sets the my_number of this OuterComposite.

        :param my_number: The my_number of this OuterComposite.  # noqa: E501
        :type: float
        """
        self._my_number = my_number

    @property
    def my_string(self):
        """Gets the my_string of this OuterComposite.  # noqa: E501

        :return: The my_string of this OuterComposite.  # noqa: E501
        :rtype: str
        """
        return self._my_string

    @my_string.setter
    def my_string(self, my_string):
        """Sets the my_string of this OuterComposite.

        :param my_string: The my_string of this OuterComposite.  # noqa: E501
        :type: str
        """
        self._my_string = my_string

    @property
    def my_boolean(self):
        """Gets the my_boolean of this OuterComposite.  # noqa: E501

        :return: The my_boolean of this OuterComposite.  # noqa: E501
        :rtype: bool
        """
        return self._my_boolean

    @my_boolean.setter
    def my_boolean(self, my_boolean):
        """Sets the my_boolean of this OuterComposite.

        :param my_boolean: The my_boolean of this OuterComposite.  # noqa: E501
        :type: bool
        """
        self._my_boolean = my_boolean

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Idiom fix: plain dict iteration replaces six.iteritems — it is
        # equivalent on both Python 2 and 3 and drops the six dependency
        # from this class.
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OuterComposite):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
9aab50959e6376757d51b3fef3e88483eb1d7494
|
07c3124153a6909f19a21c3c664d8e3f8e0481d0
|
/fractals/sierpinski_triangle/sierpinski_triangle.py
|
aae6e3da8f1aaeec51acdaeab10b98c9d1557216
|
[] |
no_license
|
gridl/art-of-turtle-programming
|
94ed422a4e75f83e4c3abf7910ed9e5ed8a40aa9
|
db6b2c1059bffc9df468691c6ecf1c110b38aafd
|
refs/heads/master
| 2020-03-19T16:20:48.680667 | 2015-12-15T05:46:03 | 2015-12-15T05:46:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
from turtle import *
import math
# Batch drawing: screen refreshes are deferred until update() at the end.
tracer(1, 0)
# Fixed 960x810 world coordinate system, independent of window size.
setworldcoordinates(0, 0, 960, 810)
bgcolor(0.1, 0.1, 0.1)
BASE_SIZE = 13  # side length of the smallest triangle
# Height of an equilateral triangle with side BASE_SIZE.
BASE_HEIGHT = BASE_SIZE * math.sin(60 * (math.pi / 180))
START_X = 50
START_Y = 20
def draw_triangle(x, y, color):
    """Draw one equilateral triangle whose bottom-left corner is at (x, y)."""
    penup()
    pencolor(color)
    goto(x, y)  # go to bottom-left corner
    pendown()
    setheading(60)  # first side climbs up-right
    forward(BASE_SIZE)  # draw first side
    right(120)
    forward(BASE_SIZE)  # draw second side
    right(120)
    forward(BASE_SIZE)  # draw third side
def draw_sierpinski(x, y, level, color):
    """Recursively draw a Sierpinski triangle of the given level at (x, y).

    Level 0 draws three unit triangles (bottom-left, top, bottom-right);
    level n places three level n-1 copies, each offset by 2**level base
    widths/heights.
    """
    if level == 0:
        draw_triangle(x, y, color)
        draw_triangle(x + (BASE_SIZE * 0.5), y + BASE_HEIGHT, color)
        draw_triangle(x + BASE_SIZE, y, color)
    else:
        draw_sierpinski(x, y, level - 1, color)
        draw_sierpinski(x + (BASE_SIZE * 0.5 * (2 ** level)), y + (BASE_HEIGHT * (2 ** level)), level - 1, color)
        draw_sierpinski(x + (BASE_SIZE * (2 ** level)), y, level - 1, color)
# loop from 5 to 0, drawing 5 sets of sierpinski triangles each with a different color
for i in range(5, -1, -1):
    # Color shifts from dark gray (i=5) toward pure red (i=0); smaller i
    # is drawn later and therefore on top.
    red = 1 - (0.2 * i)
    green = 0.1 * i
    blue = 0.1 * i
    draw_sierpinski(START_X, START_Y, i, (red, green, blue))
hideturtle()
update()  # flush the deferred drawing set up by tracer(1, 0)
exitonclick()
|
[
"[email protected]"
] | |
dd2581b2b922761111f73de6a66b37bef9ca71ad
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/servicebus/latest/list_disaster_recovery_config_keys.py
|
25a135b1c7de1f742920f2d68de3190e3c721078
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 6,888 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
]
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
    """
    Namespace/ServiceBus Connection String
    """
    def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
        # Every output is validated to be a str, then stored via pulumi.set
        # so that @pulumi.output_type can expose it through the getters below.
        if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
            raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
        pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
        if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
            raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
        pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
        if key_name and not isinstance(key_name, str):
            raise TypeError("Expected argument 'key_name' to be a str")
        pulumi.set(__self__, "key_name", key_name)
        if primary_connection_string and not isinstance(primary_connection_string, str):
            raise TypeError("Expected argument 'primary_connection_string' to be a str")
        pulumi.set(__self__, "primary_connection_string", primary_connection_string)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_connection_string and not isinstance(secondary_connection_string, str):
            raise TypeError("Expected argument 'secondary_connection_string' to be a str")
        pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter(name="aliasPrimaryConnectionString")
    def alias_primary_connection_string(self) -> str:
        """
        Primary connection string of the alias if GEO DR is enabled
        """
        return pulumi.get(self, "alias_primary_connection_string")

    @property
    @pulumi.getter(name="aliasSecondaryConnectionString")
    def alias_secondary_connection_string(self) -> str:
        """
        Secondary connection string of the alias if GEO DR is enabled
        """
        return pulumi.get(self, "alias_secondary_connection_string")

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        A string that describes the authorization rule.
        """
        return pulumi.get(self, "key_name")

    @property
    @pulumi.getter(name="primaryConnectionString")
    def primary_connection_string(self) -> str:
        """
        Primary connection string of the created namespace authorization rule.
        """
        return pulumi.get(self, "primary_connection_string")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        A base64-encoded 256-bit primary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter(name="secondaryConnectionString")
    def secondary_connection_string(self) -> str:
        """
        Secondary connection string of the created namespace authorization rule.
        """
        return pulumi.get(self, "secondary_connection_string")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        A base64-encoded 256-bit secondary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the already-resolved result awaitable so callers may use the
        # blocking or async form interchangeably; the dead `if False: yield`
        # turns __await__ into a generator without ever yielding.
        if False:
            yield self
        return ListDisasterRecoveryConfigKeysResult(
            alias_primary_connection_string=self.alias_primary_connection_string,
            alias_secondary_connection_string=self.alias_secondary_connection_string,
            key_name=self.key_name,
            primary_connection_string=self.primary_connection_string,
            primary_key=self.primary_key,
            secondary_connection_string=self.secondary_connection_string,
            secondary_key=self.secondary_key)
def list_disaster_recovery_config_keys(alias: Optional[str] = None,
                                       authorization_rule_name: Optional[str] = None,
                                       namespace_name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
    """
    List the keys and connection strings of the authorization rule for a
    Service Bus disaster-recovery configuration (alias).

    :param str alias: The Disaster Recovery configuration name
    :param str authorization_rule_name: The authorization rule name.
    :param str namespace_name: The namespace name
    :param str resource_group_name: Name of the Resource group within the Azure subscription.
    """
    __args__ = dict()
    __args__['alias'] = alias
    __args__['authorizationRuleName'] = authorization_rule_name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure provider; the result is wrapped
    # in the awaitable subclass so async callers can `await` it.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus/latest:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value

    return AwaitableListDisasterRecoveryConfigKeysResult(
        alias_primary_connection_string=__ret__.alias_primary_connection_string,
        alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
        key_name=__ret__.key_name,
        primary_connection_string=__ret__.primary_connection_string,
        primary_key=__ret__.primary_key,
        secondary_connection_string=__ret__.secondary_connection_string,
        secondary_key=__ret__.secondary_key)
|
[
"[email protected]"
] | |
2d5ccf17197699d50e0b2fa57a4243eb7ca907aa
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/carbuisness/main_currency_supply.py
|
ed84e5c37083ff51e2afabd4f2216adcf44c254f
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from scrapy.cmdline import execute
import sys
import os
# Spider name to run; must match a spider registered in this scrapy project.
website = "currency_supply"
# Make the project package importable when this script is run directly.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Equivalent to running "scrapy crawl currency_supply" from the CLI.
execute(["scrapy", "crawl", website])
|
[
"[email protected]"
] | |
e59eaebb53a1dd0de0208e35718b32e92973811d
|
b7126fb70f72fea0e7bba6fe2fef6925302ef07b
|
/tceh5_opp/self_work/task1.py
|
735da977c22bdb199e6944c42bfec6b0ac104bb8
|
[] |
no_license
|
duk1edev/tceh
|
79cd909c5a6221a2ca77d342b917462345140faa
|
21649d42488883beb58d709f4a9d1a05c75d2900
|
refs/heads/master
| 2021-07-12T10:20:22.330005 | 2020-04-29T09:24:08 | 2020-04-29T09:24:08 | 239,434,484 | 0 | 0 | null | 2021-03-20T03:38:26 | 2020-02-10T05:25:33 |
Python
|
UTF-8
|
Python
| false | false | 1,781 |
py
|
# 1. Создать класс корзина у кторого можно выставить разную вмесительность
# для разных обьектов. В обект можн опомещать разные
# 2. Создать класс - пакет в кторый тожно можн опомещать предмет у него тоже есть вместимость
# 3. Любой класс что бы можно было помещать в корзину и в пакет
# 4. Если вместимоть не достаточна сказать, что обьект поместить нельзя
class Trash:
    """A trash bin with a fixed capacity.

    Anything exposing a numeric ``size`` can be offered via get_obj();
    objects larger than the bin are rejected with a message.
    """

    def __init__(self, set_size):
        # Capacity of this particular bin.
        self.size = set_size

    def get_obj(self, obj):
        """Report whether *obj* fits into this trash bin."""
        if obj.size <= self.size:
            print(f'You put the {obj} size {obj.size} to the trash')
        else:
            print(f'You could not put this stuff({obj.size} size) to that trash, \n'
                  f'trash size is {self.size}')
class Packet(Trash):
    """A packet: same capacity behaviour as Trash, with its own wording."""

    def __init__(self, set_size):
        # Capacity of this packet.
        self.size = set_size

    def get_obj(self, obj):
        """Report whether *obj* fits into this packet."""
        if obj.size <= self.size:
            print(f'You put the {obj} size {obj.size} to the packet')
        else:
            print(f'You could not put this stuff({obj.size} size) to that packet, \n'
                  f'packet size is {self.size}')
class SomeStuff:
    """A simple object with a size, used to try out Trash and Packet."""

    def __init__(self, set_size):
        # size decides whether this stuff fits into a given container.
        self.size = set_size
small_trash = Trash(5)
middle_trash = Trash(10)
big_trash = Trash(50)
small_packet = Packet(3)
middle_packet = Packet(5)
big_packet = Packet(10)
apple = SomeStuff(25)
print(apple.size)
garbage = SomeStuff(50)
# apple (25) does not fit the small trash (5) -> rejection message.
small_trash.get_obj(apple)
# garbage (50) exactly fits the big trash (50).
big_trash.get_obj(garbage)
# garbage (50) is far too big for the big packet (10).
big_packet.get_obj(garbage)
|
[
"[email protected]"
] | |
35614a4b8e4a335c54fd174d3cf65ff29c823483
|
db9ff8accaa4d8d4a96d3f9122c0fdc5e83ea2a5
|
/test/test_price_quantity.py
|
12635c2d23b1dcacf3ca517e059fcaba37c32bd5
|
[] |
no_license
|
agtt/ebay-openapi-inventory
|
4754cdc8b6765acdb34f6b8f89b017ccbc6b1d2b
|
d990c26f16e811431892ac6401c73c4599c2d414
|
refs/heads/master
| 2023-06-17T10:53:43.204075 | 2021-07-14T18:32:38 | 2021-07-14T18:32:38 | 386,039,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,200 |
py
|
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
The version of the OpenAPI document: 1.13.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.offer_price_quantity import OfferPriceQuantity
from openapi_client.model.ship_to_location_availability import ShipToLocationAvailability
globals()['OfferPriceQuantity'] = OfferPriceQuantity
globals()['ShipToLocationAvailability'] = ShipToLocationAvailability
from openapi_client.model.price_quantity import PriceQuantity
class TestPriceQuantity(unittest.TestCase):
    """PriceQuantity unit test stubs"""

    def setUp(self):
        # No shared fixtures needed yet.
        pass

    def tearDown(self):
        pass

    def testPriceQuantity(self):
        """Test PriceQuantity"""
        # FIXME: construct object with mandatory attributes with example values
        # model = PriceQuantity()  # noqa: E501
        pass
|
[
"[email protected]"
] | |
49ad24efef53d23c86760ee96c78f87e3dbe2cf5
|
7200d065030f2daf00a5249e9e4fe569438c78c7
|
/scrapers/dizilab_scraper.py
|
76713de8e84af6b17220f3eaed0295e7b7a714f8
|
[] |
no_license
|
matt2005/salts
|
c765b037be1a2bb0e486ae9b30eceaf2b7c3bf14
|
5f71bc71e7b0b480f40d948d5568604dd181b6ad
|
refs/heads/master
| 2020-12-31T04:16:45.574380 | 2015-12-07T22:57:31 | 2015-12-07T22:57:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,957 |
py
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import urllib
from salts_lib import kodi
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
BASE_URL = 'http://dizilab.com'
class Dizilab_Scraper(scraper.Scraper):
    """Scraper for dizilab.com: finds direct video stream URLs for TV episodes."""
    base_url = BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        # The base URL is user-configurable through the addon settings.
        self.timeout = timeout
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
    @classmethod
    def provides(cls):
        # Only TV shows and episodes are supported (no movies).
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
    @classmethod
    def get_name(cls):
        return 'Dizilab'
    def resolve_link(self, link):
        # Links scraped here are already direct; no extra resolution needed.
        return link
    def format_source_label(self, item):
        """Return the display label '[quality] host' for a source item."""
        label = '[%s] %s ' % (item['quality'], item['host'])
        return label
    def get_sources(self, video):
        """Scrape the episode page and return a list of hoster dicts.

        Stream URLs are pulled from inline JS player config ({file: "..."}
        blobs); anything hosted on 'dizlab' is skipped.
        """
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            for match in re.finditer('{\s*file\s*:\s*"([^"]+)', html):
                stream_url = match.group(1)
                if 'dizlab' in stream_url.lower():
                    continue
                hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': self._gv_get_quality(stream_url), 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                hosters.append(hoster)
        return hosters
    def get_url(self, video):
        return super(Dizilab_Scraper, self)._default_get_url(video)
    def _get_episode_url(self, show_url, video):
        # Primary match: the /sezon-S/bolum-E path. Fallback: match by the
        # episode title via the generic helper.
        episode_pattern = 'class="episode"\s+href="([^"]+/sezon-%s/bolum-%s)"' % (video.season, video.episode)
        title_pattern = 'class="episode-name"\s+href="(?P<url>[^"]+)">(?P<title>[^<]+)'
        return super(Dizilab_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
    def search(self, video_type, title, year):
        """Search the site archive; return [{'url', 'title', 'year'}, ...].

        A result is kept when the year is unknown on either side or matches.
        Each scrape step is wrapped in try/except so one malformed result
        card does not abort the whole search.
        """
        search_url = urlparse.urljoin(self.base_url, '/arsiv?limit=&tur=&orderby=&ulke=&order=&yil=&dizi_adi=')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, cache_limit=8)
        results = []
        for item in dom_parser.parse_dom(html, 'div', {'class': 'tv-series-single'}):
            try:
                url = re.search('href="([^"]+)', item).group(1)
            except:
                url = ''
            try:
                match_year = re.search('<span>\s*(\d{4})\s*</span>', item).group(1)
            except:
                match_year = ''
            try:
                match_title = dom_parser.parse_dom(item, 'a', {'class': 'title'})
                match_title = re.search('([^>]+)$', match_title[0]).group(1)
                match_title = match_title.strip()
            except:
                match_title = ''
            if url and match_title and (not year or not match_year or year == match_year):
                result = {'url': self._pathify_url(url), 'title': match_title, 'year': ''}
                results.append(result)
        return results
|
[
"[email protected]"
] | |
5804b448d279b66e3077be6b2016ef4e6230d463
|
46279163a543cd8820bdc38133404d79e787c5d2
|
/benchmarks/tensorexpr/reduction.py
|
bc3e4e158a1750a0c9732c91297461f01ff5126b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
erwincoumans/pytorch
|
31738b65e7b998bfdc28d0e8afa7dadeeda81a08
|
ae9f39eb580c4d92157236d64548b055f71cf14b
|
refs/heads/master
| 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 |
NOASSERTION
| 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null |
UTF-8
|
Python
| false | false | 5,706 |
py
|
from . import benchmark
class ReduceBench(benchmark.Benchmark):
    """Benchmark summing an [M, N, K] tensor over two of its three dims.

    `case` selects which pair of dims is reduced: "row" keeps dim 0,
    "mid" keeps dim 1, "col" keeps dim 2.
    """

    def __init__(self, mode, device, dtype, case, M, N, K):
        super().__init__(mode, device, dtype)
        self.case = case
        self.M = M
        self.N = N
        self.K = K
        self.inputs = [self.randn(
            [M, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
        )]
        case_to_dims = {"row": [1, 2], "mid": [0, 2], "col": [0, 1]}
        if case not in case_to_dims:
            raise ValueError("invalid case: %s" % case)
        self.dims = case_to_dims[case]

    def forward(self, inputs):
        # A cheap elementwise add is fused in front of the reduction.
        shifted = self.add(inputs, 0.001)
        return self.sum(shifted, self.dims)

    def config(self):
        return [self.M, self.N, self.K]

    @staticmethod
    def default_configs():
        return [
            # [512, 512, 512],
            [512, 64, 512],
        ]

    @staticmethod
    def module():
        return "reduce"

    def memory_workload(self):
        # Forward touches the buffer once; backward adds one more pass in
        # both the "sol" and algorithmic traffic models.
        traffic = 1 if self.mode == "fwd" else 2
        buffer_size = self.M * self.N * self.K
        return {
            "sol": buffer_size * traffic,
            "algorithmic": buffer_size * traffic,
        }
class ReduceRowBench(ReduceBench):
    """3-D reduction keeping dim 0 (reduces over dims 1 and 2)."""

    def __init__(self, mode, device, dtype, M, N, K):
        super().__init__(mode, device, dtype, "row", M, N, K)

    @staticmethod
    def module():
        return "reduce_row"
class ReduceMidBench(ReduceBench):
    """3-D reduction keeping dim 1 (reduces over dims 0 and 2)."""

    def __init__(self, mode, device, dtype, M, N, K):
        super().__init__(mode, device, dtype, "mid", M, N, K)

    @staticmethod
    def module():
        return "reduce_mid"
class ReduceColBench(ReduceBench):
    """3-D reduction keeping dim 2 (reduces over dims 0 and 1)."""

    def __init__(self, mode, device, dtype, M, N, K):
        super().__init__(mode, device, dtype, "col", M, N, K)

    @staticmethod
    def module():
        return "reduce_col"
class Reduce2DBench(benchmark.Benchmark):
    """Benchmark a sum-reduction along one axis of a 2-D tensor.

    Only a simple add is fused in, so the fuser is engaged while reduction
    performance remains the dominant cost being measured.
    """

    def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
        super().__init__(mode, device, dtype)
        self.red_dim = red_dim
        self.dim0 = dim0
        self.dim1 = dim1
        self.inputs = [self.randn(
            [dim0, dim1], device=device, dtype=dtype, requires_grad=self.requires_grad
        )]
        if red_dim not in (0, 1):
            raise ValueError("invalid reduction dimension: {}".format(red_dim))

    def forward(self, inputs):
        biased = self.add(inputs, 0.001)
        return self.sum(biased, [self.red_dim])

    def config(self):
        return [self.red_dim, self.dim0, self.dim1]

    @staticmethod
    def default_configs():
        return [
            [1, 640, 524288],
        ]

    @staticmethod
    def module():
        return "reduce2d"

    @staticmethod
    def input_iterable():
        return True

    def memory_workload(self):
        assert self.mode == "fwd", "Only the forward operation is modeled!"
        # Traffic = whole input plus one reduced output vector.
        buffer_size = self.dim0 * self.dim1
        buffer_size += self.dim1 if self.red_dim == 0 else self.dim0
        return {
            "sol": buffer_size,
            "algorithmic": buffer_size,
        }
class Reduce2DInnerBench(Reduce2DBench):
    """2-D reduction over the inner (last) dimension."""

    def __init__(self, mode, device, dtype, dim0, dim1):
        super().__init__(mode, device, dtype, 1, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_inner"
class Reduce2DOuterBench(Reduce2DBench):
    """2-D reduction over the outer (first) dimension."""

    def __init__(self, mode, device, dtype, dim0, dim1):
        super().__init__(mode, device, dtype, 0, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_outer"
# Make the 3-D and static-shape 2-D reduction benchmarks discoverable
# by the benchmark runner.
benchmark.register_benchmark_class(ReduceRowBench)
benchmark.register_benchmark_class(ReduceMidBench)
benchmark.register_benchmark_class(ReduceColBench)
benchmark.register_benchmark_class(Reduce2DInnerBench)
benchmark.register_benchmark_class(Reduce2DOuterBench)
class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):
    """Dynamic-shape variant of the 2-D reduction benchmark.

    Input shapes are re-randomized per run via `instantiate_input`; only a
    simple add is fused in so reduction performance stays isolated.
    """
    def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
        # Initialize both bases explicitly: DynamicShape first, then the
        # concrete reduction benchmark (which builds the initial input).
        benchmark.DynamicShape.__init__(self)
        Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)
    def instantiate_input(self):
        # Sample a fresh randomized shape around (dim0, dim1) and rebuild
        # the input tensor accordingly.
        dim0, dim1 = self.rand_shape([self.dim0, self.dim1])
        self.inputs = [self.randn(
            [dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
        )]
    @staticmethod
    def module():
        return "dynamicreduce2d"
class DynamicReduce2DInnerBench(DynamicReduce2DBench):
    """Dynamic-shape 2-D reduction over the inner (last) dimension."""

    def __init__(self, mode, device, dtype, dim0, dim1):
        super(DynamicReduce2DInnerBench, self).__init__(mode, device, dtype, 1, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_dynamic_inner"
class DynamicReduce2DOuterBench(DynamicReduce2DBench):
    """Dynamic-shape 2-D reduction over the outer (first) dimension."""

    def __init__(self, mode, device, dtype, dim0, dim1):
        super(DynamicReduce2DOuterBench, self).__init__(mode, device, dtype, 0, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_dynamic_outer"
# Register the dynamic-shape 2-D reduction benchmarks as well.
benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
|
[
"[email protected]"
] | |
084d8ca89f293bf5398b5ab07d7076af43a5fb8d
|
590a0c3a7254b8dac85ab18072dbf766aca7af93
|
/Python-Exercise-100/python-exercise-example07.py
|
01777ba168c7f8e9c5ee7615fd7642d9f407aaf6
|
[
"MIT"
] |
permissive
|
MiracleWong/PythonPractice
|
90c66d29a9cdf0200d3dbac946d05f12dd856e91
|
40aecd84045ad18f6aff95d5b8be8e352ca0a726
|
refs/heads/master
| 2021-08-15T17:19:51.543013 | 2021-06-15T03:59:51 | 2021-06-15T03:59:51 | 98,256,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 地址:http://www.runoob.com/python/python-exercise-example7.html
# Demonstrate copying a list: `b` is an independent shallow copy of `a`,
# so mutating one does not affect the other.
a = [1, 2, 4, 5, 5, 6, 7, 7]
b = list(a)
print(b)
|
[
"[email protected]"
] | |
69bef76ac68fc60f87f5f5e549027b0bcfae66f7
|
91a2ecfaf5dc6c917ec2fda31f56291103f68ceb
|
/tests/protos/test_ctc_loss.py
|
6da44120062bdda6381ed74e2c0f8225fffc8ae4
|
[
"BSD-3-Clause"
] |
permissive
|
MyrtleSoftware/myrtlespeech
|
635d1d16d1bd60fb07a4d30edbf9acb61786c13f
|
8522048fd37744ffa06827a0cbd202b839a15453
|
refs/heads/master
| 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 |
NOASSERTION
| 2020-03-20T14:33:17 | 2019-06-18T08:44:33 |
Python
|
UTF-8
|
Python
| false | false | 1,042 |
py
|
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import hypothesis.strategies as st
from myrtlespeech.protos import ctc_loss_pb2
from tests.protos.utils import all_fields_set
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_losses(
    draw, return_kwargs: bool = False, alphabet_len: Optional[int] = None
) -> Union[
    st.SearchStrategy[ctc_loss_pb2.CTCLoss],
    st.SearchStrategy[Tuple[ctc_loss_pb2.CTCLoss, Dict]],
]:
    """Build a SearchStrategy for CTCLoss protobufs, optionally with kwargs.

    When `alphabet_len` is given, blank_index is drawn within the alphabet;
    otherwise any index up to 1000 is allowed.
    """
    upper = 1000 if alphabet_len is None else max(0, alphabet_len - 1)
    kwargs = {
        "blank_index": draw(st.integers(0, upper)),
        "reduction": draw(
            st.sampled_from(ctc_loss_pb2.CTCLoss.REDUCTION.values())
        ),
    }
    all_fields_set(ctc_loss_pb2.CTCLoss, kwargs)
    ctc_loss = ctc_loss_pb2.CTCLoss(**kwargs)
    if return_kwargs:
        return ctc_loss, kwargs
    return ctc_loss
|
[
"[email protected]"
] | |
c03744b393ec5f98ff295969921ddf3de80aecaf
|
9c52998e7d92640b82284e7e85bf69205fc94d73
|
/SeleniumLearningFiles/SeleniumLearning01/webdrivertest/web04.py
|
ec6aa9036031cb6a57f01829bff64e05c5c91ab3
|
[] |
no_license
|
github653224/GitProjects_SeleniumLearing
|
b0c57d27fa48b0cd7475f8d8e8b19c57160e65fc
|
818b573a3b0f18def98610e59e3c0c6500a675bc
|
refs/heads/master
| 2021-07-20T05:54:46.392948 | 2017-10-27T12:53:41 | 2017-10-27T12:53:41 | 107,764,014 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from random import randint
# Generate a 4-digit verification code and ask the user to echo it back.
verify =randint(1000,9999)
print(u"生成的随机数字: %d " %verify)
number=input("请输入随机数字:")
print(number)
# input() returns a string; convert before comparing with the integer code.
number=int(number)
if number ==verify:
    print ("登录成功!!")
elif number==132741:
    # NOTE(review): hard-coded bypass code that always "logs in" — looks like
    # leftover test/debug logic; confirm whether it should be removed.
    print("登陆成功!!")
else:
    print("输入错误")
|
[
"[email protected]"
] | |
31bda42177c67668b02106a2e58888a61630ed09
|
99e1a15d8f605be456f17608843c309dd8a3260f
|
/src/Battle/Attack/Steps/Test/suite.py
|
a11d3df523d7d71da56074941becf66d934c86c9
|
[] |
no_license
|
sgtnourry/Pokemon-Project
|
e53604096dcba939efca358e4177374bffcf0b38
|
3931eee5fd04e18bb1738a0b27a4c6979dc4db01
|
refs/heads/master
| 2021-01-17T23:02:25.910738 | 2014-04-12T17:46:27 | 2014-04-12T17:46:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
import unittest
from Battle.Attack.Steps.Test.remove_pp_step_test import suite as remove_pp_step_suite
from Battle.Attack.Steps.Test.handle_miss_effects_step_test import suite as handle_miss_effects_step_suite
from Battle.Attack.Steps.Test.handle_contact_step_test import suite as handle_contact_step_suite
from Battle.Attack.Steps.Test.effects_step_test import suite as effects_step_suite
from Battle.Attack.Steps.Test.damage_step_test import suite as damage_step_suite
from Battle.Attack.Steps.Test.announcement_step_test import suite as announcement_step_suite
from Battle.Attack.Steps.Test.hit_step_test import suite as hit_step_suite
from Battle.Attack.Steps.Test.precondition_step_test import suite as precondition_step_suite
# Collect the per-step suites into one master suite. The order mirrors the
# attack pipeline: precondition -> hit -> announcement -> damage -> effects
# -> contact -> miss effects -> PP removal.
suites = [precondition_step_suite,
          hit_step_suite,
          announcement_step_suite,
          damage_step_suite,
          effects_step_suite,
          handle_contact_step_suite,
          handle_miss_effects_step_suite,
          remove_pp_step_suite]
suite = unittest.TestSuite(suites)
|
[
"[email protected]"
] | |
6843646e4bfc8dd6d189f4981122d415672c1403
|
8937c4d452c98699610923f76a395a2247f576df
|
/preprocess/crop.py
|
5b05cb13ad998812b4d8e78a1b99878b47e16046
|
[] |
no_license
|
mistycheney/MouseBrainAtlas
|
812b204af06ed303f3c12d5c81edef50c8d9d1ed
|
bffbaa1ede9297084e64fc197716e63d5cb54275
|
refs/heads/master
| 2020-04-11T13:44:09.632311 | 2018-11-20T22:32:15 | 2018-11-20T22:32:15 | 20,377,173 | 3 | 9 | null | 2017-03-15T19:39:27 | 2014-06-01T12:42:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,884 |
py
|
#! /usr/bin/env python
import os
import argparse
import sys
import time
import numpy as np
from multiprocess import Pool
# Make the repo's shared utilities importable before the project imports below.
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
from learning_utilities import *
# Command-line interface: the stack name plus JSON-encoded version and
# resolution lists, the input/output prep ids, crop-box JSON files, and a
# parallelism level.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='')
parser.add_argument("stack", type=str, help="Brain name")
parser.add_argument("versions", type=str, help="json encoded str list")
parser.add_argument("resolutions", type=str, help="json encoded str list")
parser.add_argument("prep_in", type=str, help="")
parser.add_argument("prep_out", type=str, help="")
parser.add_argument("input_crop_json", type=str, help="")
parser.add_argument("output_crop_json", type=str, help="")
parser.add_argument("n_jobs", type=int, help="", default=1)
args = parser.parse_args()
# `versions` / `resolutions` arrive as JSON; accept either a single string
# or a list of strings and normalize both to lists.
versions = json.loads(args.versions)
if isinstance(versions, str):
    versions = [versions]
else:
    assert isinstance(versions, list), "Argument versions must be str or str list."
resolutions = json.loads(args.resolutions)
if isinstance(resolutions, str):
    resolutions = [resolutions]
else:
    assert isinstance(resolutions, list), "Argument resolutions must be str or str list."
n_jobs = args.n_jobs
def crop(stack, img_name, version, resol, x, y, w, h):
    """Crop one image of `stack` to the (x, y, w, h) box.

    Reads the prep_id=5 (padded) image and saves the cropped result as the
    prep_id=2 image at the same resolution/version.
    """
    src_path = DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol=resol, version=version, fn=img_name)
    dst_path = DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol=resol)
    image = imread(src_path)
    cropped = image[y:y + h, x:x + w]
    save_data(cropped, dst_path)
# For every requested (version, resolution) pair, crop all valid images of
# the stack from the padded prep_id=5 images down to the prep_id=2 box.
for version in versions:
    for resol in resolutions:
        # NOTE(review): x_tb/y_tb/w_tb/h_tb (the thumbnail-space crop box)
        # and `stack` are not defined anywhere in this script — presumably
        # they come from the star-imports above or should be parsed from
        # args.input_crop_json / args.stack. TODO: confirm and fix.
        if resol == 'raw':
            # Thumbnail coordinates are at 1/32 the scale of raw images.
            x = x_tb * 32
            y = y_tb * 32
            w = w_tb * 32
            h = h_tb * 32
        elif resol == 'thumbnail':
            x = x_tb
            y = y_tb
            w = w_tb
            h = h_tb
        else:
            raise
        # input_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=5, version=version, resol='raw')
        out_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=2, resol=resol, version=version)
        print 'out_dir:', out_dir
        # script = os.path.join(REPO_DIR, 'preprocess', 'warp_crop_IM_v3.py')
        # ! rm -rf {out_dir}
        create_if_not_exists(out_dir)
        t = time.time()
        # Crop all valid images of the stack in parallel (8 worker processes).
        pool = Pool(8)
        _ = pool.map(lambda img_name: crop(stack=stack, img_name=img_name, version=version, resol=resol,
                                           x=x, y=y, w=w, h=h),
                     metadata_cache['valid_filenames'][stack])
        pool.close()
        pool.join()
        # for img_name in metadata_cache['valid_filenames'][stack]:
        #     f(stack=stack, img_name=img_name, version=version, resol=resol,
        #       x=x, y=y, w=w, h=h)
        # run_distributed('convert \"%%(input_fp)s\" -crop %(w)dx%(h)d+%(x)d+%(y)d \"%%(output_fp)s\"' % \
        #                 {'w':w_raw, 'h':h_raw, 'x':x_raw, 'y':y_raw},
        #                 kwargs_list=[{'input_fp': DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol='raw', version=version, fn=img_name),
        #                               'output_fp': DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol='raw')}
        #                              for img_name in metadata_cache['valid_filenames'][stack]],
        #                 # for img_name in ['CHATM3_slide35_2018_02_17-S1']],
        #                 argument_type='single',
        #                 jobs_per_node=1,
        #                 local_only=True)
        # wait_qsub_complete()
        print 'done in', time.time() - t, 'seconds' # 1500s
|
[
"[email protected]"
] | |
821a36d24596e0ac1a7bce97e1a3d9b9992c271f
|
03043b715d2e177dd3ba93078463ce79c33173dc
|
/NI_DAQmx/models/NI_PXIe_6535.py
|
ffdfbaabce93ed1ea32f606174fc1da92d542ec7
|
[] |
no_license
|
labscript-suite-bitbucket-archive/cavitylab-labscript_devices--forked-from--labscript_suite-labscript_devices
|
2efc068eb35ca70e1eecab9c7fec7991fd596c9c
|
e665d3ee0ce1cfd7fb7cd5c6cc4d783528bc4935
|
refs/heads/master
| 2020-12-27T02:35:41.710162 | 2019-12-06T20:57:48 | 2019-12-06T20:57:48 | 253,143,395 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,629 |
py
|
#####################################################################
# #
# /NI_DAQmx/models/_subclass_template.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
#####################################################################
# WARNING #
# #
# This file is auto-generated, any modifications may be #
# overwritten. See README.txt in this folder for details #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
    # On Python 2, make `str` mean text (unicode) to match Python 3 semantics.
    str = unicode
from labscript_devices.NI_DAQmx.labscript_devices import NI_DAQmx
# Capability table for the NI PXIe-6535. This is a digital-I/O-only device,
# so all analog-input/output and counter entries are None/0.
CAPABILITIES = {
    'AI_range': None,
    'AI_start_delay': None,
    'AO_range': None,
    'max_AI_multi_chan_rate': None,
    'max_AI_single_chan_rate': None,
    'max_AO_sample_rate': None,
    'max_DO_sample_rate': 10000000.0,
    'min_semiperiod_measurement': None,
    'num_AI': 0,
    'num_AO': 0,
    'num_CI': 0,
    'ports': {
        # Ports 0-3: 8 lines each, buffered output supported.
        'port0': {'num_lines': 8, 'supports_buffered': True},
        'port1': {'num_lines': 8, 'supports_buffered': True},
        'port2': {'num_lines': 8, 'supports_buffered': True},
        'port3': {'num_lines': 8, 'supports_buffered': True},
        # Port 4: 6 lines, unbuffered.
        'port4': {'num_lines': 6, 'supports_buffered': False},
    },
    'supports_buffered_AO': False,
    'supports_buffered_DO': True,
    'supports_semiperiod_measurement': False,
}
class NI_PXIe_6535(NI_DAQmx):
    """NI_DAQmx subclass preloaded with the PXIe-6535 capability table."""
    description = 'NI-PXIe-6535'

    def __init__(self, *args, **kwargs):
        # Start from the model capabilities; explicit kwargs win on conflict.
        merged = dict(CAPABILITIES)
        merged.update(kwargs)
        NI_DAQmx.__init__(self, *args, **merged)
|
[
"[email protected]"
] | |
37e0fb4dbe4d99d999a4a4ff25c33d7f504d8fc8
|
ab574f7511fa15e5ea50a26f26e3e38f7e33505a
|
/win_2018/scipy/special/_ufuncs_cxx.py
|
65fc513447b7d344b151f7ba228174ebe12f7257
|
[] |
no_license
|
zclongpop123/maya_python_packages
|
49d6b340512a2580bc8c14ae6281ca3f57017acd
|
4dd4a48c41749443ac16053d20aec04e9d2db202
|
refs/heads/master
| 2021-11-30T01:49:41.846727 | 2021-11-17T01:47:08 | 2021-11-17T01:47:08 | 49,186,909 | 16 | 9 | null | 2017-03-07T00:13:41 | 2016-01-07T06:48:35 |
Python
|
UTF-8
|
Python
| false | false | 288 |
py
|
def __bootstrap__():
    """Auto-generated loader stub: locate the bundled C extension
    (_ufuncs_cxx.pyd) and load it in place of this pure-Python module."""
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the on-disk path of the compiled extension next to this stub.
    __file__ = pkg_resources.resource_filename(__name__, '_ufuncs_cxx.pyd')
    # Remove the stub machinery before handing the module name to the DLL.
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"[email protected]"
] | |
139a60ffd6e82195e835f691c53c0f317ab5a8d9
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/heatmap/_yperiod.py
|
6496c7ed1592b867d1b2a5946e177c084910c381
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 |
MIT
| 2023-09-08T19:55:32 | 2013-11-21T05:53:08 |
Python
|
UTF-8
|
Python
| false | false | 470 |
py
|
import _plotly_utils.basevalidators
class YperiodValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``heatmap.yperiod`` attribute (accepts any value)."""

    def __init__(self, plotly_name="yperiod", parent_name="heatmap", **kwargs):
        # Pop the overridable defaults out of kwargs before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"ytype": "scaled"})
        super(YperiodValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
[
"[email protected]"
] | |
d978aee1a03ddbd4eec8a61a6d7792586dbbeb14
|
a25aa09af984d08084a395f9b6df427d3756f11a
|
/35.Search Insert Position.py
|
39611cdd7879d9f73747e131d4d9446fec4691dc
|
[] |
no_license
|
luyihsien/leetcodepy
|
31971e851a4ae77942a5d9e3ff07faea6e504c66
|
a54bd09f4b28f106196a6cd8a0f9c056bcd237e6
|
refs/heads/master
| 2020-05-19T13:21:57.854086 | 2019-10-16T14:23:00 | 2019-10-16T14:23:00 | 185,037,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
''''
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
'''
class Solution:
    def searchInsert(self, nums, target):
        """Return the index of `target` in the sorted list `nums`, or the
        index at which it would be inserted to keep `nums` sorted.

        Replaces the original multi-pass linear scans (several O(n) sweeps
        plus max()/min() calls) with a single O(log n) binary search.
        Behavior is unchanged, including the empty-list case (returns 0)
        and duplicates (returns the leftmost matching index).
        """
        from bisect import bisect_left  # local import: this file has no import section
        return bisect_left(nums, target)
'''
成功
显示详情
执行用时 : 52 ms, 在Search Insert Position的Python3提交中击败了90.74% 的用户
内存消耗 : 13.5 MB, 在Search Insert Position的Python3提交中击败了96.03% 的用户
'''
|
[
"[email protected]"
] | |
713a24a7ccdd51e993b29e4b2f542ce44c4723f6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03448/s790400785.py
|
17c0ac19efb39097ef60a9bdde7f5b5bfd5d9764
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 337 |
py
|
def resolve():
    """Count combinations of 500/100/50-yen coins that total exactly X.

    Reads A, B, C (available counts of each coin) and the target X from
    stdin, then prints the number of distinct (a, b, c) combinations.
    """
    A = int(input())
    B = int(input())
    C = int(input())
    X = int(input())
    combos = set()
    for a in range(A + 1):
        for b in range(B + 1):
            # Remaining amount must be covered by 50-yen coins.
            c = (X - 500 * a - 100 * b) / 50
            if 0 <= c <= C:
                combos.add((a, b, c))
    print(len(combos))
    return
# Entry point: run once when executed (AtCoder-style submission).
resolve()
|
[
"[email protected]"
] | |
80fc4b38b7dff6b4f630a8e31f713c5c9b512f3c
|
53163d4129930426c2d7aa650cb1b638d1347d21
|
/lxmert/lxmert/src/tasks/nlvr2_model.py
|
ef93474403461f18461d1da85fb8877b6f6b5364
|
[
"MIT"
] |
permissive
|
fdsig/Transformer-MM-Explainability
|
5e4d9d0c927afd0316311259fc318b325d74628e
|
accc4dd3491d321948e826079ce85f61bb02e0a6
|
refs/heads/main
| 2023-09-03T01:21:27.188260 | 2021-11-17T23:56:49 | 2021-11-17T23:56:49 | 433,759,755 | 1 | 0 |
MIT
| 2021-12-01T09:20:31 | 2021-12-01T09:20:31 | null |
UTF-8
|
Python
| false | false | 1,773 |
py
|
# coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from lxrt.modeling import GeLU, BertLayerNorm
from lxrt.entry import LXRTEncoder
from param import args
class NLVR2Model(nn.Module):
    """LXRT-based classifier for NLVR2: two images + one sentence -> 2-class logits."""
    def __init__(self):
        super().__init__()
        self.lxrt_encoder = LXRTEncoder(
            args,
            max_seq_length=20
        )
        self.hid_dim = hid_dim = self.lxrt_encoder.dim
        # Classification head over the concatenated [img0, img1] cross-modal
        # features (hence hid_dim * 2 input width).
        self.logit_fc = nn.Sequential(
            nn.Linear(hid_dim * 2, hid_dim * 2),
            GeLU(),
            BertLayerNorm(hid_dim * 2, eps=1e-12),
            nn.Linear(hid_dim * 2, 2)
        )
        self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
    def forward(self, feat, pos, sent):
        """
        :param feat: b, 2, o, f
        :param pos: b, 2, o, 4
        :param sent: b, (string)
        :param leng: b, (numpy, int)
        :return: per-example 2-way logits, shape (b, 2)
        """
        # Pairing images and sentences:
        # The input of NLVR2 is two images and one sentence. In batch level, they are saved as
        #   [ [img0_0, img0_1], [img1_0, img1_1], ...] and [sent0, sent1, ...]
        # Here, we flat them to
        #   feat/pos = [ img0_0, img0_1, img1_0, img1_1, ...]
        #   sent     = [ sent0,  sent0,  sent1,  sent1,  ...]
        # i.e. each sentence is duplicated so it pairs with both of its images.
        sent = sum(zip(sent, sent), ())
        batch_size, img_num, obj_num, feat_size = feat.size()
        # Each example carries exactly 2 images of 36 objects with 2048-d features.
        assert img_num == 2 and obj_num == 36 and feat_size == 2048
        feat = feat.view(batch_size * 2, obj_num, feat_size)
        pos = pos.view(batch_size * 2, obj_num, 4)
        # Extract cross-modal features, then re-pair the two images per example
        # by folding back to (b, hid_dim * 2).
        x = self.lxrt_encoder(sent, (feat, pos))
        x = x.view(-1, self.hid_dim*2)
        # Compute logit of answers
        logit = self.logit_fc(x)
        return logit
|
[
"[email protected]"
] | |
b1b504761ef386bea3c5ec22159ec1973a0ac635
|
d4c47276c8fbd15240aa228eda04ee8e338caf02
|
/Python/Python Lesson/Second/Lesson9/Sample8.py
|
447d9972d35e1c1f96525406233e419f925a3a61
|
[] |
no_license
|
developer579/Practice
|
a745384450172fb327913c130303ab76492096f1
|
54084468af83afcc44530e757800c8c3678147c1
|
refs/heads/main
| 2023-05-06T01:36:06.222554 | 2021-06-02T07:04:03 | 2021-06-02T07:04:03 | 324,312,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
import re

# Try each pattern against every sample string and print a match table:
# "o" marks a hit, "x" a miss.
ptr = ["TXT", "TXT..", ".TXT", "..TXT"]
str = ["TXT", "TXTT", "TXTTT", "TTXT", "TTTXT"]  # NOTE: shadows the builtin `str`
for valueptr in ptr:
    print("------")
    pattern = re.compile(valueptr)
    for valuestr in str:
        res = pattern.search(valuestr)
        m = "o" if res is not None else "x"
        mrs = "(パターン)" + valueptr + "(文字列)" + valuestr + "(マッチ)" + m
        print(mrs)
|
[
"[email protected]"
] | |
bfc47b482deb0ccf1f3e645d49665369758987ff
|
3a3e823f6b94b7eae8a363b0b51b036d2b0a1669
|
/metvae/dataset/biom.py
|
aa3196a0a38243f360389493a4983f3f36972811
|
[] |
no_license
|
mortonjt/metvae
|
8a28bbbd72ee79d66992bd31bd82af65b83ea819
|
f2f241fdedd2f4c045a088727df1f155b9ce9a20
|
refs/heads/main
| 2022-12-31T16:24:26.014394 | 2020-10-20T23:38:50 | 2020-10-20T23:38:50 | 305,812,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,780 |
py
|
import os
import re
import biom
import math
import logging
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import List
logger = logging.getLogger(__name__)
class BiomDataset(Dataset):
    """Loads a `.biom` file.
    Parameters
    ----------
    filename : Path
        Filepath to biom table
    metadata_file : Path
        Filepath to sample metadata
    batch_category : str
        Column name for batch indices
    """
    def __init__(
        self,
        table: biom.Table,
        metadata: pd.DataFrame = None,
        batch_category: str = None,
    ):
        # NOTE(review): `super(BiomDataset).__init__()` does not invoke
        # Dataset.__init__ (no instance is bound) — confirm this is intended.
        super(BiomDataset).__init__()
        self.table = table
        self.metadata = metadata
        self.batch_category = batch_category
        self.populate()
    def populate(self):
        """Align metadata with the table and precompute batch indices."""
        logger.info("Preprocessing dataset")
        if self.metadata is not None:
            # match the metadata with the table: keep only samples present in both
            ids = set(self.table.ids()) & set(self.metadata.index)
            filter_f = lambda v, i, m: i in ids
            self.table = self.table.filter(filter_f, axis='sample')
            # Reorder metadata rows to the table's sample order.
            self.metadata = self.metadata.loc[self.table.ids()]
            if self.metadata.index.name is None:
                raise ValueError('`Index` must have a name either'
                                 '`sampleid`, `sample-id` or #SampleID')
            self.index_name = self.metadata.index.name
            self.metadata = self.metadata.reset_index()
        self.batch_indices = None
        if self.batch_category is not None and self.metadata is not None:
            # Map each batch label to a dense integer id, then look up the id
            # for every sample in table order.
            batch_cats = np.unique(self.metadata[self.batch_category].values)
            batch_cats = pd.Series(
                np.arange(len(batch_cats)), index=batch_cats)
            self.batch_indices = np.array(
                list(map(lambda x: batch_cats.loc[x],
                         self.metadata[self.batch_category].values)))
        logger.info("Finished preprocessing dataset")
    def __len__(self) -> int:
        # Number of samples in the (filtered) table.
        return len(self.table.ids())
    def __getitem__(self, i):
        """ Returns all of the samples for a given subject
        Returns
        -------
        counts : np.array
            OTU counts for specified samples.
        batch_indices : np.array
            Membership ids for batch samples. If not specified, return None.
        """
        sample_idx = self.table.ids()[i]
        if self.batch_indices is not None:
            batch_indices = self.batch_indices[i]
        else:
            batch_indices = None
        counts = self.table.data(id=sample_idx, axis='sample')
        return counts, batch_indices
    def __iter__(self):
        # Shard the sample range across DataLoader workers so each worker
        # yields a disjoint contiguous slice.
        worker_info = torch.utils.data.get_worker_info()
        start = 0
        end = self.__len__()
        if worker_info is None:  # single-process data loading
            for i in range(end):
                yield self.__getitem__(i)
        else:
            # NOTE(review): `worker_id` and `w` are each assigned twice below;
            # the repeats are redundant but harmless.
            worker_id = worker_info.id
            w = float(worker_info.num_workers)
            t = (end - start)
            w = float(worker_info.num_workers)
            per_worker = int(math.ceil(t / w))
            worker_id = worker_info.id
            iter_start = start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, end)
            for i in range(iter_start, iter_end):
                yield self.__getitem__(i)
class BiomBatchDataset(BiomDataset):
    """Loads a `.biom` file.
    Parameters
    ----------
    filename : Path
        Filepath to biom table
    metadata_file : Path
        Filepath to sample metadata
    batch_differentials : str
        Pre-trained batch differentials effects
    batch_category : str
        Column name in metadata for batch indices
    Notes
    -----
    Important, periods cannot be handled in the labels
    in the batch_category. Make sure that these are converted to
    hyphens or underscores.
    """
    def __init__(
        self,
        table: biom.Table,
        metadata: pd.DataFrame,
        batch_differentials : pd.DataFrame,
        batch_category: str,
        format_columns=True,
    ):
        # NOTE(review): `super(BiomBatchDataset).__init__()` does not invoke
        # the parent __init__ (no instance is bound); kept as-is since
        # populate() below performs all of the setup this class needs.
        super(BiomBatchDataset).__init__()
        self.table = table
        self.metadata = metadata
        self.batch_category = batch_category
        self.batch_differentials = batch_differentials
        self.format_columns = format_columns
        self.populate()
    def populate(self):
        """Align metadata/differentials with the table and build batch ids."""
        logger.info("Preprocessing dataset")
        # Match the metadata with the table
        ids = set(self.table.ids()) & set(self.metadata.index)
        filter_f = lambda v, i, m: i in ids
        self.table = self.table.filter(filter_f, axis='sample')
        self.metadata = self.metadata.loc[self.table.ids()]
        if self.metadata.index.name is None:
            raise ValueError('`Index` must have a name either'
                             '`sampleid`, `sample-id` or #SampleID')
        self.index_name = self.metadata.index.name
        self.metadata = self.metadata.reset_index()
        # Clean up the batch indexes
        if self.format_columns:
            if (self.metadata[self.batch_category].dtypes == np.float64 or
                self.metadata[self.batch_category].dtypes == np.int64):
                # format the batch category column as strings.
                # FIX: `np.str` was removed in NumPy 1.24 (deprecated since
                # 1.20); the builtin `str` is the documented replacement and
                # was always what `np.str` aliased.
                m = self.metadata[self.batch_category].astype(np.int64)
                self.metadata[self.batch_category] = m.astype(str)
            # Strip differential column headers like "batch[3]" down to the
            # bare label so they line up with the metadata batch labels.
            cols = self.batch_differentials.columns
            def regex_f(x):
                return re.findall(r"\[([A-Za-z0-9_]+).*\]", x)[0]
            cols = list(map(regex_f, cols))
            print('columns', cols)
            self.batch_differentials.columns = cols
        # Retrieve batch labels and map each to a dense integer id.
        batch_cats = np.unique(self.metadata[self.batch_category].values)
        batch_cats = pd.Series(
            np.arange(len(batch_cats)), index=batch_cats)
        self.batch_indices = np.array(
            list(map(lambda x: batch_cats.loc[x],
                     self.metadata[self.batch_category].values)))
        # Clean up batch differentials: keep only features present in both
        # the table and the differentials, in table order.
        table_features = set(self.table.ids(axis='observation'))
        batch_features = set(self.batch_differentials.index)
        ids = table_features & batch_features
        filter_f = lambda v, i, m: i in ids
        self.table = self.table.filter(filter_f, axis='observation')
        table_obs = self.table.ids(axis='observation')
        self.batch_differentials = self.batch_differentials.loc[table_obs]
        logger.info("Finished preprocessing dataset")
    def __getitem__(self, i):
        """ Returns all of the samples for a given subject.
        Returns
        -------
        counts : np.array
            OTU counts for specified samples.
        batch_diffs : np.array
            Batch differential effects for the sample's batch.
        """
        sample_idx = self.table.ids()[i]
        batch_index = self.batch_indices[i]
        counts = self.table.data(id=sample_idx, axis='sample')
        batch_diffs = self.batch_differentials
        assert batch_index < batch_diffs.shape[1], f'Batch diffs " {batch_diffs.shape[1]} > index : {batch_index}'
        batch_diffs = np.array(batch_diffs.iloc[:, batch_index].values)
        return counts, batch_diffs
def collate_single_f(batch):
    """Stack the count vectors of a batch into one float32 tensor."""
    stacked = np.vstack([sample[0] for sample in batch])
    return torch.from_numpy(stacked).float()
def collate_batch_f(batch):
    """Collate fn: stack counts and batch differentials into two float32 tensors."""
    counts = torch.from_numpy(np.vstack([sample[0] for sample in batch])).float()
    diffs = torch.from_numpy(np.vstack([sample[1] for sample in batch])).float()
    return counts, diffs
|
[
"[email protected]"
] | |
6e0ae3e9c859c2ff133011147002083abb1e1ecf
|
6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f
|
/simp_py_examples/course/S1800/t105.py
|
14b64f55e86d1ce9d76af5b273b6ada48bd93378
|
[
"MIT"
] |
permissive
|
kcfkwok2003/Simp_py
|
11d6813fac83ab6309eb8efc22fcd8edde5b19b8
|
f75e66da01b45dc8688dda602f8b33d4258f0c31
|
refs/heads/master
| 2021-05-11T00:36:36.872754 | 2018-12-19T01:41:15 | 2018-12-19T01:41:15 | 118,306,332 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
from simp_py import tft
import time

# Draw a 10-second countdown (10 down to 0) on the TFT display,
# refreshing the text once per second.
lcd = tft.tft
lcd.clear()
for cnt in range(10, -1, -1):
    lcd.text(10, 10, 'count: %s ' % cnt)
    time.sleep(1)
|
[
"[email protected]"
] | |
a5da3fc38c2b91b2122f0fd2cb7e5d2e1f764ad9
|
9dc3ae479c1b5c6941681917151fcb0379f9173d
|
/CanvasFeatureFlag.py
|
7a8e37d3b28a61f52fb91ba58b6f1eb53cf1381a
|
[] |
no_license
|
cthacker-udel/Python-Canvas-API-Wrapper
|
bf2400b42b644791f45bbda7ed42e2c03a8d97b2
|
0263c591a2b02197529559346558b9be02f592c3
|
refs/heads/master
| 2023-08-25T12:01:48.417204 | 2021-10-09T10:49:51 | 2021-10-09T10:49:51 | 388,362,237 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
from CanvasClient import CanvasClient
class CanvasFeatureFlags(CanvasClient):
    """Query-parameter holder for Canvas feature-flag API requests.

    Only ``state`` is serialized into the request body; the id fields
    identify which course/account/user/feature a request targets.
    """

    def __init__(self):
        # clear_queries() is the single source of truth for the field list;
        # previously the same five resets were duplicated here verbatim.
        # NOTE(review): super().__init__() is not called (unchanged from the
        # original) — confirm CanvasClient requires no initialization.
        self.clear_queries()

    def generate_queries(self):
        """Return the request body built from the currently-set parameters."""
        body = {}
        if self.state is not None:
            body['state'] = self.state
        return body

    def clear_queries(self):
        """Reset every query parameter to None."""
        self.course_id = None
        self.account_id = None
        self.user_id = None
        self.feature_id = None
        self.state = None
|
[
"[email protected]"
] | |
3d613b080afe7af474f8504d12bf40d8034710ab
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binaryTree2_20200615152326.py
|
64f23d35b04053fcbead026e6e8a6c7c2d94f816
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
# Create a node and assign a value to the node
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, data):
        # Any node can serve as a root; children hang off .left/.right.
        self.data = data
        self.left = self.right = None

    def printTree(self):
        # NOTE(review): prints only this node's value — no traversal is done.
        print(self.data)
# Build a three-node tree (root with two leaves) and print the root's value.
root = Node(10)
root.left = Node(2)
root.right = Node(3)
root.printTree()
|
[
"[email protected]"
] | |
8a8680338eb791a54e04854473d5d7158ca44726
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pytype/pytype/tools/merge_pyi/test_data/var_annot.comment.py
|
8d3907c0a79e522e7a66e1587e8a8ca132b76a38
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fbf532cb3bc3376967d6a665559e5b50273ee6371ee9080fcc2f2d7e3592c2eb
size 156
|
[
"[email protected]"
] | |
0efae463197cf4b67c08549dc4459158bc1c5d11
|
a3c7c11c607800155457ea1f886e2d84eadd9610
|
/examples/3_NeuralNetworks/convolutional_network.py
|
17aa1d84f64834e38d5523b130d66d3e697d1ee0
|
[
"MIT"
] |
permissive
|
353622088/CapsNet
|
eddba478143bd092ce27bd49dbb65c63d80824e4
|
04408978dfccd9a6545fc250648fd2f600974a95
|
refs/heads/master
| 2021-08-28T02:22:56.958370 | 2017-12-11T03:03:52 | 2017-12-11T03:03:52 | 112,295,252 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,934 |
py
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
    """Build the conv-net graph and return unscaled class logits.

    Args:
        x_dict: dict of input tensors; images are read from x_dict['images'].
        n_classes: number of output classes.
        dropout: dropout rate passed to tf.layers.dropout.
        reuse: reuse variables of an already-built 'ConvNet' scope.
        is_training: enables dropout only while training.
    """
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):
        # TF Estimator input is a dict, in case of multiple inputs
        x = x_dict['images']
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
        # Reshape to match picture format [Height x Width x Channel]
        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        # Convolution Layer with 32 filters and a kernel size of 5
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
        # Convolution Layer with 64 filters and a kernel size of 3
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = tf.contrib.layers.flatten(conv2)
        # Fully connected layer (in tf contrib folder for now)
        fc1 = tf.layers.dense(fc1, 1024)
        # Apply Dropout (if is_training is False, dropout is not applied)
        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
        # Output layer, class prediction (logits, no softmax applied here)
        out = tf.layers.dense(fc1, n_classes)
    return out
# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
    """TF Estimator model function: build graphs, loss, train and eval ops."""
    # Build the neural network
    # Because Dropout have different behavior at training and prediction time, we
    # need to create 2 distinct computation graphs that still share the same weights.
    logits_train = conv_net(features, num_classes, dropout, reuse=False,
                            is_training=True)
    logits_test = conv_net(features, num_classes, dropout, reuse=True,
                           is_training=False)
    # Predictions (pred_probas is computed but unused below)
    pred_classes = tf.argmax(logits_test, axis=1)
    pred_probas = tf.nn.softmax(logits_test)
    # If prediction mode, early return
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
    # Define loss and optimizer
    # Debug prints: static shapes of the logits and labels at graph build time.
    print(logits_train.shape)
    print(labels.shape)
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
    # tf.summary.scalar(name='loss', tensor=loss_op)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op,
                                  global_step=tf.train.get_global_step())
    # Evaluate the accuracy of the model
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
    # merge_all_op = tf.summary.merge_all()
    # TF Estimators requires to return a EstimatorSpec, that specify
    # the different ops for training, evaluating, ...
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})
    return estim_specs
# Build the Estimator (checkpoints and events go to ./logdir)
model = tf.estimator.Estimator(model_fn, model_dir='logdir')
# Define the input function for training
# num_epochs=None repeats indefinitely; length is bounded by `steps` below.
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels,
    batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating (single pass, no shuffling)
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels,
    batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
|
[
"[email protected]"
] | |
88f37dcfa3636c5a91c3546ae84c383167f931e2
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python-flask/generated/openapi_server/models/com_adobe_cq_social_commons_emailreply_impl_custom_email_client_provider_properties.py
|
4d9bc47c42da303a7c969c543512bee62080c310
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 |
Apache-2.0
| 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null |
UTF-8
|
Python
| false | false | 4,134 |
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.config_node_property_array import ConfigNodePropertyArray # noqa: F401,E501
from openapi_server.models.config_node_property_integer import ConfigNodePropertyInteger # noqa: F401,E501
from openapi_server import util
class ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    def __init__(self, priority_order: ConfigNodePropertyInteger=None, reply_email_patterns: ConfigNodePropertyArray=None):  # noqa: E501
        """ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties - a model defined in OpenAPI

        :param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.  # noqa: E501
        :type priority_order: ConfigNodePropertyInteger
        :param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.  # noqa: E501
        :type reply_email_patterns: ConfigNodePropertyArray
        """
        # Attribute name -> declared OpenAPI type (used by the (de)serializer).
        self.openapi_types = {
            'priority_order': ConfigNodePropertyInteger,
            'reply_email_patterns': ConfigNodePropertyArray
        }
        # Python attribute name -> JSON key in the wire format.
        self.attribute_map = {
            'priority_order': 'priorityOrder',
            'reply_email_patterns': 'replyEmailPatterns'
        }
        self._priority_order = priority_order
        self._reply_email_patterns = reply_email_patterns
    @classmethod
    def from_dict(cls, dikt) -> 'ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The comAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.  # noqa: E501
        :rtype: ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties
        """
        return util.deserialize_model(dikt, cls)
    @property
    def priority_order(self) -> ConfigNodePropertyInteger:
        """Gets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.


        :return: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
        :rtype: ConfigNodePropertyInteger
        """
        return self._priority_order
    @priority_order.setter
    def priority_order(self, priority_order: ConfigNodePropertyInteger):
        """Sets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.


        :param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
        :type priority_order: ConfigNodePropertyInteger
        """
        self._priority_order = priority_order
    @property
    def reply_email_patterns(self) -> ConfigNodePropertyArray:
        """Gets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.


        :return: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
        :rtype: ConfigNodePropertyArray
        """
        return self._reply_email_patterns
    @reply_email_patterns.setter
    def reply_email_patterns(self, reply_email_patterns: ConfigNodePropertyArray):
        """Sets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.


        :param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
        :type reply_email_patterns: ConfigNodePropertyArray
        """
        self._reply_email_patterns = reply_email_patterns
|
[
"[email protected]"
] | |
8e4033741ac16a69170a9bfaf0ba7158c207ddc2
|
d0cf8b68b68e33900544dc056566511428692b71
|
/tests/spoof/gs_feature_elision.py
|
c2aabeb4d4d1e9b78fab46632764e38d376bfe25
|
[
"MIT"
] |
permissive
|
ryanfb/OCRmyPDF
|
3f1547c164d3b74b5e6c003bb875e50c292b36a4
|
f6a4d8f1f808a1c963c85e498a773ef0439db5ed
|
refs/heads/master
| 2021-01-21T04:25:00.603736 | 2017-08-27T20:53:36 | 2017-08-27T20:53:36 | 101,911,301 | 1 | 0 | null | 2017-08-30T17:44:15 | 2017-08-30T17:44:15 | null |
UTF-8
|
Python
| false | false | 800 |
py
|
#!/usr/bin/env python3
# © 2016 James R. Barlow: github.com/jbarlow83
import sys
import os
from subprocess import check_call
"""Replicate one type of Ghostscript feature elision warning during
PDF/A creation."""
def real_ghostscript(argv):
    """Hand execution over to the real `gs` binary with the same arguments."""
    os.execvp("gs", ["gs"] + argv[1:])
    return  # unreachable: execvp replaces the current process image
elision_warning = """GPL Ghostscript 9.20: Setting Overprint Mode to 1
not permitted in PDF/A-2, overprint mode not set"""
def main():
    """Spoof gs: report a fake version, run the real gs, and append a PDF/A
    feature-elision warning whenever a PDF is being written."""
    if '--version' in sys.argv:
        print('9.20')
        # BUG FIX: `__filename__` does not exist in Python and raised a
        # NameError here; `__file__` is the module's path attribute.
        print('SPOOFED: ' + os.path.basename(__file__))
        sys.exit(0)
    gs_args = ['gs'] + sys.argv[1:]
    check_call(gs_args)
    if '-sDEVICE=pdfwrite' in sys.argv[1:]:
        print(elision_warning)
    sys.exit(0)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c93c5ccd6c588a6c7f2b024b62acc6badd12163b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/HDGiiCmSgJeeu3388_19.py
|
09b87a15f58f460743f3b6ef6eaacc88c698ba44
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
"""
A fuse melts when a current in an electrical device exceeds the fuse's rating,
breaking the circuit and preventing the heat from building up too much (which
can cause a fire). The ideal fuse to choose is **higher** than the device's
current output, yet **as close as possible** to it as well.
Given a list of _fuse ratings_ , and the _device's current output_ , return
which of the fuses is the best for the device.
### Examples
choose_fuse(["3V", "5V", "12V"], "4.5V") ➞ "5V"
choose_fuse(["5V", "14V", "2V"], "5.5V") ➞ "14V"
choose_fuse(["17V", "15V", "12V"], "9V") ➞ "12V"
### Notes
* You will be given three possible ratings in voltage.
* Fuses may not be in a sorted order.
* Assume that there is a valid fuse in every test case
"""
def choose_fuse(f, c):
    """Return the smallest rating in *f* (e.g. "5V") at or above current *c*."""
    needed = float(c[:-1])
    viable = [int(rating[:-1]) for rating in f if float(rating[:-1]) >= needed]
    return f'{min(viable)}V'
|
[
"[email protected]"
] | |
448a496d6cf183fe73cf62e90b39b8f5e925a6f8
|
cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be
|
/python3/qr/zbarlight_test.py
|
5944e63c9ba7fb774948ce49dce2fe4de1a416f1
|
[] |
no_license
|
ericosur/ericosur-snippet
|
dda2200546b13fb9b84632d115a0f4ca5e3d5c47
|
0309eeb614612f9a35843e2f45f4080ae03eaa81
|
refs/heads/main
| 2023-08-08T04:54:05.907435 | 2023-07-25T06:04:01 | 2023-07-25T06:04:01 | 23,057,196 | 2 | 1 | null | 2022-08-31T09:55:19 | 2014-08-18T03:18:52 |
Perl
|
UTF-8
|
Python
| false | false | 864 |
py
|
#!/usr/bin/env python3
# coding: utf-8
'''
apt-get install libzbar-dev
pip install zbarlight
I do not recomment use this module to decode qrcode.
'''
import sys
from PIL import Image
import common
try:
import zbarlight
except ImportError:
print('need to install zbarligt (python) and libzbar-dev')
sys.exit(1)
def read_image(fn):
    """Open *fn* and return a fully-loaded PIL Image."""
    with open(fn, "rb") as handle:
        image = Image.open(handle)
        image.load()  # force the pixel data in before the handle is released
    return image
def process():
    """Scan every PNG returned by common.get_pngs() for QR codes and print
    each decoded payload (raw bytes and UTF-8 text)."""
    for fn in common.get_pngs():
        print('fn:', fn)
        image = read_image(fn)
        # scan_codes returns the payloads as byte strings
        for code in zbarlight.scan_codes(['qrcode'], image):
            print(code)
            print(code.decode('utf-8'))
def main():
    ''' Entry point: decode QR codes from every PNG in the working set. '''
    process()
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
77842a6aee9b5ded6310e374e78ec44dfddb45bd
|
d2cb930ed5df0b1b5f7944e00f6f884bf014803d
|
/douban/twisted-demo.py
|
fcf677fc5cecf53c84cde258c7d3baea35271f91
|
[] |
no_license
|
sixDegree/python-scrapy-demo
|
3cae4298b01edab65449cfe9af56b2fa59f4c07d
|
b66530e54156be8c7877f1fc4d497fd497b6fdda
|
refs/heads/master
| 2020-06-17T03:16:23.038061 | 2019-07-08T09:25:15 | 2019-07-08T09:25:15 | 195,777,787 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,184 |
py
|
from twisted.internet import reactor # 事件循环(自动终止条件:所有socket都已移除)
from twisted.internet import defer # defer.Deferred 特殊的socket对象(需手动调用执行,手动移除)
from twisted.internet import task
import treq # 用于发送异步Request,返回Deferred对象
import time
# 延迟机制:
# Deferred 延迟对象,代表的是一个无法立即获取的值
def demo_defer1():
    """Fire a bare Deferred and inspect its state before and after."""
    dfd = defer.Deferred()
    print("called:", dfd.called)  # False: nothing has fired yet
    print("call...")
    dfd.callback("Hello")
    print("called:", dfd.called)  # True
    print("result:", dfd.result)  # Hello
def demo_defer2():
    """Attach a callback first, then fire the Deferred; the callback's
    return value becomes the Deferred's result."""
    def done(v):
        print("done called")
        return "Hello " + v

    dfd = defer.Deferred()
    dfd.addCallback(done)
    print("called:", dfd.called)  # False
    print("call...")
    dfd.callback("Tom")
    print("called:", dfd.called)  # True
    print("result:", dfd.result)  # Hello Tom
def demo_defer3():
    # When a callback returns a Deferred (here `b`), the outer chain on `a`
    # pauses until that inner Deferred fires.
    def status(*ds):
        # (result or 'N/A', number of pending callbacks) for each Deferred.
        return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
    def b_callback(arg):
        print("b_callback called with arg =", arg)
        return b
    def on_done(arg):
        print("on_done called with arg =", arg)
        return arg
    a = defer.Deferred()
    b = defer.Deferred()
    a.addCallback(b_callback).addCallback(on_done)
    print(status(a, b))  # [('N/A', 2), ('N/A', 0)]
    a.callback(3)  # b_callback called with arg = 3
    # `a` is now paused waiting on `b`; on_done has not run yet.
    print(status(a, b))  # [(<Deferred at 0x1047a0da0>, 1), ('N/A', 1)]
    b.callback(4)  # on_done called with arg = 4
    print(status(a, b))  # [(4, 0), (None, 0)]
def demo_defer4():
    # Same chain as demo_defer3, but `b` fires *before* `a`: when `a`
    # finally fires, b_callback returns the already-called `b`, so on_done
    # runs immediately with b's result.
    def status(*ds):
        return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
    def b_callback(arg):
        print("b_callback called with arg =", arg)
        return b
    def on_done(arg):
        print("on_done called with arg =", arg)
        return arg
    a = defer.Deferred()
    b = defer.Deferred()
    a.addCallback(b_callback).addCallback(on_done)
    print(status(a, b))  # [('N/A', 2), ('N/A', 0)]
    b.callback(4)
    print(status(a, b))  # [('N/A', 2), (4, 0)]
    a.callback(3)  # b_callback called with arg = 3
    # on_done called with arg = 4
    print(status(a, b))  # [(4, 0), (None, 0)]
def demo_defer5():
    """DeferredList fires its callback only after every member Deferred
    has fired (i.e. after each one's callback(...) has been invoked)."""
    def on_done(arg):
        print("on_done called with arg =", arg)
        return arg

    dfds = [defer.Deferred() for _ in range(5)]
    defer.DeferredList(dfds).addCallback(on_done)
    for i, dfd in enumerate(dfds):
        dfd.callback(i)
    # on_done called with arg = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4)]
def demo_reactor1():
    # The Deferred is created and fired synchronously before reactor.run();
    # callLater(0, stop) only serves to end the event loop afterwards.
    def done(arg):
        print("Done", arg)
    def defer_task():
        print("Start")
        d = defer.Deferred()
        time.sleep(3)  # blocking sleep — runs before the reactor starts
        d.callback("123")
        return d
    def stop():
        reactor.stop()
    defer_task().addCallback(done)
    reactor.callLater(0, stop)
    reactor.run()
def demo_reactor2():
    # Five tasks, each fully fired before the reactor starts, so the
    # DeferredList triggers all_done as soon as run() begins.
    def done(arg):
        print("Done", arg)
    def all_done(arg):
        print("All done", arg)
    def defer_task(i):
        print("Start", i)
        d = defer.Deferred()
        d.addCallback(done)
        time.sleep(2)  # blocking: tasks run sequentially, not concurrently
        d.callback(i)
        return d
    def stop():
        print("Stop reactor")
        reactor.stop()
    dfds = defer.DeferredList([defer_task(i) for i in range(5)])
    dfds.addCallback(all_done)
    reactor.callLater(0, stop)
    reactor.run()
def demo_reactor3():
    # task.deferLater schedules `done` on the running reactor, so the five
    # 2-second delays overlap instead of running back to back; all_done
    # stops the reactor once every task has completed.
    def done(arg):
        print("Done", arg)
    def all_done(arg):
        print("All done", arg)
        print("Stop reactor")
        reactor.stop()
    def defer_task(i):
        print("Start", i)
        return task.deferLater(reactor, 2, done, i)
    dfds = defer.DeferredList([defer_task(i) for i in range(5)])
    dfds.addBoth(all_done)
    # dfds.addCallback(all_done)
    # reactor.callLater(5, stop)
    reactor.run()
def demo_treq_get(url):
    """Issue an async GET via treq, print the response, then stop the reactor."""
    def get_done(response):
        print("get response:", response)
        reactor.stop()

    request = treq.get(url)
    request.addCallback(get_done)
    reactor.run()
def main():
    # Run three asynchronous tasks concurrently; the DeferredList fires
    # all_done (which stops the reactor) once every task has completed.
    @defer.inlineCallbacks
    def my_task1():
        print("Start task1")
        url = "http://www.baidu.com"
        d = treq.get(url.encode('utf-8'))
        d.addCallback(parse)
        yield d
    def my_task2():
        print("Start task2")
        return task.deferLater(reactor, 2, parse, "200")
    @defer.inlineCallbacks  # need use `yield`
    def my_task3():
        print("Start task3")
        yield task.deferLater(reactor, 2, parse, "400")
    def parse(response):
        print("parse response:", response)
    def all_done(arg):
        print("All done", arg)
        reactor.stop()
    dfds = defer.DeferredList([my_task1(), my_task2(), my_task3(), ])
    dfds.addBoth(all_done)
    reactor.run()
if __name__ == "__main__":
    # Run exactly one demo per process: each starts and stops its own
    # reactor, and a Twisted reactor cannot be restarted once stopped.
    # demo_defer1()
    # demo_defer2()
    # demo_defer3()
    # demo_defer4()
    # demo_defer5()
    # demo_reactor1()
    # demo_reactor2()
    # demo_reactor3()
    # demo_treq_get('http://www.baidu.com')
    main()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.