seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
474451690
|
from collections import OrderedDict
from .models import GoodsChannel
def get_categories():
"""
Get the mall's goods category menu.
:return:
"""
# Goods channels and their category menus
# Use an ordered dict to preserve the order of the categories
# categories = {
# 1: { # 组1
# 'channels': [{'id': 1, 'name': 'xxx', 'url': 'yyy'}, {}, {}],
# 'sub_cats': [{'id': 1, 'name': 'xxx', 'sub_cats': [{}, {}]}, {}, {}]
# },
# 2: { # 组2
# 'channels': [{'id': 1, 'name': 'xxx', 'url': 'yyy'}, {}, {}],
# 'sub_cats': [{'id': 1, 'name': 'xxx', 'sub_cats': [{}, {}]}, {}, {}]
# }
# }
# OrderedDict is a dict that remembers insertion order
categories = OrderedDict()
channels = GoodsChannel.objects.order_by('group_id', 'sequence')
for channel in channels: # <class goodschannel> id:1, group_id:1, url:http://shouji.jd.com, sequence:1
group_id = channel.group_id # current group: which row of the menu this channel belongs to # group_id = 1
if group_id not in categories: # not in categories = {}
categories[group_id] = {'channels': [], 'sub_cats': []} # categories = {1: {'channels':[], 'sub_cats': []}
cat1 = channel.category # category object of the current channel # cat1 = <class goods_category> id:1, name:手机, parent_id:none
# Append the current channel
categories[group_id]['channels'].append({ # categories = {1: {'channels':[{'id': 1, 'name': '手机', 'url': 'http://shouji.jd.com'}], 'sub_cats': []}
'id': cat1.id, # 'id': 1
'name': cat1.name, # 'name': 手机
'url': channel.url # 'url': http://shouji.jd.com
})
# Build the subcategories of the current category
for cat2 in cat1.goodscategory_set.all(): # cat1.goodscategory_set.all() = [<class goods_category (id: 38, name: 手机通讯, parent_id: 1)>, <class goods_category (id: 39, name: 手机配件, parent_id: 1)>]
cat2.sub_cats = [] # cat2 = <class goods_category (id: 38, name: 手机通讯, parent_id: 1, sub_cats: [])>
for cat3 in cat2.goodscategory_set.all(): # cat2.goodscategory_set.all() = [<class goods_category (id: 113, name: 手机, parent_id: 38)>, <class goods_category (id: 114, name: 游戏手机, parent_id: 38)>, <class goods_category (id: 115, name: 老人机, parent_id: 38)>, <class goods_category (id: 116, name: 对讲机, parent_id: 38)>]
cat2.sub_cats.append(cat3) # cat3 = <class goods_category (id: 113, name: 手机, parent_id: 38)>
categories[group_id]['sub_cats'].append(cat2) # categories = {1: {'channels':[{'id': 1, 'name': '手机', 'url': 'http://shouji.jd.com'}], 'sub_cats': [<class goods_category (id: 38, name: 手机通讯, parent_id: 1, sub_cats: [<class goods_category (id: 113, name: 手机, parent_id: 38)>])>]}
return categories
| null |
meiduo_mall/meiduo_mall/apps/goods/utils.py
|
utils.py
|
py
| 3,241 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.OrderedDict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.GoodsChannel.objects.order_by",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.GoodsChannel.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "models.GoodsChannel",
"line_number": 26,
"usage_type": "name"
}
] |
322181877
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions for ResolverOps."""
from typing import Dict, List, Optional, Sequence
from tfx import types
# Maps from "span" and "version" to PropertyType.INT. Many ResolverOps require
# one or both of these properties, so we define constants here for convenience.
SPAN_PROPERTY = {'span': types.artifact.PropertyType.INT}
VERSION_PROPERTY = {'version': types.artifact.PropertyType.INT}
SPAN_AND_VERSION_PROPERTIES = {**SPAN_PROPERTY, **VERSION_PROPERTY}
def get_valid_artifacts(
artifacts: Sequence[types.Artifact],
property_types: Dict[str,
types.artifact.PropertyType]) -> List[types.Artifact]:
"""Returns artifacts that have the required property names and types."""
valid_artifacts = []
for artifact in artifacts:
if artifact.PROPERTIES is None:
continue
for property_name, property_type in property_types.items():
if (property_name not in artifact.PROPERTIES or
artifact.PROPERTIES[property_name].type != property_type):
break
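# for/else: the else branch below runs only when the loop completes without
# break, i.e. when every required property name and type matched.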
else:
valid_artifacts.append(artifact)
return valid_artifacts
def filter_artifacts_by_span(
artifacts: List[types.Artifact],
span_descending: bool,
n: int = 1,
skip_last_n: int = 0,
keep_all_versions: bool = False,
min_span: Optional[int] = None,
) -> List[types.Artifact]:
"""Filters artifacts by their "span" PROPERTY.
This should only be used a shared utility for LatestSpan and ConsecutiveSpans.
Args:
artifacts: The list of Artifacts to filter.
span_descending: If true, then the artifacts will be sorted by span in
descending order. Else, they will be sorted in ascending order by span.
Set to true for LatestSpan, and set to false for ConsecutiveSpans.
n: The number of spans to return. If n <= 0, then n is set to the total
number of unique spans.
skip_last_n: Number of largest spans to skip. For example, if the spans are
[1, 2, 3] and skip_last_n=1, then only spans [1, 2] will be considered.
keep_all_versions: If true, all versions of the n spans are returned. Else,
only the latest version is returned.
min_span: Minimum span before which no span will be considered.
Returns:
The filtered artifacts.
"""
if not artifacts:
return []
# Only keep artifacts with spans >= min_span and account for skip_last_n
spans = sorted({a.span for a in artifacts})
if min_span is not None:
spans = [s for s in spans if s >= min_span]
if skip_last_n:
spans = spans[:-skip_last_n]
# Sort spans in descending order, if specified.
if span_descending:
spans = spans[::-1]
# Keep n spans, if n is positive.
if n > 0:
spans = spans[:n]
if not spans:
return []
artifacts_by_span = {}
for artifact in artifacts:
artifacts_by_span.setdefault(artifact.span, []).append(artifact)
result = []
version_and_id = lambda a: (a.version, a.id)
for span in sorted(spans):
if keep_all_versions:
# span_descending only applies to sorting by span, but version should
# always be sorted in ascending order.
result.extend(sorted(artifacts_by_span[span], key=version_and_id))
else:
# Latest version is defined as the largest version. Ties broken by id.
result.append(max(artifacts_by_span[span], key=version_and_id))
return result
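# Hypothetical usage sketch (not part of the original module): with artifact
# spans [1, 2, 3], span_descending=True, n=2, keep_all_versions=False, the
# two latest spans (3 and 2) survive, and the latest version of each is
# returned in ascending span order because the final loop uses sorted(spans).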
| null |
tfx/dsl/input_resolution/ops/ops_utils.py
|
ops_utils.py
|
py
| 3,948 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tfx.types.artifact",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tfx.types.artifact",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tfx.types.Artifact",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "tfx.types.artifact",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "tfx.types.Artifact",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "tfx.types.Artifact",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "tfx.types.Artifact",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "tfx.types",
"line_number": 57,
"usage_type": "name"
}
] |
590013784
|
# number of lattice points inside a quadrilateral
from math import sqrt,ceil,floor
from fractions import gcd
def inside(a,b,c,d):
#calculate area
area = (c*b+a*b+a*d+d*c)/2
bp = gcd(a,b)+gcd(b,c)+gcd(c,d)+gcd(d,a)
return area-bp/2+1
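# By Pick's theorem, A = I + B/2 - 1 for a lattice polygon, so this returns
# the interior count I = A - B/2 + 1. Hypothetical sanity check (not in the
# original file): inside(1, 1, 1, 1) is the unit diamond |x| + |y| <= 1,
# whose only interior lattice point is the origin, so it returns 1.0.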
def sol(m):
count=0
for a in range(1,m+1):
for b in range(1,m+1):
for c in range(1,m+1):
for d in range(1,m+1):
p=inside(a,b,c,d)
s=sqrt(p)
if ceil(s)==floor(s):
count+=1
return count
| null |
Python/E504/E504/E504.py
|
E504.py
|
py
| 560 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "fractions.gcd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 19,
"usage_type": "call"
}
] |
585452927
|
##Patterns: B304
from Crypto.Cipher import ARC2
from Crypto.Cipher import ARC4
from Crypto.Cipher import Blowfish
from Crypto.Cipher import DES
from Crypto.Cipher import XOR
from Crypto.Hash import SHA
from Crypto import Random
from Crypto.Util import Counter
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.backends import default_backend
from struct import pack
import socket
key = b'Sixteen byte key'
iv = Random.new().read(ARC2.block_size)
##Warn: B304
cipher = ARC2.new(key, ARC2.MODE_CFB, iv)
msg = iv + cipher.encrypt(b'Attack at dawn')
key = b'Very long and confidential key'
nonce = Random.new().read(16)
tempkey = SHA.new(key+nonce).digest()
##Warn: B304
cipher = ARC4.new(tempkey)
msg = nonce + cipher.encrypt(b'Open the pod bay doors, HAL')
bs = Blowfish.block_size
key = b'An arbitrarily long key'
iv = Random.new().read(bs)
##Warn: B304
cipher = Blowfish.new(key, Blowfish.MODE_CBC, iv)
plaintext = b'docendo discimus '
plen = bs - divmod(len(plaintext),bs)[1]
padding = [plen]*plen
padding = pack('b'*plen, *padding)
msg = iv + cipher.encrypt(plaintext + padding)
key = b'-8B key-'
nonce = Random.new().read(DES.block_size/2)
ctr = Counter.new(DES.block_size*8/2, prefix=nonce)
##Warn: B304
cipher = DES.new(key, DES.MODE_CTR, counter=ctr)
plaintext = b'We are no longer the knights who say ni!'
msg = nonce + cipher.encrypt(plaintext)
key = b'Super secret key'
##Warn: B304
cipher = XOR.new(key)
plaintext = b'Encrypt me'
msg = cipher.encrypt(plaintext)
##Warn: B304
cipher = Cipher(algorithms.ARC4(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
##Warn: B304
cipher = Cipher(algorithms.Blowfish(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
##Warn: B304
cipher = Cipher(algorithms.IDEA(key), mode=None, backend=default_backend())
encryptor = cipher.encryptor()
ct = encryptor.update(b"a secret message")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', 31137))
s.bind(('192.168.0.1', 8080))
| null |
docs/multiple-tests/with-config/src/B304.py
|
B304.py
|
py
| 2,238 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Crypto.Random.new",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Crypto.Random",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.ARC2.block_size",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.ARC2",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.ARC2.new",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.ARC2",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.ARC2.MODE_CFB",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Random.new",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Crypto.Random",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "Crypto.Hash.SHA.new",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "Crypto.Hash.SHA",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.ARC4.new",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.ARC4",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.Blowfish.block_size",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.Blowfish",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "Crypto.Random.new",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Crypto.Random",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.Blowfish.new",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.Blowfish",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.Blowfish.MODE_CBC",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "struct.pack",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Crypto.Random.new",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "Crypto.Random",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.DES.block_size",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.DES",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "Crypto.Util.Counter.new",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "Crypto.Util.Counter",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.DES.block_size",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.DES",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.DES.new",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.DES",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.DES.MODE_CTR",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.XOR.new",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.XOR",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.Cipher",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.ARC4",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.Cipher",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.Blowfish",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.Cipher",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.IDEA",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 71,
"usage_type": "attribute"
}
] |
391835765
|
import unittest
from typing import List
from .ds import TreeNode
from queue import Queue
class Solution(unittest.TestCase):
def rightSideView(self, root: TreeNode) -> List[int]:
"""
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
---
Basic Idea: BFS with level, each level keep last node
"""
if not root:
return []
queue = Queue()
queue.put((root, 1))
ret = []
preNode, preLevel = root, 1
while not queue.empty():
node, level = queue.get()
if level == preLevel + 1:
ret.append(preNode.val)
if node.left:
queue.put((node.left, level+1))
if node.right:
queue.put((node.right, level+1))
preNode, preLevel = node, level
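# The loop flushes a level's rightmost node only when the next level begins,
# so the rightmost node of the deepest level is appended after the queue drains.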
ret.append(preNode.val)
return ret
def testRightView(self):
self.assertEqual([1,3,4], self.rightSideView(TreeNode.fromList([1,2,3,None,5,None,4])))
| null |
src/main/python/binary_tree_right_side_view.py
|
binary_tree_right_side_view.py
|
py
| 1,225 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "ds.TreeNode",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "queue.Queue",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "queue.put",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "queue.empty",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "queue.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "queue.put",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "queue.put",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "ds.TreeNode.fromList",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ds.TreeNode",
"line_number": 49,
"usage_type": "name"
}
] |
265475322
|
import time
from weakref import WeakKeyDictionary
from scrapy import signals, log
from scrapy.exceptions import NotConfigured
from scrapy.exporters import PythonItemExporter
from scrapy.http import Request
from scrapy.utils.request import request_fingerprint
from scrapy.exceptions import ScrapyDeprecationWarning
from sh_scrapy import hsref
from sh_scrapy.compat import IS_PYTHON2
from sh_scrapy.crawl import ignore_warnings
class HubstorageExtension(object):
"""Extension to write scraped items to HubStorage"""
def __init__(self, crawler):
self.hsref = hsref.hsref
if not self.hsref.enabled:
raise NotConfigured
self.crawler = crawler
self._write_item = self.hsref.job.items.write
# https://github.com/scrapy/scrapy/commit/c76190d491fca9f35b6758bdc06c34d77f5d9be9
exporter_kwargs = {'binary': False} if not IS_PYTHON2 else {}
with ignore_warnings(category=ScrapyDeprecationWarning):
self.exporter = PythonItemExporter(**exporter_kwargs)
log.msg("HubStorage: writing items to %s" % self.hsref.job.items.url)
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler)
crawler.signals.connect(o.item_scraped, signals.item_scraped)
crawler.signals.connect(o.spider_closed, signals.spider_closed)
return o
def item_scraped(self, item, spider):
type_ = type(item).__name__
item = self.exporter.export_item(item)
item.setdefault("_type", type_)
self._write_item(item)
def spider_closed(self, spider, reason):
# flush item writer
self.hsref.job.items.flush()
# update outcome
self.hsref.job.metadata.update(close_reason=reason)
self.hsref.job.metadata.save()
class HubstorageMiddleware(object):
def __init__(self):
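# Keyed weakly by response: if a response is dropped before it reaches
# process_spider_output, its entry disappears with it instead of leaking.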
self._seen = WeakKeyDictionary()
self.hsref = hsref.hsref
def process_spider_input(self, response, spider):
parent = response.meta.get('_hsparent')
riq = self.hsref.job.requests.add(
parent=parent,
url=response.url,
status=response.status,
method=response.request.method,
rs=len(response.body),
duration=response.meta.get('download_latency', 0) * 1000,
ts=time.time() * 1000,
fp=request_fingerprint(response.request),
)
self._seen[response] = riq
def process_spider_output(self, response, result, spider):
parent = self._seen.pop(response)
for x in result:
if isinstance(x, Request):
x.meta['_hsparent'] = parent
yield x
| null |
sh_scrapy/extension.py
|
extension.py
|
py
| 2,674 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sh_scrapy.hsref.hsref",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sh_scrapy.hsref",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scrapy.exceptions.NotConfigured",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sh_scrapy.compat.IS_PYTHON2",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sh_scrapy.crawl.ignore_warnings",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scrapy.exceptions.ScrapyDeprecationWarning",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "scrapy.exporters.PythonItemExporter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scrapy.log.msg",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scrapy.log",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "scrapy.signals.item_scraped",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "scrapy.signals",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "scrapy.signals.spider_closed",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scrapy.signals",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "weakref.WeakKeyDictionary",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sh_scrapy.hsref.hsref",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "sh_scrapy.hsref",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scrapy.utils.request.request_fingerprint",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "scrapy.http.Request",
"line_number": 74,
"usage_type": "argument"
}
] |
385530762
|
# -------------------------------------------------------------
# proj: gan-stronomy
# file: opts.py
# authors: Mark Sabini, Zahra Abdullah, Darrith Phan
# desc: File containing all options pertaining to training
# -------------------------------------------------------------
import torch
import os
import util
# General parameters
SAFETY_MODE = False
# Device parameters
CUDA = torch.cuda.is_available()
DEVICE = 'cuda' if CUDA else 'cpu'
FloatTensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if CUDA else torch.LongTensor
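# Aliasing the tensor constructors here lets the rest of the code build
# tensors without branching on CUDA availability at every call site.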
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
# Data parameters
EMBED_SIZE = 1024
LATENT_SIZE = 100
IMAGE_SIZE = 64
TVT_SPLIT = [49800, 100, 100]
TVT_SPLIT_LABELS = ['train', 'val', 'test']
DATASET_NAME = 'data50000'
DATA_PATH = os.path.abspath('../temp/%s/data.pkl' % DATASET_NAME)
# Training parameters
BATCH_SIZE = 64
ADAM_LR = 0.0002
ADAM_B = (0.5, 0.999)
NUM_EPOCHS = 91
LAMBDA = 10.0 # Weight of gradient penalty
# Model parameters
NGF = 64
NDF = 64
CONDITIONAL = True
# Run parameters
RUN_ID = 64
RUN_COMMENT = 'Type your run comment here!'
RUN_PATH = os.path.abspath('../runs/run%d' % RUN_ID)
IMG_OUT_PATH = os.path.join(RUN_PATH, 'out')
MODEL_PATH = None # None means starting fresh
MODEL_OUT_PATH = os.path.join(RUN_PATH, 'models')
INTV_PRINT_LOSS = 1
INTV_SAVE_IMG = 1
INTV_SAVE_MODEL = 10
NUM_UPDATE_D = 3
| null |
code/opts.py
|
opts.py
|
py
| 1,412 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
}
] |
86552622
|
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD, PCA, KernelPCA
from sklearn.feature_extraction.text import TfidfTransformer
from datetime import datetime
import os
import sys
initial_dir = os.getcwd()
os.chdir('../course_repo/machine_learning_examples/rnn_class')
from util import get_wikipedia_data
from brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx
# os.chdir(initial_dir)
sentences, word2idx = get_wikipedia_data(n_files = 3, n_vocab = 2000, by_paragraph = True)
# build term document matrix:
V = len(word2idx)
N = len(sentences)
# create raw counts:
A = np.zeros((V,N))
j = 0
for sentence in sentences:
for i in sentence:
A[i, j] += 1
j+=1
transformer = TfidfTransformer()
A = transformer.fit_transform(A.T).T
# TSNE requires a dense array (t-SNE is a dimensionality-reduction algorithm)
A = A.toarray()
# map back to word in plot
idx2word = {v:k for k, v in iteritems(word2idx)}
tsne = TSNE()
Z = tsne.fit_transform(A)
Z_df = pd.DataFrame(Z)
Z_df['word'] = Z_df.index.map(lambda x: idx2word[x])
Z_df.columns = ['x','y','word']
# look at some words in the space:
small_subset_words = ['king', 'man', 'woman','france', 'paris', 'rome', 'italy']
small_subset_indices = [word2idx[word] for word in small_subset_words]
Z_small = Z[small_subset_indices]
plt.scatter(Z_small[:,0], Z_small[:,1])
for i in range(V):
if i in small_subset_indices:
try:
plt.annotate(s=idx2word[i].encode("utf8").decode("utf8"), xy=(Z[i,0], Z[i,1]))
except:
print("bad string:", idx2word[i])
plt.draw()
# look at a random small section of the plot:
def random_section(Z_df = Z_df):
random_index = np.random.choice(range(V))
idx2word[random_index]
feature = Z[random_index]
Z_df['distance'] = Z_df.index.map(lambda x: (Z[x][0]- feature[0])**2 + (Z[x][1]- feature[1])**2)
Z_df = Z_df.sort_values(by = 'distance').reset_index(drop = False)
small_df = Z_df.iloc[0:9]
plt.scatter(small_df['x'], small_df['y'])
for i in range(len(small_df)):
plt.annotate(small_df['word'][i], xy=(small_df['x'][i], small_df['y'][i]))
plt.draw()
random_section()
# find a word analogy:
# we want to solve: word_vec[0] - word_vec[1] = word_vec[2] - ?
def analogy_finder(word_vec, Z_df = Z_df):
indices = [word2idx[word] for word in word_vec]
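# Rearranging word_vec[0] - word_vec[1] = word_vec[2] - x gives
# x = word_vec[2] + word_vec[1] - word_vec[0], computed below in t-SNE space.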
new_vec = Z[indices][2] + Z[indices][1]- Z[indices][0]
Z_df['distance'] = Z_df.index.map(lambda x: (Z[x][0]- new_vec[0])**2 + (Z[x][1]- new_vec[1])**2)
new = Z_df.sort_values(by = 'distance').reset_index(drop = False)['index'][0]
return(idx2word[new])
analogy_finder(['king', 'queen', 'man'])
analogy_finder(['spain', 'spanish', 'england'])
analogy_finder(['spain', 'madrid', 'italy'])
analogy_finder(['italy', 'rome', 'italy'])
| null |
lecture_8.py
|
lecture_8.py
|
py
| 3,078 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "util.get_wikipedia_data",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "future.utils.iteritems",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "builtins.range",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "numpy.random.choice",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "builtins.range",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "builtins.range",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
}
] |
651719991
|
#!/usr/bin/env python
"""Functions for streamlining analysis."""
# Imports
from collections import defaultdict, Sequence, namedtuple
import functools
import itertools
import concurrent.futures as futures
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import statsmodels
import munch
import patsy
from crunchers import parallel as putils
from crunchers import ipython_info
if ipython_info():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
# Metadata
__author__ = "Gus Dunn"
__email__ = "[email protected]"
def tree():
return defaultdict(tree)
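# tree() is the classic autovivifying dict: indexing a missing key creates
# another nested tree, so regressions[a][b][c] = value needs no setdefault.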
def report_logitreg(formula, data, verbose=True, disp=1):
"""Fit logistic regression, print a report, and return the fit object."""
y, X = patsy.dmatrices(formula, data=data, return_type='dataframe')
results = smf.Logit(endog=y, exog=X).fit(disp=disp)
# results = smf.logit(formula, data=data).fit()
summary = results.summary()
margeff = results.get_margeff().summary()
if verbose:
report = """\n{summary}\n\n\n{margeff}\n""".format(summary=summary,margeff=margeff)
print(report)
return results
def report_glm(formula, data, verbose=True, **kwargs):
"""Fit GLM, print a report, and return the fit object."""
results = smf.glm(formula, data=data, **kwargs).fit(disp=False, **kwargs)
summary = results.summary()
if verbose:
report = """\n{summary}\n""".format(summary=summary)
print(report)
return results
def report_ols(formula, data, fit_regularized=False, L1_wt=1, refit=False, **kwargs):
"""Fit OLS regression, print a report, and return the fit object."""
RegressionResultsWrapper = statsmodels.regression.linear_model.RegressionResultsWrapper
# parse formula string into design matrix
y, X = patsy.dmatrices(formula, data=data, return_type='dataframe')
if fit_regularized:
# Does not wrap in RegressionResultsWrapper automatically when using elastic_net
results = RegressionResultsWrapper(smf.OLS(endog=y, exog=X,).fit_regularized(start_params=None, L1_wt=L1_wt, refit=refit, **kwargs))
else:
results = smf.OLS(endog=y, exog=X,).fit(**kwargs)
return results
def report_rlm(formula, data, verbose=True, **kwargs):
"""Fit RLM, print a report, and return the fit object."""
results = smf.rlm(formula, data=data, **kwargs).fit(**kwargs)
summary = results.summary()
if verbose:
report = """\n{summary}\n""".format(summary=summary)
print(report)
return results
def do_regression(data, y_var, X_ctrls=None, X_hyp=None, kind='OLS', **kwargs):
"""Provide a further abstracted way to build and run multiple types of regressions.
data (pd.DataFrame): data table to use when retrieving the column headers
y_var (str): column header of the outcome variable
X_ctrls (str): formula specification of the "boring" variables "column_header_1 + column_header_2"...
X_hyp (str): formula specification of the "interesting" variables "column_header_1 + column_header_2"...
kind (str): the type of regression to run `kind in ['GLM','OLS','RLM'] == True`
"""
assert (X_ctrls is not None) or (X_hyp is not None)
if X_hyp is not None:
X_hyp = ' + {X_hyp}'.format(X_hyp=X_hyp)
else:
X_hyp = ''
if X_ctrls is None:
X_ctrls = ''
formula = '{y_var} ~ {X_ctrls}{X_hyp}'.format(y_var=y_var,
X_ctrls=X_ctrls,
X_hyp=X_hyp)
regs = munch.Munch()
regs.GLM = report_glm
regs.OLS = report_ols
regs.RLM = report_rlm
regs.LOGIT = report_logitreg
return regs[kind](formula=formula,
data=data,
**kwargs)
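# Hypothetical call (illustration only, names are made up):
# do_regression(df, y_var='bmi', X_ctrls='age + C(sex)', X_hyp='gene_score',
# kind='OLS') builds the formula 'bmi ~ age + C(sex) + gene_score' and
# dispatches to report_ols.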
def build_regression_models_grid(X_hyps_dicts, ctrl_coefs_dicts, outcomes_dicts):
grid = munch.Munch()
combos = itertools.product(X_hyps_dicts.keys(),ctrl_coefs_dicts.keys(),outcomes_dicts.keys())
for model_vars in tqdm(combos, desc="building regression model grid"):
grid_key = putils.GridKey(*model_vars)
grid[grid_key] = putils.GridValues(X_hyps=X_hyps_dicts[grid_key.X_hyps],
ctrl_coefs=ctrl_coefs_dicts[grid_key.ctrl_coefs],
outcomes=outcomes_dicts[grid_key.outcomes])
return grid
def regression_grid_single(grid_item, data, kind, **kwargs):
grid_key, grid_value = grid_item
y_var = grid_value.outcomes
X_ctrls = ' + '.join(grid_value.ctrl_coefs)
X_hyp = ' + '.join(grid_value.X_hyps)
try:
result = do_regression(data=data,
y_var=y_var,
X_ctrls=X_ctrls,
X_hyp=X_hyp,
kind=kind,
**kwargs
)
return grid_key, result
except np.linalg.linalg.LinAlgError:
print('error with: {x}'.format(x=X_hyp))
return grid_key, 'LinAlgError'
def run_regressions_grid(grid, data, kind, max_workers=None, **kwargs):
regressions = tree()
partial_regression_grid_single = functools.partial(regression_grid_single,
data=data,
kind=kind,
**kwargs
)
with futures.ProcessPoolExecutor(max_workers=max_workers) as worker_pool:
# results = tqdm(worker_pool.map(partial_regression_grid_single, (grid_item for grid_item in grid.items())))
results = worker_pool.map(partial_regression_grid_single, (grid_item for grid_item in grid.items()))
for job in tqdm(results, total=len(grid)):
grid_key, reg_result = job
X_hyps = grid_key.X_hyps
ctrl_coefs = grid_key.ctrl_coefs
outcomes = grid_key.outcomes
regressions[outcomes][ctrl_coefs][X_hyps] = reg_result
return munch.munchify(regressions)
def summarize_multi_LOGIT(results):
"""Return dataframe aggregating over-all stats from a dictionary-like object containing LOGIT result objects."""
s = defaultdict(defaultdict)
for name,reg in results.items():
s['converged'][name] = reg.mle_retvals['converged']
s['iterations'][name] = reg.mle_retvals['iterations']
s['warnflag'][name] = reg.mle_retvals['warnflag']
s['pseudo_rsqrd'][name] = reg.prsquared
s['aic'][name] = reg.aic
return pd.DataFrame(s)
def summarize_single_OLS(regression, col_dict, name, is_regularized=False):
"""Return dataframe aggregating over-all stats from a dictionary-like object containing OLS result objects."""
reg = regression
try:
col_dict['rsquared'][name] = reg.rsquared
except AttributeError:
col_dict['rsquared'][name] = 'NA'
try:
col_dict['rsquared_adj'][name] = reg.rsquared_adj
except AttributeError:
col_dict['rsquared_adj'][name] = 'NA'
col_dict['f_pvalue'][name] = reg.f_pvalue
col_dict['condition_number'][name] = reg.condition_number
col_dict['regularized'][name] = is_regularized
if not is_regularized:
outliers = reg.outlier_test(method='fdr_bh')['fdr_bh(p)'] <= 0.05
col_dict['n_outliers'][name] = (outliers).sum()
col_dict['outliers'][name] = ','.join(outliers.index[outliers].values)
else:
col_dict['n_outliers'][name] = "NA"
col_dict['outliers'][name] = "NA"
col_dict['aic'][name] = reg.aic
return col_dict
def summarize_multi_OLS(results):
"""Return dataframe aggregating over-all stats from a dictionary-like object containing OLS result objects."""
col_dict = defaultdict(defaultdict)
test_reg = list(results.values())[0]
try:
is_regularized = test_reg.regularized
except AttributeError:
is_regularized = False
for name, reg in results.items():
# TODO: Adapt summarize_single_OLS to be used here
col_dict = summarize_single_OLS(regression=reg, col_dict=col_dict, name=name, is_regularized=is_regularized)
df = pd.DataFrame(col_dict)
df.index.name = 'outcome'
return df
def summarize_grid_OLS(regs, reg_grid):
summaries_overall = []
grid_keys = list(reg_grid.keys())
for gk in tqdm(grid_keys, desc="initial summary"):
# col_dict = defaultdict(defaultdict)
r = regs[gk.outcomes][gk.ctrl_coefs][gk.X_hyps]
df_part = summarize_multi_OLS({gk.X_hyps: r})
df_part.index.name = 'X_hyps'
df_part = df_part.reset_index()
df_part['ctrl_coefs'] = gk.ctrl_coefs
df_part['outcomes'] = gk.outcomes
summaries_overall.append(df_part)
summaries_overall_df = pd.concat(summaries_overall)
# calculate q-values and add to regression objects
outcome_ctrl_grps = summaries_overall_df.groupby(["outcomes","ctrl_coefs"])
qvals = []
def assign_qvalue_reg(row, regs):
qval = row['qvalue_reg']
r = regs[row.outcomes][row.ctrl_coefs][row.X_hyps]
r.qvalue_reg = qval
for name,df in tqdm(outcome_ctrl_grps, desc="adding qvalues"):
df = df.copy()
df.index.name = 'dropme'
df['qvalue_reg'] = statsmodels.stats.multitest.multipletests(pvals=df['f_pvalue'], alpha=0.05, method='fdr_bh')[1]
df["reg_obj"] = df.apply(lambda row: assign_qvalue_reg(row, regs), axis=1)
df = df.set_index(["outcomes", "ctrl_coefs","X_hyps"])
qvals.append(df)
summaries_overall_df = pd.concat(qvals)
columns = ["aic","condition_number","f_pvalue","qvalue_reg","n_outliers","outliers","regularized","rsquared","rsquared_adj"]
return summaries_overall_df[columns]
def get_diff(a,b):
return abs(a) - abs(b)
def get_log2_fold(a,b):
return np.log2(abs(a) / abs(b))
def compare_coefs(row, value, results):
reg = results[row['regression']]
try:
X_var = reg.params[row['X_hyp']]
value = reg.params[value]
except KeyError:
return "NA"
comparisons = "{value} | {diff} | {log2_fold}".format(value=round(value,4),
diff=round(get_diff(value,X_var),4),
log2_fold=round(get_log2_fold(value,X_var),4)
)
return comparisons
def identify_full_ctrl_names(X_vars, orig_ctrl_names):
"""Return set of variable names actually used in regression, tolerating mangling of categoricals."""
X_vars = set(X_vars)
ctrls = []
for X_var in X_vars:
for orig_ctrl in orig_ctrl_names:
if X_var == orig_ctrl:
ctrls.append(X_var)
elif X_var.startswith(orig_ctrl) and (X_var.startswith('C(') and X_var.endswith(']')):
ctrls.append(X_var)
else:
pass
return set(ctrls)
def summarize_X_vars(results, sig_thresh=0.05, X_ctrls=None, X_ignore=None):
if sig_thresh is None:
sig_thresh = 1
if X_ctrls is None:
X_ctrls = []
if X_ignore is None:
X_ignore = []
regs_dfs = []
for name,reg in results.items():
regs_dfs.append(pd.DataFrame(data={'pvalue_coef':reg.pvalues,
'coef':reg.params,
'regression': name,
'pvalue_reg': reg.f_pvalue,
'qvalue_reg': reg.qvalue_reg
},
columns=['regression','pvalue_reg','qvalue_reg','pvalue_coef','coef'],
)
)
pvals = pd.concat(regs_dfs)
pvals['X_hyp'] = pvals.index.values
pvals = pvals.reset_index(drop=True)
X_ctrls = identify_full_ctrl_names(X_vars=pvals['X_hyp'].values, orig_ctrl_names=X_ctrls)
X_ignore.extend(X_ctrls)
ignore_these = pvals.X_hyp.isin(X_ignore)
sig = pvals[~ignore_these].query(''' pvalue_coef <= {thresh} '''.format(thresh=sig_thresh))
return sig
def summarize_grid_X_vars_OLS(regs, reg_grid, sig_thresh=0.05):
rename_map = {"pvalue_coef": "pvalue_param_coef",
"coef": "param_coef",
"X_hyp": "param_name"}
summaries_X_vars = []
grid_keys = list(reg_grid.keys())
for gk in tqdm(grid_keys, desc="initial summaries"):
# col_dict = defaultdict(defaultdict)
r = regs[gk.outcomes][gk.ctrl_coefs][gk.X_hyps]
# df_part = stats.summarize_multi_OLS({gk.X_hyps:r})
df_part = summarize_X_vars(results={gk.X_hyps:r}, sig_thresh=sig_thresh, X_ctrls=None, X_ignore=["Intercept"])
# df_part.index.name = 'X_hyps'
# df_part = df_part.reset_index(inplace=False)
df_part['ctrl_coefs'] = gk.ctrl_coefs
df_part['qvalue_reg'] = r.qvalue_reg
df_part['outcomes'] = gk.outcomes
summaries_X_vars.append(df_part)
columns = ["pvalue_reg","qvalue_reg","pvalue_param_coef","param_coef"]
summaries_X_vars_df = pd.concat(summaries_X_vars).rename(columns=rename_map).set_index(["outcomes", "ctrl_coefs","regression","param_name"], append=False, inplace=False).sort_index()[columns]
return summaries_X_vars_df
def format_all_regression_models(regs, total):
"""Return tuple of string formated versions of all regression tables in the `regs` object.
Args:
regs (reg-tree: dict-like): tree-like dict containing the regression
results objects as leaves and descriptors as nodes.
total (int): total number of results tables to format.
Returns:
list
"""
# GridKey(X_hyps='ITGA4', ctrl_coefs='ctrl_coefs_no_ster', outcomes='L_Thalamus_Proper')
# args = Munch()
# args.X_hyps = []
# args.ctrl_coefs = []
# args.outcomes = []
# args.table = []
#
# # with futures.ProcessPoolExecutor(max_workers=max_workers) as worker_pool:
# # all_tables = worker_pool.map(partial_regression_grid_single, )
divider = "#"*66
tmpl = "-- {outcome} {ctrl} {X_hyp} --\n\n" \
"outcomes: {outcome}\n" \
"ctrl: {ctrl}\n" \
"X_hyp: {X_hyp}\n\n" \
"{table}\n\n" \
"{divider}\n" \
"{divider}\n\n"
prog_bar = tqdm(total=total)
all_tables = []
for outcome, ctrl_coefs_dicts in regs.items():
for ctrl, X_hyps_dicts in ctrl_coefs_dicts.items():
for X_hyp, result in X_hyps_dicts.items():
table = result.summary2()
all_tables.append(tmpl.format(**locals()))
prog_bar.update(n=1)
return all_tables
| null |
src/crunchers/statsmodels_helpers/lazy_stats.py
|
lazy_stats.py
|
py
| 14,903 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "crunchers.ipython_info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "patsy.dmatrices",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.Logit",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "statsmodels.formula.api.glm",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "statsmodels.regression",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "patsy.dmatrices",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.OLS",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "statsmodels.formula.api.OLS",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "statsmodels.formula.api.rlm",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "munch.Munch",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "munch.Munch",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "crunchers.parallel.GridKey",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "crunchers.parallel",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "crunchers.parallel.GridValues",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "crunchers.parallel",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "numpy.linalg",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ProcessPoolExecutor",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "concurrent.futures",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "munch.munchify",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "statsmodels.stats.multitest.multipletests",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "statsmodels.stats",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 439,
"usage_type": "call"
}
] |
619528332
|
# Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core of all things hype."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
from threading import Lock
import time
from absl import logging
from concurrent import futures
from typing import Any, Callable, Dict, List, Optional, Text, Union
from hypebot import types
from hypebot.core import async_lib
from hypebot.core import proxy_lib
from hypebot.core import schedule_lib
from hypebot.core import util_lib
from hypebot.core import zombie_lib
from hypebot.interfaces import interface_lib
from hypebot.plugins import coin_lib
from hypebot.plugins import deploy_lib
from hypebot.plugins import hypestack_lib
from hypebot.plugins import inventory_lib
from hypebot.protos.channel_pb2 import Channel
from hypebot.stocks import stock_factory
from hypebot.storage import storage_factory
# TODO(someone): Remove and replace usage with direct dependency on types lib.
_MessageType = Union[
Text,
List[Text]]
MessageType = Optional[_MessageType]
def _MakeMessage(response: _MessageType) -> types.Message:
msg = types.Message()
_AppendToMessage(msg, response)
return msg
def _GetAlternateTextList(value: Union[Text, List[Text]]) -> List[Text]:
if isinstance(value, Text):
return value.split('\n')
# Flat map to expand newlines to separate list items.
return list(itertools.chain.from_iterable([x.split('\n') for x in value]))
def _AppendToMessage(msg: types.Message, response: _MessageType):
if isinstance(response, (bytes, Text)):
for line in response.split('\n'):
msg.messages.add(text=line)
else:
assert isinstance(response, list)
for line in response:
_AppendToMessage(msg, line)
class RequestTracker(object):
"""Tracks user requests that require confirmation."""
_REQUEST_TIMEOUT_SEC = 60
def __init__(self, reply_fn: Callable) -> None:
self._reply_fn = reply_fn
self._pending_requests = {} # type: Dict[types.User, Dict]
self._pending_requests_lock = Lock()
def HasPendingRequest(self, user: types.User) -> bool:
with self._pending_requests_lock:
return user in self._pending_requests
def RequestConfirmation(self,
user: types.User,
summary: str,
request_details: Dict,
action_fn: Callable,
parse_fn: Optional[Callable] = None) -> None:
"""Create a user request that must be confirmed before action is taken.
This is a very generic flow useful for any command/bot service that would
like to double-check with the user before some action is taken (e.g. filing
an issue). There can be only a single pending request per user at a time.
When there is an outstanding request for user, all other calls to this
function will fail until either the user confirms or denies their pending
request, or _REQUEST_TIMEOUT_SEC has elapsed.
Args:
user: The user making the request.
summary: Summary of the request, used in confirmation message.
request_details: Information passed to action_fn upon confirmation.
action_fn: Function called if user confirms this request.
parse_fn: Function used to parse a user's response.
Returns:
None
"""
now = time.time()
with self._pending_requests_lock:
previous_request = self._pending_requests.get(user, None)
if previous_request:
if now - previous_request['timestamp'] < self._REQUEST_TIMEOUT_SEC:
self._reply_fn(user,
'Confirm prior request before submitting another.')
return
del self._pending_requests[user]
request_details['timestamp'] = now
request_details['action'] = action_fn
if not parse_fn:
parse_fn = lambda x: x.lower().startswith('y')
request_details['parse'] = parse_fn
self._pending_requests[user] = request_details
self._reply_fn(user, 'Confirm %s?' % summary)
def ResolveRequest(self, user: types.User, user_msg: str) -> None:
"""Resolves a pending request, taking the linked action if confirmed."""
now = time.time()
with self._pending_requests_lock:
request_details = self._pending_requests.get(user)
if not request_details:
return
if not request_details['parse'](user_msg):
self._reply_fn(user, 'Cancelling request.')
elif now - request_details['timestamp'] >= self._REQUEST_TIMEOUT_SEC:
self._reply_fn(user, 'You took too long to confirm, try again.')
else:
self._reply_fn(user,
request_details.get('action_text',
'Confirmation accepted.'))
request_details['action'](user, request_details)
del self._pending_requests[user]
class OutputUtil(object):
"""Allows plugins to send output without a reference to Core."""
def __init__(self, output_fn: Callable) -> None:
self._output_fn = output_fn
def LogAndOutput(self,
log_level: int,
channel: Channel,
message: MessageType) -> None:
"""Logs message at log_level, then sends it to channel via Output."""
logging.log(log_level, message)
self.Output(channel, message)
def Output(self, channel: Channel, message: MessageType) -> None:
"""Outputs a message to channel."""
self._output_fn(channel, message)
class Core(object):
"""The core of hypebot.
Any state or service that is needed by more than one command.
"""
def __init__(
self,
params: Any, # HypeParams
interface: interface_lib.BaseChatInterface) -> None:
"""Constructs core of hypebot.
Args:
params: Bot parameters.
interface: This will always be the original interface that the bot was
created with, and never the CaptureInterface during nested calls. For
this reason, you should only call Join/Part and potentially Notice/Topic
on this interface. Don't call SendMessage or else it can send messages
never intended for human consumption.
hypeletter_callback: brcooley get rid of this when migrating hypeletter to
its own command.
"""
self.params = params
self.nick = self.params.name.lower()
self.interface = interface
self.output_util = OutputUtil(self.Reply)
self.store = storage_factory.CreateFromParams(self.params.storage)
cached_type = self.params.storage.get(self.params.storage.type, {}).get(
'cached_type')
if cached_type:
self.cached_store = storage_factory.Create(
cached_type, self.params.storage.get(self.params.storage.type))
else:
logging.info('No cached_type found for storage, using default store.')
self.cached_store = self.store
self.user_tracker = util_lib.UserTracker()
self.timezone = self.params.time_zone
self.scheduler = schedule_lib.HypeScheduler(self.timezone)
self.executor = futures.ThreadPoolExecutor(max_workers=8)
self.runner = async_lib.AsyncRunner(self.executor)
self.inventory = inventory_lib.InventoryManager(self.store)
self.proxy = proxy_lib.Proxy(self.store)
self.zombie_manager = zombie_lib.ZombieManager(self.Reply)
self.request_tracker = RequestTracker(self.Reply)
self.bank = coin_lib.Bank(self.store, self.nick)
self.bets = coin_lib.Bookie(self.store, self.bank, self.inventory)
self.stocks = stock_factory.CreateFromParams(self.params.stocks, self.proxy)
self.deployment_manager = deploy_lib.DeploymentManager(
self.nick, self.bets, self.output_util, self.executor)
self.hypestacks = hypestack_lib.HypeStacks(self.store, self.bank,
self.Reply)
self.betting_games = []
self.last_command = None
self.default_channel = Channel(visibility=Channel.PUBLIC,
**self.params.default_channel.AsDict())
def Reply(self,
channel: types.Target,
msg: MessageType,
default_channel: Optional[Channel] = None,
limit_lines: bool = False,
max_public_lines: int = 6,
user: Optional[types.User] = None,
log: bool = False,
log_level: int = logging.INFO) -> None:
"""Sends a message to the channel.
Leaving Reply on the HypeCore allows replacing the interface to process
nested commands. However, some change will be needed in order to actually
create an OutputUtil for HBDS without a HypeCore.
Args:
channel: Who/where to send the message.
msg: The message to send.
default_channel: Who/where to send the message if no channel is specified.
limit_lines: Whether to limit lines or not.
max_public_lines: Maximum number of lines to send to a public channel.
user: If specified, where to send the message if it's too long.
log: Whether to also log the message.
log_level: How important the log is.
"""
if not msg:
return
if log:
text_msg = msg
logging.log(log_level, text_msg, exc_info=log_level == logging.ERROR)
channel = channel or default_channel
if not channel:
logging.info('Attempted to send message with no channel: %s', msg)
return
# Support legacy Reply to users as a string.
if not isinstance(channel, Channel):
# Send messages for sub-accounts to the real user.
channel = Channel(id=channel.split(':')[0],
visibility=Channel.PRIVATE,
name=channel)
if (limit_lines and channel.visibility == Channel.PUBLIC and
isinstance(msg, list) and len(msg) > max_public_lines):
if user:
self.interface.SendMessage(
channel, _MakeMessage('It\'s long so I sent it privately.'))
self.interface.SendMessage(
Channel(id=user, visibility=Channel.PRIVATE, name=user),
_MakeMessage(msg))
else:
# If there is no user, just truncate and send to channel.
self.interface.SendMessage(
channel, _MakeMessage(msg[:max_public_lines] + ['...']))
else:
self.interface.SendMessage(channel, _MakeMessage(msg))
def ReloadData(self) -> bool:
"""Asynchronous reload of all data on core.
Searches for any attribute that has a ReloadData function and calls it.
Returns:
Whether reload triggered or not since it was still running.
"""
if not self.runner.IsIdle():
logging.info('Runner not idle, can not trigger reload.')
return False
self.proxy.FlushCache()
for obj in self.__dict__.values():
if hasattr(obj, 'ReloadData'):
logging.info('Triggering reload for: %s', obj.__class__.__name__)
self.runner.RunAsync(obj.ReloadData)
return True
| null |
hypebot/hypecore.py
|
hypecore.py
|
py
| 11,507 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.Union",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "hypebot.types.Message",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "hypebot.types",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "hypebot.types.Message",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types.Message",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "threading.Lock",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "hypebot.types.User",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "hypebot.types.User",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "hypebot.types.User",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "absl.logging.log",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "hypebot.interfaces.interface_lib.BaseChatInterface",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "hypebot.interfaces.interface_lib",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "hypebot.storage.storage_factory.CreateFromParams",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "hypebot.storage.storage_factory",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "hypebot.storage.storage_factory.Create",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "hypebot.storage.storage_factory",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "absl.logging.info",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "hypebot.core.util_lib.UserTracker",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "hypebot.core.util_lib",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "hypebot.core.schedule_lib.HypeScheduler",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "hypebot.core.schedule_lib",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "concurrent.futures",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "hypebot.core.async_lib.AsyncRunner",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "hypebot.core.async_lib",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "hypebot.plugins.inventory_lib.InventoryManager",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "hypebot.plugins.inventory_lib",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "hypebot.core.proxy_lib.Proxy",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "hypebot.core.proxy_lib",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "hypebot.core.zombie_lib.ZombieManager",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "hypebot.core.zombie_lib",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "hypebot.plugins.coin_lib.Bank",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "hypebot.plugins.coin_lib",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "hypebot.plugins.coin_lib.Bookie",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "hypebot.plugins.coin_lib",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "hypebot.stocks.stock_factory.CreateFromParams",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "hypebot.stocks.stock_factory",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "hypebot.plugins.deploy_lib.DeploymentManager",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "hypebot.plugins.deploy_lib",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "hypebot.plugins.hypestack_lib.HypeStacks",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "hypebot.plugins.hypestack_lib",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel.PUBLIC",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types.Target",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "hypebot.types.User",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "hypebot.types",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "absl.logging.INFO",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "absl.logging",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "absl.logging.log",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "absl.logging.ERROR",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "absl.logging.info",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 265,
"usage_type": "argument"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel.PRIVATE",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel.PUBLIC",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "hypebot.protos.channel_pb2.Channel.PRIVATE",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "absl.logging.info",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "absl.logging.info",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "absl.logging",
"line_number": 301,
"usage_type": "name"
}
] |
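A note on the ReloadData pattern in the hypecore record above: it discovers reloadable components by duck typing rather than a registry, so any attribute that grows a ReloadData method is picked up automatically. A minimal sketch of that discovery loop, with hypothetical class names and the async runner left out:

class NewsStore:
    def ReloadData(self):
        print('news reloaded')

class Core:
    def __init__(self):
        self.news = NewsStore()
        self.counter = 0  # has no ReloadData, so it is silently skipped

    def ReloadAll(self):
        # Scan instance attributes and invoke any ReloadData hook found.
        for obj in self.__dict__.values():
            if hasattr(obj, 'ReloadData'):
                obj.ReloadData()

Core().ReloadAll()  # prints: news reloaded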
73506498
|
import os
import numpy as np
import librosa as lb
from keras.models import load_model
import tkinter as tk
#%%
def predict():
filepath_prediction = 'C:\\Users\\Lakshminagaraj\\Documents\\Pramod\\ML project\\SplitOnSilenceFiles'
model_load_path = "C:\\Users\\Lakshminagaraj\\Documents\\Pramod\\ML project\\saved_models"
prediction_X = []
prediction_X_num = []
for file in os.listdir(filepath_prediction):
aud, sr = lb.core.load(filepath_prediction + "\\" + file)
#aud = signal.medfilt(aud, 23)
mfccs = np.mean(lb.feature.mfcc(y=aud, sr=sr, n_mfcc=50), axis = 1)
prediction_X.append(mfccs)
prediction_X_num.append(aud)
prediction_X_num = np.array(prediction_X_num)
model = load_model(model_load_path + "\\" + os.listdir(model_load_path)[-2])
#Prediction of numbers according to a saved model
predictions = model.predict_classes(np.array(prediction_X))
predictions = list(predictions)
global final_string_num
final_string_num = ""
while predictions[-1] == 10 or predictions[-1] == 11:
predictions.pop()
l = len(predictions)
i = 0
while i < l:
if predictions[i] == 10:
i += 1
final_string_num += ((str(predictions[i]) + "-") * 2)
elif predictions[i] == 11:
i += 1
final_string_num += ((str(predictions[i]) + "-") * 3)
else:
final_string_num += ((str(predictions[i]) + "-"))
i += 1
#print(final_string_num)
#tkWindow()
return predictions, prediction_X_num
def tkWindow():
root = tk.Tk()
root.geometry("200x100")
root.title("Model Predictions")
root.configure(bg = "black")
tk.Label(root, text = final_string_num[:-1]).place(relx = 0.25, rely = 0.5, anchor = tk.CENTER)
tk.Button(root, text = "OK", command = root.destroy).place(relx = 0.75, rely = 0.5, anchor = tk.CENTER)
root.mainloop()
| null |
prediction.py
|
prediction.py
|
py
| 2,017 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "librosa.core.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "librosa.core",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "librosa.feature.mfcc",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "librosa.feature",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tkinter.CENTER",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tkinter.CENTER",
"line_number": 55,
"usage_type": "attribute"
}
] |
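The prediction pipeline above reduces each audio clip to a fixed-length vector by averaging 50 MFCC coefficients over time before classification. That feature step in isolation, run on a synthetic one-second signal instead of the recorded clips:

import numpy as np
import librosa as lb

sr = 22050
aud = np.random.randn(sr)  # stand-in for one second of audio
# Mean over the time axis yields one 50-dimensional vector per clip.
mfccs = np.mean(lb.feature.mfcc(y=aud, sr=sr, n_mfcc=50), axis=1)
print(mfccs.shape)  # (50,)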
560445574
|
MODEL = 'BGRU_GA'
import numpy as np
from tensorflow import set_random_seed
np.random.seed(1234)
set_random_seed(12345)
import os, pickle
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
from LossHistory import LossHistory
from keras.layers import Dense, Input, Embedding, GRU, Bidirectional, TimeDistributed, concatenate, Dropout, BatchNormalization, Reshape, \
    RepeatVector, multiply, Permute, Activation
from keras.models import Model
from granularAtt import Granular_Attention_layer
from keras import regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
MAX_NB_WORDS = 100000
MAX_SEQUENCE_LENGTH = 70
EMBEDDING_DIM = 200
POS_EMBEDDING_DIM = 10
MODEL_DIR = './models/'
EMBED_DIR = './embedding/'
def swish(x):
return (K.sigmoid(x) * x)
get_custom_objects().update({'swish': Activation(swish)})
def build_model():
pos_input1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='float32', name='pos_input1')
word_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='float32', name='aux_input')
word_inputE = Embedding(len(word_index) + 1, EMBEDDING_DIM, mask_zero=False, weights=[word_embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH)(word_input)
pos_input11 = RepeatVector(EMBEDDING_DIM)(pos_input1)
pos_input11 = Permute((2, 1))(pos_input11)
xx1 = multiply([word_inputE, pos_input11])
x = Dropout(0.5)(xx1)
y = Bidirectional(GRU(256, return_sequences=True, activation=swish))(x)
att = Granular_Attention_layer()(y)
d2 = Dropout(0.5)(att)
    d2 = BatchNormalization(epsilon=1e-06, axis=-1, momentum=0.9, beta_initializer='zero', gamma_initializer='one')(d2)  # 'mode' and 'weights' are Keras 1 kwargs that Keras 2 rejects
main_output = Dense(6, kernel_regularizer=regularizers.l2(0.02), activation='softmax', name='main_output')(d2)
model = Model(inputs=[word_input, pos_input1], outputs=main_output)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
model.summary()
return model
def train_model(model, x_train, y_train, x_dev, y_dev):
i = 100
dir_model = os.path.join(MODEL_DIR, MODEL)
if not os.path.exists(dir_model):
os.mkdir(dir_model)
filepath = dir_model + "/weights-improvement-{epoch:02d}-{val_acc:.4f}.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1)
print("The " + str(i) + "-th iteration.")
history = LossHistory()
model.fit({'aux_input': x_train, 'pos_input1': pos_chem_train}, {'main_output': y_train},
validation_data=([x_dev, pos_chem_dev], y_dev),
epochs=i, batch_size=64, verbose=2, callbacks=[history, checkpoint,
EarlyStopping(monitor='val_loss', min_delta=0.005, patience=5, verbose=0, mode='min')]
)
| null |
model.py
|
model.py
|
py
| 2,792 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "keras.backend.sigmoid",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "keras.utils.generic_utils.get_custom_objects",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.layers.Embedding",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.layers.RepeatVector",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.layers.Permute",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.multiply",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "keras.layers.Bidirectional",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "granularAtt.Granular_Attention_layer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "keras.models.Model",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "LossHistory.LossHistory",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.EarlyStopping",
"line_number": 62,
"usage_type": "call"
}
] |
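The swish registration in this script is the standard Keras recipe for exposing a custom activation by name; a self-contained sketch of just that mechanism:

from keras import backend as K
from keras.layers import Activation, Dense, Input
from keras.models import Model
from keras.utils.generic_utils import get_custom_objects

def swish(x):
    return K.sigmoid(x) * x

# After registration, 'swish' can be referenced by string like any built-in.
get_custom_objects().update({'swish': Activation(swish)})

inp = Input(shape=(8,))
out = Dense(4, activation='swish')(inp)
Model(inputs=inp, outputs=out).summary()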
430825618
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import sys
import os
import os.path
import pickle
import numpy as np
from collections import defaultdict
import scipy.io
from GPyOpt.methods import BayesianOptimization
from problems.define_basic_functions import obj, obj_parallel, define_test_pts
np.set_printoptions(threshold=sys.maxsize)  # sys.maxint does not exist on Python 3; sys.maxsize works on both 2 and 3
def obj_val(x):
global random_seeds, obj_count, list_sampled_x, list_y, list_cost
obj_count += 1
random_seed = random_seeds[obj_count]
x = x[0, :]
print(x)
y_mean, y_std, _, raw_data = obj_parallel(repQL, max_steps, episodeCap, num_policy_checks, checks_per_policy, exp_path,
env=env, flag_num=flag_num,
random_seed=random_seed, x=x, noise_base=noise_base, noise_rand=noise_rand,
maze_num=maze_num, maze_name=maze_name)
y_mean = y_mean[-1]
list_sampled_x.append(x)
list_y.append(y_mean)
list_cost.append(max_steps*repQL)
return y_mean
# ============================================ #
# main #
# ============================================ #
if __name__ == '__main__':
argv = sys.argv[1:]
'''
python main_gpyopt.py mcf2 EI 0 0
argv[0] prob_name
gw10One1, gw10Two1, gw10Two2, gw10Three1; gw20Three1
ky10One
it10
pd10
mcf1, mcf2
argv[1] algorithm
EI, LCB
argv[2] constraint (for ei and lcb)
0, 1
argv[3] sample_num
0,1,2,3,...
'''
prob_name = argv[0]
env = prob_name[:2]
acq = argv[1]
constraint = int(argv[2])
sample = int(argv[3])
result_path = './Results_gpyopt_'+prob_name+acq+'/'
if not os.path.exists(result_path): os.makedirs(result_path)
_test_x, _row_num, _col_num, maze_num, maze_name, noise_base, noise_rand, \
flag_num, maze_size, repQL, s, S, skip, episodeCap = define_test_pts(prob_name)
num_policy_checks = 1
checks_per_policy = 1
s_num = int((S-s)/skip)
max_steps = S
if prob_name in ['gw10One1', 'gw10Two1', 'gw10Two2', 'gw20Three1', 'ky10One', 'it10']:
max_iter_ = 100
wid_min = 0.1
wid_max = maze_size*2
domain = [{'name':'var_1', 'type':'continuous', 'domain':(0,maze_size-1), 'dimensionality':1},
{'name':'var_2', 'type':'continuous', 'domain':(0,maze_size-1), 'dimensionality':1},
{'name':'var_3', 'type':'continuous', 'domain':(0,maze_size-1), 'dimensionality':1},
{'name':'var_4', 'type':'continuous', 'domain':(0,maze_size-1), 'dimensionality':1}]
# {'name':'var_5', 'type':'continuous', 'domain':(wid_min,wid_max), 'dimensionality':1},
# {'name':'var_6', 'type':'continuous', 'domain':(wid_min,wid_max), 'dimensionality':1}]
if constraint: constraints = [ {'name': 'const_1', 'constraint': 'x[:,0] - x[:,2]'}]
else: constraints = None
elif prob_name in ['mcf2']:
max_iter_ = 100
position_min = -1.2
position_max = 0.6
domain = [{'name':'var_1', 'type':'continuous', 'domain':(position_min, position_max), 'dimensionality':1},
{'name':'var_2', 'type':'continuous', 'domain':(position_min, position_max), 'dimensionality':1}]
if constraint: constraints = [ {'name': 'const_1', 'constraint': 'x[:,0] - x[:,1]'}]
else: constraints = None
if env in ['gw', 'ky', 'mc']: is_max, mult = False, -1 # minimize the steps
elif env in ['it', 'pd']: is_max, mult = True, 1 # maximize the reward
if env in ['mc']: checks_per_policy = 10
initial_design_numdata = 10
exp_path = result_path+'sample'+str(sample)+'/'
txt_path = result_path+'sample'+str(sample)
np.random.seed(sample)
random_seeds = np.random.randint(900, size=(initial_design_numdata+max_iter_)*2)
list_sampled_x, list_y, list_cost = [], [], []
X_init, Y_init = None, None
max_iter = max_iter_
obj_count = -1
if os.path.isfile(txt_path+'_result.pickle'):
with open(txt_path+'_result.pickle', 'rb') as file: f_dict = pickle.load(file)
array_sampled_x = f_dict.get('sampled_x')
array_y = f_dict.get('observed_y')
array_cost = f_dict.get('cost')
X_init = array_sampled_x
Y_init = np.reshape( np.array(array_y), (-1,1) )
for ind, cost in enumerate(array_cost):
list_sampled_x.append(array_sampled_x[ind, :])
list_y.append(array_y[ind])
list_cost.append(array_cost[ind])
max_iter = max_iter_ - len(list_y)
obj_count = len(list_y) - 1
myBO = BayesianOptimization(f=obj_val, domain=domain,
constraints=constraints,
X=X_init, Y=Y_init,
initial_design_numdata=initial_design_numdata,
initial_design_type='latin',
acquisition_type=acq, maximize=is_max)
myBO.run_optimization(max_iter=max_iter)
myBO.save_report(report_file=txt_path+'_report.txt')
myBO.save_evaluations(evaluations_file=txt_path+'_evaluations.txt')
myBO.save_models(txt_path+'_models.txt')
result = {'sampled_x': np.array(list_sampled_x),
'observed_y': np.array(list_y),
'cost': np.array(list_cost),
'random_seeds': random_seeds}
with open(txt_path+'_result.txt', "w") as file: file.write(str(result))
with open(txt_path+'_result.pickle', "wb") as file: pickle.dump(result, file)
| null |
main_gpyopt.py
|
main_gpyopt.py
|
py
| 5,893 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.maxint",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "problems.define_basic_functions.obj_parallel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "problems.define_basic_functions.define_test_pts",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "GPyOpt.methods.BayesianOptimization",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 137,
"usage_type": "call"
}
] |
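Stripped of the RL objective, the GPyOpt call pattern used above looks like this; the toy quadratic and its domain are illustrative only:

import numpy as np
from GPyOpt.methods import BayesianOptimization

def f(x):
    # GPyOpt evaluates a 2-D array of candidate points, one row per point,
    # and expects a column vector of objective values back.
    return np.sum((x - 0.3) ** 2, axis=1, keepdims=True)

domain = [{'name': 'x1', 'type': 'continuous', 'domain': (0, 1), 'dimensionality': 2}]
bo = BayesianOptimization(f=f, domain=domain, initial_design_numdata=5,
                          initial_design_type='latin', acquisition_type='EI',
                          maximize=False)
bo.run_optimization(max_iter=10)
print(bo.x_opt, bo.fx_opt)  # best point found and its objective value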
643699253
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 18:35:41 2021
@author: piyab
"""
import os
#os.chdir("D:/Saarland/NN TI/NNTI_WS2021_Project")
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import pandas as pd
import TASK_1  # 'import TASK_1.py' is not valid module syntax
from Task1_word_Embeddings import *  # the Task 1 notebook must be exported as a .py module to import from it
SEED = 1234
#torch.manual_seed(SEED)
#torch.backends.cudnn.deterministic = True
#TEXT = data.Field(tokenize = 'spacy', tokenizer_language = 'en_core_web_sm')
#LABEL = data.LabelField(dtype = torch.float)
df = pd.read_csv("hindi_hatespeech.tsv", sep="\t")  # DataFrame.from_csv was removed from pandas
X = df['text']
y = df['task_1']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
def preprocessing(input_data):
    dataset = pd.DataFrame(input_data)
    dataset['text'] = dataset['text'].str.lower()
    # data preprocessing
    dataset['text'] = dataset['text'].map(lambda x: clean_data(x))
    # drop empty values (on this dataset, not the global df)
    dataset = drop_empty_values(dataset)
    #building vocabulary
    sentences, all_words, v = building_vocabulary(dataset)
    #Calculating word frequencies
    frequency_of_words = calculating_word_frequency(all_words)
    return dataset
class Attention(torch.nn.Module):
def __init__(self, output_size, hidden_size, vocab_size, embedding_length, weights):
super(Attention, self).__init__()
self.output_size = output_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embedding_length = embedding_length
self.word_embeddings = nn.Embedding(vocab_size, embedding_length)
        self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)  # the Embedding parameter is .weight, not .weights
self.lstm = nn.LSTM(embedding_length, hidden_size)
self.label = nn.Linear(hidden_size, output_size)
#self.attn_fc_layer = nn.Linear()
def Attention_Net(self, lstm_output, final_state):
hidden = final_state.squeeze(0)
attn_weights = torch.bmm(lstm_output, hidden.unsqueeze(2)).squeeze(2)
soft_attn_weights = F.softmax(attn_weights, 1)
new_hidden_state = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)
return new_hidden_state
    def forward(self, input_sentences, batch_size=None):
        input = self.word_embeddings(input_sentences)
        input = input.permute(1, 0, 2)
        if batch_size is None:
            batch_size = input.size(1)  # self.batch_size was never set; use the real batch dimension
        h_0 = Variable(torch.zeros(1, batch_size, self.hidden_size).cuda())
        c_0 = Variable(torch.zeros(1, batch_size, self.hidden_size).cuda())
        output, (final_hidden_state, final_cell_state) = self.lstm(input, (h_0, c_0))
        output = output.permute(1, 0, 2)
        attn_output = self.Attention_Net(output, final_hidden_state)
        logits = self.label(attn_output)
        return logits
X_train = preprocessing(X_train)
import io
# Open the embedding weight files for reading ('w' mode would truncate them).
out_w = io.open('embedding_weight_W.tsv', 'r', encoding='utf-8')
out_w1 = io.open('embedding_weight_W1.tsv', 'r', encoding='utf-8')
weight_w = []
for line in out_w:
    words = [float(x) for x in line.split(',')]
    weight_w.append(words)
weight_w1 = []
for line in out_w1:
    words = [float(x) for x in line.split(',')]
    weight_w1.append(words)
# Model dimensions; vocabulary size is taken from the loaded embedding matrix.
INPUT_DIM = len(weight_w)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256  # size of the hidden state of the LSTM
OUTPUT_DIM = 1
# 'RNN' was undefined; instantiate the Attention classifier defined above.
weights = torch.tensor(np.array(weight_w, dtype=np.float32))
model = Attention(OUTPUT_DIM, HIDDEN_DIM, INPUT_DIM, EMBEDDING_DIM, weights)
| null |
hindi_binary_classifier.py
|
hindi_binary_classifier.py
|
py
| 3,312 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.DataFrame.from_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "torch.bmm",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.bmm",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 84,
"usage_type": "call"
}
] |
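Attention_Net above is plain dot-product attention over the LSTM outputs; the same tensor algebra run on random data, with shapes chosen only for illustration:

import torch
import torch.nn.functional as F

batch, seq_len, hidden = 2, 7, 16
lstm_output = torch.randn(batch, seq_len, hidden)
final_state = torch.randn(1, batch, hidden)

h = final_state.squeeze(0)                                        # (batch, hidden)
attn_weights = torch.bmm(lstm_output, h.unsqueeze(2)).squeeze(2)  # (batch, seq_len)
soft = F.softmax(attn_weights, 1)                                 # attention distribution
context = torch.bmm(lstm_output.transpose(1, 2), soft.unsqueeze(2)).squeeze(2)
print(context.shape)  # torch.Size([2, 16])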
410881366
|
import numpy as np
import dicom
import glob
import os
import cv2
import mxnet as mx
inDir = 'C:/Users/SriPrav/Documents/R/19DSB2017'
resnet50Model = inDir + "/input/preModels/resnet-50"
Stage1SourceFolder = inDir + "/input/sources/stage2/stage2/*"
FeaturesExtraction_numpyFiles = inDir + "/input/sources/stage2/stage2/"
FeatureExtraction_Folder = inDir + "/input/FeatureExtraction_00_stg2"
def get_extractor():
model = mx.model.FeedForward.load(resnet50Model, 0, ctx=mx.gpu(), numpy_batch_size=1)
#model = mx.mod.Module.load('C:/Users/SriPrav/Documents/R/19DSB2017/input/model/resnet-50', 0)
fea_symbol = model.symbol.get_internals()["flatten0_output"]
feature_extractor = mx.model.FeedForward(ctx=mx.gpu(),
symbol=fea_symbol ,
numpy_batch_size=64,
arg_params=model.arg_params,
aux_params=model.aux_params,
allow_extra_params=True
)
return feature_extractor
def get_3d_data(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
def get_data_id(path):
sample_image = get_3d_data(path)
sample_image[sample_image == -2000] = 0
batch = []
cnt = 0
dx = 40
ds = 512
for i in range(0, sample_image.shape[0] - 3, 3):
tmp = []
for j in range(3):
img = sample_image[i + j]
img = 255.0 / np.amax(img) * img
img = cv2.equalizeHist(img.astype(np.uint8))
img = img[dx: ds - dx, dx: ds - dx]
img = cv2.resize(img, (224, 224))
tmp.append(img)
tmp = np.array(tmp)
batch.append(np.array(tmp))
batch = np.array(batch)
return batch
def calc_features():
net = get_extractor()
for folder in glob.glob(Stage1SourceFolder):
batch = get_data_id(folder)
feats = net.predict(batch)
print(feats.shape)
np.save(folder, feats)
if __name__ == '__main__':
calc_features()
##########################################################################################################
# Move the feature extraction numpy files to features folder
##########################################################################################################
import os
import shutil
files = os.listdir(FeaturesExtraction_numpyFiles)
for f in files:
if f.endswith('.npy'):
shutil.move(os.path.join(FeaturesExtraction_numpyFiles,f), os.path.join(FeatureExtraction_Folder,f))
| null |
DSB2017/1000.FeatureExtraction_00_stg2.py
|
1000.FeatureExtraction_00_stg2.py
|
py
| 2,869 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mxnet.model.FeedForward.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mxnet.model",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gpu",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mxnet.model.FeedForward",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mxnet.model",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gpu",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dicom.read_file",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.equalizeHist",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
}
] |
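get_data_id above windows a CT volume into 3-slice pseudo-RGB images so a pretrained 2-D ResNet can consume it; the windowing step alone, on a synthetic volume:

import numpy as np

volume = np.random.randint(0, 255, size=(30, 224, 224)).astype(np.uint8)
batch = []
for i in range(0, volume.shape[0] - 3, 3):
    # Three consecutive slices become the three input channels.
    batch.append(np.array([volume[i + j] for j in range(3)]))
batch = np.array(batch)
print(batch.shape)  # (9, 3, 224, 224)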
537395572
|
import os.path
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, DateTime
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from .database import Base, engine
from . import app
import datetime
from flask_login import UserMixin
class User(Base, UserMixin):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String(128))
email = Column(String(128), unique=True)
password = Column(String(128))
class Asset(Base):
__tablename__ = 'assets'
id = Column(Integer, primary_key = True)
barcode = Column(String, nullable = False, unique = True)
serial_no = Column(String, nullable = True)
capture_date = Column(DateTime, default = datetime.datetime.now)
name = Column(String(128), nullable = False)
category = Column(Integer, ForeignKey('asset_categories.id'), nullable = True)
_type = Column(Integer, ForeignKey('asset_types.id'), nullable = True)
_model = Column(Integer, ForeignKey('asset_models.id'), nullable = True)
status = Column(Integer, ForeignKey('asset_status.id'), nullable = True)
location = Column(Integer, ForeignKey('locations.id'), nullable = True)
user = Column(Integer, ForeignKey('people.id'), nullable = True)
purchase_price = Column(Integer, nullable = True)
value = Column(Integer, nullable = True)
supplier = Column(Integer, ForeignKey('suppliers.id'), nullable = True)
photo = Column(String, nullable = True)
comments = Column(String(256), nullable = True)
#Return asset object as dictionary
def as_dictionary(self):
asset={"id": self.id, "barcode": self.barcode, "serial_no": self.serial_no,
"capture_date": self.capture_date, "name": self.name, "category": self.category,
"_type": self._type, "_model": self._model, "status": self.status,
"location": self.location, "user": self.user, "purchase_price": self.purchase_price,
"value": self.value, "supplier": self.supplier, "photo": self.photo,
"comments": self.comments
}
return asset
class AssetCategory(Base):
__tablename__ = 'asset_categories'
id = Column(Integer, primary_key = True)
category_name = Column(String(128), nullable = False, unique = True)
category_assets = relationship("Asset", backref = "asset_category")
def as_dictionary(self):
categories = {"id": self.id, "category_name": self.category_name}
return categories
class AssetType(Base):
__tablename__ = 'asset_types'
id = Column(Integer, primary_key = True)
type_name = Column(String(128), nullable = False, unique = True)
type_assets = relationship ("Asset", backref = "asset_type")
def as_dictionary(self):
_types = {"id": self.id, "type_name": self.type_name}
return _types
class AssetModel(Base):
__tablename__ = 'asset_models'
id = Column(Integer, primary_key = True)
model_name = Column(String(128), nullable = False, unique = True)
model_assets = relationship ("Asset", backref = "asset_model")
def as_dictionary(self):
_models = {"id": self.id, "model_name": self.model_name}
return _models
class AssetStatus(Base):
__tablename__ = 'asset_status'
id = Column(Integer, primary_key = True)
status_code = Column(String(64), nullable = False, unique = True)
status_name = Column(String(128), nullable = False, unique = True)
status_assets = relationship ("Asset", backref = "asset_status")
def as_dictionary(self):
_statuses ={"id": self.id, "status_code": self.status_code,
"status_name": self.status_name
}
return _statuses
class Location(Base):
__tablename__ = 'locations'
id = Column(Integer, primary_key = True)
location_code = Column(String(64), nullable = False, unique = True)
location_name = Column(String(128), nullable = False)
category = Column(Integer, ForeignKey("location_categories.id"), nullable = True)
location_assets = relationship ("Asset", backref = "asset_location")
location_people = relationship ("People", backref = "person_location")
def as_dictionary(self):
_locations ={"id": self.id, "location_code": self.location_code,
"location_name": self.location_name,
"category": self.category
}
return _locations
class LocationCategory(Base):
__tablename__ = 'location_categories'
id = Column(Integer, primary_key = True)
category_name = Column(String(128), nullable = False, unique = True)
category_locations = relationship ("Location", backref = "location_category")
def as_dictionary(self):
_locations_categories = {"id": self.id, "category_name": self.category_name}
return _locations_categories
class People(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key = True)
barcode = Column(String(64), nullable = False, unique = True)
category = Column(Integer, ForeignKey("people_categories.id"), nullable = False)
first_name = Column(String(128), nullable = False)
second_name = Column(String(128), nullable = False)
designation = Column(String(128), nullable = True)
department = Column(Integer, ForeignKey("departments.id"), nullable = False)
location = Column(Integer, ForeignKey("locations.id"), nullable = False)
phone = Column(Integer, unique = True)
email = Column(String(128), unique = True)
class PeopleCategory(Base):
__tablename__ = 'people_categories'
id = Column(Integer, primary_key = True)
category_name = Column(String(128), nullable = False, unique = True)
category_people = relationship ("People", backref = "person_category")
def as_dictionary(self):
people_category = {"id": self.id, "category_name": self.category_name}
return people_category
class Department(Base):
__tablename__ = "departments"
id = Column(Integer, primary_key = True)
department_code = Column(String(64), nullable = True, unique = True)
department_name = Column(String(128), nullable = False)
department_people = relationship ("People", backref = "person_department")
def as_dictionary(self):
_departments = {"id": self.id, "department_code": self.department_code,
"department_name": self.department_name
}
return _departments
class Supplier(Base):
__tablename__ = 'suppliers'
id = Column(Integer, primary_key = True)
code = Column(String(64), nullable = False, unique = True)
category = Column(Integer, ForeignKey("suppliers_category.id"), nullable = False)
phone = Column(Integer, nullable = False, unique = True)
email = Column(String(128), unique = True)
location = Column(Integer, ForeignKey("locations.id"), nullable = False)
website = Column(String(128), nullable = True, unique = True)
person = Column(Integer, ForeignKey("people.id"), nullable = True)
def as_dictionary(self):
_suppliers = {"id": self.id, "code": self.code, "category": self.category,
"phone": self.phone, "email": self.email, "location": self.location,
"website": self.website
}
return _suppliers
class SupplierCategory(Base):
__tablename__ = 'suppliers_category'
id = Column(Integer, primary_key = True)
category_name = Column(String(128), nullable = False, unique = True)
category_suppliers = relationship ("Supplier", backref = "supplier_category")
def as_dictionary(self):
supplier_categories = {"id": self.id, "category_name": self.category_name}
return supplier_categories
| null |
arm/assets_bkup.py
|
assets_bkup.py
|
py
| 7,209 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "database.Base",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask_login.UserMixin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 89,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 103,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 120,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 133,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 137,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 138,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 139,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 145,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 156,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 170,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 172,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 173,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 175,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 177,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "database.Base",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 189,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 191,
"usage_type": "call"
}
] |
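The models above are plain declarative SQLAlchemy; a minimal round-trip through one of the lookup tables against an in-memory SQLite engine (a sketch, independent of the app's real Base and engine):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class AssetCategory(Base):
    __tablename__ = 'asset_categories'
    id = Column(Integer, primary_key=True)
    category_name = Column(String(128), nullable=False, unique=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(AssetCategory(category_name='Laptops'))
session.commit()
print(session.query(AssetCategory).first().category_name)  # Laptops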
3206122
|
from django.urls import path
from address_app import views
app_name = 'address'
urlpatterns = [
path('settle/',views.settle_accounts,name='settle'),
path('addresspg/',views.address_page,name='addresspg'),
path('addressok/',views.address_ok,name='addressok'),
path('ybaddress/',views.ybaddress,name='ybaddress')
]
| null |
address_app/urls.py
|
urls.py
|
py
| 330 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "address_app.views.settle_accounts",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "address_app.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "address_app.views.address_page",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "address_app.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "address_app.views.address_ok",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "address_app.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "address_app.views.ybaddress",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "address_app.views",
"line_number": 10,
"usage_type": "name"
}
] |
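Because app_name is set in the URLconf above, the project only needs a plain include and the routes resolve under the 'address' namespace; a sketch of that wiring (the project module name is hypothetical):

# project/urls.py (hypothetical)
from django.urls import include, path

urlpatterns = [
    path('address/', include('address_app.urls')),
]
# reverse('address:settle') then resolves to '/address/settle/'.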
36093736
|
from celery import shared_task
from django.core.mail import send_mail
# from settings import settings WRONG
from django.conf import settings
@shared_task(
autoretry_for=(Exception,),
retry_kwargs={
'max_retries': 5,
'default_retry_delay': 60,
},
)
def send_registration_email(body, email_to):
title = 'Activate Your Account'
send_mail(
title,
body,
settings.DEFAULT_FROM_EMAIL,
[email_to],
fail_silently=False,
)
| null |
app/accounts/tasks.py
|
tasks.py
|
py
| 497 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.mail.send_mail",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "celery.shared_task",
"line_number": 8,
"usage_type": "call"
}
] |
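With autoretry_for set as above, any exception raised by send_mail re-queues the task up to five times, a minute apart; the call site just enqueues it. A hypothetical caller:

# e.g. inside a registration view (body text and address are made up)
from accounts.tasks import send_registration_email

send_registration_email.delay(
    body='Follow this link to activate your account.',
    email_to='new.user@example.com',
)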
400909341
|
from django.conf.urls import url, include, patterns
from rest_framework import routers
urlpatterns = patterns('api.views',
url(r'^establecimiento/$','establecimiento' ),
url(r'^contratante/$','contratante' ),
url(r'^establecimientodetalle/(?P<id>\d+)/$','establecimientodetalle' ),
)
| null |
api/urls.py
|
urls.py
|
py
| 296 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
}
] |
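patterns() and string view references were removed in Django 1.10; an equivalent modern URLconf, assuming the same views exist in api.views, would be:

from django.urls import path, re_path
from api import views

urlpatterns = [
    path('establecimiento/', views.establecimiento),
    path('contratante/', views.contratante),
    re_path(r'^establecimientodetalle/(?P<id>\d+)/$', views.establecimientodetalle),
]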
83645527
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
import subprocess
import time
from .core import core
from .targets import Target
from .option import is_option
from .variable import Variable
from .exceptions import (FaffError, RuleError)
# Error messages.
ENORMALISE = "rule `{}` argument `{}` unexpected input"
EARG_LEN = "rule `{}` argument {} has more than two elements"
EARG = "rule `{}` argument {} unexpected input"
EARG_DEPENDS = "rule `{}` target {} dependency {} unexpected input"
EDEPEND = "rule `{}` dependency {} not a `Target` instance"
EDEPEND_EXISTS = "rule `{}` dependencies do not exist"
EDEPEND_CIRCULAR = "rule `{}` target {} dependency {} is circular"
EOPTION = "rule `{}` option {} not an `Option` instance"
ETARGET = "rule `{}` target {} not a `Target` instance"
ETARGET_DEPENDS = "rule `{}` target {} dependencies not a tuple"
ETARGET_DEPEND = "rule `{}` target {} dependency {} not a `Target` instance"
ETARGET_EXISTS = "rule `{}` target {} does not exist"
ETARGET_RULE_DEPEND_UPDATED = "rule `{}` target {} out of date compared to rule dependency {}" # noqa
ETARGET_DEPEND_UPDATED = "rule `{}` target {} out of date compared to dependency {}" # noqa
EKEYBOARD_INT = "rule `{}` keyboard interrupt"
class Rule(object):
"""Default rule function decorator, decorated function is called based on
the state of targets, their dependencies and other keyword arguments to
the decorator.
`func`
Decorated function.
`*args`
Target arguments in a defined rule pattern.
`**kwargs`
`depends`
Optional additional dependencies applied to all rule arguments.
`options`
Optional decorated option functions for command line configuration.
`args`
Optional arguments passed to decorated function on call.
`context`
Optional variable context.
Decorated functions must have the arguments::
@rule(...)
def func(**kwargs):
...
Where keyword arguments contain the following:
`target`
Out of date ``Target`` instance.
`depends`
List of ``Target`` instance dependencies associated with target.
`options`
Dictionary of option decorator argument values.
`args`
Optional arguments passed to decorator.
Where ``target`` is an out of date ``Target`` instance, ``depends`` is a
list of ``Target`` instance dependencies associated with the target, and
``args`` is the value passed to the keyword argument of the same name in
the decorator.
When instance is called, rule function will run based on state of targets
and dependencies, returning a boolean indicating success and results
dictionary containing more information.
"""
# Private methods.
def __init__(self, func, *args, **kwargs):
# Name, function and description from decorator.
self._name = func.__name__
self._func = func
self._description = func.__doc__
# Optional rule dependencies (`depends` keyword argument).
self._rule_depends = self._normalise("depends", kwargs, Target)
# Optional rule options (`options` keyword argument).
self._rule_options = self._normalise("options", kwargs, is_option)
self._opt_values = {}
# Optional rule arguments (`args` keyword argument.)
self._rule_args = kwargs.get("args", None)
# Optional variable context argument.
self._context = kwargs.get("context", None)
# Targets and dependencies.
self._targets = []
self._depends = []
# Process decorator positional arguments.
for i, arg in enumerate(args):
# Break loop if true is not returned.
if not self._process(i, arg):
break
# Cast to tuples internally.
self._targets = tuple(self._targets)
self._depends = tuple(self._depends)
# Check internal data.
self._check()
def __call__(self, opt_values=None):
# Determine which targets, dependencies require update.
# Rule results dictionary.
results = {
# Total number of targets, updated targets.
"total": len(self._targets),
"updated": 0,
# Total time elapsed (calculated in ._results() method).
"time": time.time(),
# Individual target results.
"results": {},
}
# Rule keyword arguments.
kwargs = {
# Used to pass option values to rule targets .updated() method.
"options": self._opt_values if opt_values is None else opt_values,
"args": self._rule_args,
}
# If rule dependencies do not exist, return error.
if not self.exists():
results["results"][0] = {
"error": EDEPEND_EXISTS.format(self._name)}
return self._results(results)
# Get rule dependencies updated time.
rdepends_updated = []
for depend in self._rule_depends:
rdepends_updated.append(
depend.updated(opt_values=kwargs["options"]))
# If RulePattern1 (no targets).
if results["total"] == 0:
# Set total counter to 1 for semantics, update rule.
results["total"] = 1
results["results"] = self.update(**kwargs)
return self._results(results)
# Track targets to update, associated dependencies.
rtargets = []
rdepends = []
# Else iterate over targets.
for i, pair in enumerate(zip(self._targets, self._depends)):
target, depends = pair
# If depends is none, use empty list.
depends = [] if depends is None else depends
# Check dependencies are not circular, get updated time.
tdepends_updated = []
for j, depend in enumerate(depends):
if target == depend:
results["results"][i] = {
"error": EDEPEND_CIRCULAR.format(self._name, i, j)}
break
tdepends_updated.append(
depend.updated(opt_values=kwargs["options"]))
# Exit loop if dependency error.
if len(tdepends_updated) != len(depends):
break
# If target does not exist, update required.
if not target.exists():
core.debug(__name__, ETARGET_EXISTS, self._name, i)
rtargets.append(target)
rdepends.append(depends)
continue
            # If the target was updated within the last 50ms, treat it as
            # freshly touched and schedule an update.
updated = target.updated(opt_values=kwargs["options"])
if abs(updated - time.time()) < 0.05:
rtargets.append(target)
rdepends.append(depends)
continue
            # Flag used to continue the outer loop once this target has
            # been scheduled for update.
            # TODO: Cleaner way to do this.
            update = False
# Compare target updated time to rule dependencies.
for j, rd_updated in enumerate(rdepends_updated):
if updated <= rd_updated:
core.debug(
__name__, ETARGET_RULE_DEPEND_UPDATED,
self._name, i, j)
rtargets.append(target)
rdepends.append(depends)
update = True
break
if update:
continue
# Compare target updated time to dependencies.
for j, td_updated in enumerate(tdepends_updated):
if updated <= td_updated:
core.debug(
__name__, ETARGET_DEPEND_UPDATED,
self._name, i, j)
rtargets.append(target)
rdepends.append(depends)
break
# Update rule if targets to update and loop did not exit early.
if (len(rtargets) > 0) and (len(rtargets) == len(rdepends)):
results["results"] = self.update(rtargets, rdepends, **kwargs)
return self._results(results)
def _normalise(self, key, kwargs, cls):
# Get value from keyword arguments, default to empty list.
arg = kwargs.get(key, [])
# Wrap argument as tuple for consistency.
if isinstance(arg, list) or isinstance(arg, tuple):
return tuple(arg)
elif isinstance(cls, type):
if isinstance(arg, cls):
return tuple([arg])
elif cls(arg):
return tuple([arg])
# Raise error for unexpected input.
core.exception(ENORMALISE, self._name, key, cls=RuleError)
def _process(self, i, arg):
# If argument is Target instance, RulePattern2.
if isinstance(arg, Target):
self._targets.append(arg)
self._depends.append(None)
return True
# Else if argument is list or tuple.
elif isinstance(arg, list) or isinstance(arg, tuple):
# Raise error if list length is greater than two.
if len(arg) > 2:
core.exception(EARG_LEN, self._name, i, cls=RuleError)
# Extract targets, dependencies from argument list.
targets = arg[0] if len(arg) > 0 else None
depends = arg[1] if len(arg) > 1 else None
# If targets is Target instance.
if isinstance(targets, Target):
self._targets.append(targets)
# If dependencies is Target instance, RulePattern3.
if isinstance(depends, Target):
self._depends.append(tuple([depends]))
return True
# Else if dependencies is list or tuple, RulePattern4.
elif isinstance(depends, list) or isinstance(depends, tuple):
self._depends.append(tuple(depends))
return True
# Else if targets is list or tuple.
elif isinstance(targets, list) or isinstance(targets, tuple):
# If dependencies is a Target instance, RulePattern5.
if isinstance(depends, Target):
for target in targets:
self._targets.append(target)
self._depends.append(tuple([depends]))
return True
# Else if dependencies is list or tuple.
elif isinstance(depends, list) or isinstance(depends, tuple):
# If not equal in length, RulePattern7.
if len(targets) != len(depends):
for target in targets:
self._targets.append(target)
self._depends.append(tuple(depends))
return True
# If equal in length.
for j, pair in enumerate(zip(targets, depends)):
target, depend = pair
self._targets.append(target)
# If dependency is Target, RulePattern6.
if isinstance(depend, Target):
self._depends.append(tuple([depend]))
# Else if dependency is list or tuple, RulePattern8.
elif (isinstance(depend, list) or
isinstance(depend, tuple)):
self._depends.append(tuple(depend))
# Unknown dependency argument.
else:
core.exception(
EARG_DEPENDS, self._name, i, j, cls=RuleError)
return True
# No arguments, RulePattern1.
# Raise error for unknown argument.
core.exception(EARG, self._name, i, cls=RuleError)
def _check(self):
# Rule dependencies must be list of Target instances.
for i, depend in enumerate(self._rule_depends):
if not isinstance(depend, Target):
core.exception(EDEPEND, self._name, i, cls=RuleError)
# Rule options must be list of options.
for i, opt in enumerate(self._rule_options):
if not is_option(opt):
core.exception(EOPTION, self._name, i, cls=RuleError)
# Targets must be a list of Target instances.
for i, pair in enumerate(zip(self._targets, self._depends)):
target, depends = pair
if not isinstance(target, Target):
core.exception(ETARGET, self._name, i, cls=RuleError)
# Skip dependencies checks.
if depends is None:
continue
# Target dependencies must be a list of lists of Target instances.
if not isinstance(depends, tuple):
core.exception(ETARGET_DEPENDS, self._name, i, cls=RuleError)
for j, depend in enumerate(depends):
if not isinstance(depend, Target):
core.exception(
ETARGET_DEPEND, self._name, i, j, cls=RuleError)
def _results(self, results):
# TODO: Use named tuple here.
# Process results dictionary to determine success.
success = True
for i, result in results["results"].items():
# Write error messages to stderr.
if result["error"] is not None:
core.stderr(result["error"])
success = False
else:
results["updated"] += 1
results["time"] = time.time() - results["time"]
return (success, results)
# Public properties, methods.
@property
def name(self):
"""Return rule name string."""
return self._name
@property
def description(self):
"""Return rule description string."""
return self._description
@property
def targets(self):
"""Return list of rule targets."""
return self._targets
def add_options(self, parser):
"""Add rule options to argument parser instance.
`parser`
Instance of ArgumentParser.
"""
for opt in self._rule_options:
opt._option.add(parser)
def call_options(self, args):
"""Call rule options with arguments returned by argument parser.
`args`
Instance of argparse.Namespace.
"""
for opt in self._rule_options:
value = getattr(args, opt._option.name)
self._opt_values[opt._option.name] = value
opt._option(value)
def exists(self):
"""Return true if rule dependencies exist."""
# Check rule dependencies exist.
for depend in self._rule_depends:
if not depend.exists():
return False
# Check dependencies of each target.
for depends in self._depends:
if depends is not None:
for depend in depends:
if not depend.exists():
return False
# Dependencies exist.
return True
# Reimplemented methods.
def update(self, targets=None, depends=None, **kwargs):
"""Update rule targets and dependencies, serial implementation.
`targets`
List of out of date targets.
`depends`
List of lists of dependencies associated with targets.
"""
def _update(kwargs):
# Rule function call.
result = {
# Elapsed time for this target.
"time": time.time(),
"error": None,
}
try:
self._func(**kwargs)
except KeyboardInterrupt:
result["error"] = EKEYBOARD_INT.format(self._name)
except subprocess.CalledProcessError as err:
result["error"] = err.output
except FaffError as err:
result["error"] = err.message
# TODO: Catch generic exceptions?
# Calculate elapsed time, set result.
result["time"] = time.time() - result["time"]
return result
# Update results, keyword arguments.
results = {}
# RulePattern1, no targets.
if (targets is None) and (depends is None):
results[0] = _update(kwargs)
return results
# Iterate over targets, dependencies lists.
for i, pair in enumerate(zip(targets, depends)):
target, depends = pair
kwargs["target"] = target
kwargs["depends"] = depends
# Save variable context, set automatic variables.
ctx = Variable.save("_", self._context)
Variable("_T", str(target), self._context)
Variable("_D", " ".join([str(x) for x in depends]), self._context)
# Update rule function.
results[i] = _update(kwargs)
# Restore variable values.
Variable.restore(ctx, self._context)
return results
def rule(*args, **kwargs):
"""Rule function decorator, function and arguments used to create a
``Rule`` instance.
`*args`
Positional arguments to ``Rule`` class.
`**kwargs`
Keyword arguments to ``Rule`` class.
"""
def _decorator(func):
# TODO: Use rule subclasses here, keyword argument?
_rule = Rule(func, *args, **kwargs)
@functools.wraps(func)
def _func(*args):
return _rule(*args)
_func._rule = _rule
return _func
return _decorator
def is_rule(obj):
"""Return true if object is a rule instance.
`obj`
Object instance.
"""
if hasattr(obj, "_rule"):
return isinstance(obj._rule, Rule)
return False
| null |
faff/rule.py
|
rule.py
|
py
| 17,896 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "targets.Target",
"line_number": 85,
"usage_type": "argument"
},
{
"api_name": "option.is_option",
"line_number": 88,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "core.core.debug",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "core.core.debug",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "core.core.debug",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "core.core.exception",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "targets.Target",
"line_number": 242,
"usage_type": "argument"
},
{
"api_name": "core.core.exception",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "targets.Target",
"line_number": 259,
"usage_type": "argument"
},
{
"api_name": "targets.Target",
"line_number": 263,
"usage_type": "argument"
},
{
"api_name": "targets.Target",
"line_number": 276,
"usage_type": "argument"
},
{
"api_name": "targets.Target",
"line_number": 298,
"usage_type": "argument"
},
{
"api_name": "core.core.exception",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "core.core.exception",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "targets.Target",
"line_number": 320,
"usage_type": "argument"
},
{
"api_name": "core.core.exception",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "option.is_option",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "core.core.exception",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "targets.Target",
"line_number": 331,
"usage_type": "argument"
},
{
"api_name": "core.core.exception",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "core.core.exception",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "targets.Target",
"line_number": 342,
"usage_type": "argument"
},
{
"api_name": "core.core.exception",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "exceptions.RuleError",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "core.core.stderr",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "core.core",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "exceptions.FaffError",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "variable.Variable.save",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "variable.Variable",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "variable.Variable",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "variable.Variable",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "variable.Variable.restore",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "variable.Variable",
"line_number": 470,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 488,
"usage_type": "call"
}
] |
186091510
|
# -*- coding: UTF-8 -*-
"""
trytond_async.tasks
Implements the actual task runners.
Usual celery projects would have the method/functions which have the code
to run as tasks. However, the tryton inheritance and majority of tryton
code being in class and instance methods makes it hard for the pattern to
be followed. Read more about the design on the getting started
documentation of this module.
"""
from trytond import backend
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.cache import Cache
from trytond_async.app import app
class RetryWithDelay(Exception):
"""
A special case of exception meant to be used by Tryton models to
indicate to the worker that the task needs to be retried. This is
needed because Tryton models itself are ignorant to the invocation from
regular model code and asynchronously through workers!
:param delay: Delay in seconds after which the task should be retried
"""
def __init__(self, delay=5, *args, **kwargs):
super(RetryWithDelay, self).__init__(*args, **kwargs)
self.delay = delay
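# Hedged usage sketch inside a Tryton model method executed by the worker
# (the record and its readiness check are assumptions):
#
#     if not record.is_ready():
#         raise RetryWithDelay(delay=30)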
def _execute(app, database, user, payload_json):
"""
Execute the task identified by the given payload in the given database
as `user`.
"""
if database not in Pool.database_list():
# Initialise the database if this is the first time we see the
# database being used.
with Transaction().start(database, 0, readonly=True):
Pool(database).init()
with Transaction().start(database, 0):
Cache.clean(database)
with Transaction().start(database, user) as transaction:
Async = Pool().get('async.async')
DatabaseOperationalError = backend.get('DatabaseOperationalError')
# De-serialize the payload in the transaction context so that
# active records are constructed in the same transaction cache and
# context.
payload = Async.deserialize_payload(payload_json)
try:
with Transaction().set_context(payload['context']):
results = Async.execute_payload(payload)
except RetryWithDelay as exc:
# A special error that would be raised by Tryton models to
# retry the task after a certain delay. Useful when the task
# got triggered before the record is ready and similar cases.
transaction.connection.rollback()
raise app.retry(exc=exc, countdown=exc.delay)
except DatabaseOperationalError as exc:
# Strict transaction handling may cause this.
# Rollback and Retry the whole transaction if within
# max retries, or raise exception and quit.
transaction.connection.rollback()
raise app.retry(exc=exc)
except Exception:
transaction.connection.rollback()
raise
else:
transaction.connection.commit()
return results
@app.task(bind=True, default_retry_delay=2)
def execute(app, database, user, payload_json):
return _execute(app, database, user, payload_json)
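# Hedged dispatch sketch (database name, user id and payload are assumptions;
# with bind=True celery injects the bound task as the first argument, here
# confusingly named ``app``):
#
#     execute.delay('tryton', 1, payload_json)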
| null |
tasks.py
|
tasks.py
|
py
| 3,145 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "trytond.pool.Pool.database_list",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "trytond.pool.Pool",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "trytond.pool.Pool",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "trytond.cache.Cache.clean",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "trytond.cache.Cache",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "trytond.pool.Pool",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "trytond.backend.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "trytond.backend",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "trytond_async.app.app.retry",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "trytond_async.app.app",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "trytond_async.app.app.retry",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "trytond_async.app.app",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "trytond_async.app.app",
"line_number": 84,
"usage_type": "argument"
},
{
"api_name": "trytond_async.app.app.task",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "trytond_async.app.app",
"line_number": 82,
"usage_type": "name"
}
] |
366095768
|
import os
import sys
# Actually submit job to LAVA
SUBMIT = 1
def submit_testjob(jobdef):
import xmlrpc.client
import requests
from urllib.parse import urlsplit
via_squad = os.getenv("USE_QA_SERVER") == "1"
if via_squad:
lava_uri = os.getenv("LAVA_SERVER")
if "://" not in lava_uri:
lava_uri = "https://" + lava_uri
qa_server_uri = os.getenv("QA_SERVER")
qa_server_group = os.getenv("QA_SERVER_GROUP")
qa_server_project = os.getenv("QA_SERVER_PROJECT")
qa_server_build = os.getenv("EXTERNAL_BUILD_ID")
if qa_server_build is None:
print(
"Warning: EXTERNAL_BUILD_ID environment variable is not set, "
"using GIT_COMMIT_ID instead."
)
qa_server_build = os.getenv("GIT_COMMIT_ID")
qa_server_env = os.getenv("PLATFORM")
qa_server_api = "%s/api/submitjob/%s/%s/%s/%s" % (
qa_server_uri,
qa_server_group,
qa_server_project,
qa_server_build,
qa_server_env)
headers = {
"Auth-Token": os.getenv("QA_REPORTS_TOKEN")
}
data = {
"definition": jobdef,
"backend": urlsplit(lava_uri).netloc # qa-reports backends are named as lava instances
}
print("POST:", qa_server_api, data)
results = requests.post(qa_server_api, data=data, headers=headers)
if results.status_code < 300:
print("%s/testjob/%s" % (qa_server_uri, results.text))
else:
print("status code: %s" % results.status_code)
print(results.text)
else:
username = os.getenv("LAVA_USER")
token = os.getenv("LAVA_TOKEN")
uri = os.getenv("LAVA_SERVER")
if not uri.endswith("/"):
uri += "/"
server = xmlrpc.client.ServerProxy("https://%s:%s@%s" % (username, token, uri))
job_id = server.scheduler.submit_job(jobdef)
if isinstance(job_id, list):
job_id = job_id[0]
print("LAVA: https://%s../scheduler/job/%s" % (uri, job_id))
with open(sys.argv[1]) as f:
jobdef = f.read()
if SUBMIT:
submit_testjob(jobdef)
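# Hedged invocation sketch for the qa-reports path (values illustrative;
# EXTERNAL_BUILD_ID, PLATFORM and QA_REPORTS_TOKEN must also be set):
#     USE_QA_SERVER=1 QA_SERVER=https://qa-reports.example.com \
#     QA_SERVER_GROUP=mygroup QA_SERVER_PROJECT=myproject \
#     LAVA_SERVER=lava.example.com python lava_submit.py job.yaml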
| null |
lava_submit.py
|
lava_submit.py
|
py
| 2,225 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlsplit",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "xmlrpc.client.client.ServerProxy",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "xmlrpc.client.client",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "xmlrpc.client",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 67,
"usage_type": "attribute"
}
] |
566264816
|
import redis
import json
"""
IPPool function:
1) insert_ip: insert a proxy ip into redis
2) get_random_key: get a random key from redis
3) delete_by_key: delete a proxy ip by key
"""
class IPPool(object):
def __init__(self):
self.__HOST = '127.0.0.1'
self.__PORT = 6379
self.__IPPOOL_DB = 0
self.__REDIS_CONN = redis.Redis(host=self.__HOST, port=self.__PORT, db=self.__IPPOOL_DB)
def insert_ip(self, ip):
# insert ip into redis
# example of ip:['163.204.245.227', '9999', '广东', '高匿', 'HTTPS']
# redis_conn = redis.Redis(host=self.__HOST, port=self.__PORT, db=self.__IPPOOL_DB)
# construct key and value
ip_with_port = str(ip[0]) + ":" + str(ip[1])
# print("key:", ip_with_port)
ip_info = json.dumps(ip)
# print("value:", ip_info)
self.__REDIS_CONN.set(ip_with_port, ip_info)
def get_random_key(self):
# select a random key from redis
# redis_conn = redis.Redis(host=self.__HOST, port=self.__PORT, db=self.__IPPOOL_DB)
# decode: transfer byte type to string type
random_key = self.__REDIS_CONN.randomkey().decode()
return random_key
def delete_by_key(self, key):
self.__REDIS_CONN.delete(key)
        return None  # no return value
def get_proxy_ip_cnt(self):
return len(self.__REDIS_CONN.keys())
if __name__ == "__main__":
ip_example = ['182.35.85.193', '9999', '山东泰安', '高匿', 'HTTP']
test = IPPool()
test.insert_ip(ip_example)
print("Info: get a random key from IPPool")
print(test.get_random_key())
print(test.get_proxy_ip_cnt())
| null |
db/redis.py
|
redis.py
|
py
| 1,709 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "redis.Redis",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
}
] |
221667612
|
import numpy as np
import matplotlib.pyplot as plt
euclidean_distance = lambda data, point: np.sqrt(np.sum(np.power(data - point, 2), axis = 1).reshape((len(data), 1)))
def relative_neighborhood_graphs(data):
#debug:
#plt.gca().set_aspect('equal', adjustable='box')
#construct the complete Graph G
N = len(data)
G = np.empty((N, 0))
for point in data:
G = np.concatenate((G, euclidean_distance(data,point)), axis=1)
###############
RNG = np.zeros(G.shape)
#O(n^3) time
for i, point1 in enumerate(data):
for j, point2 in enumerate(data):
if i != j:
flag = 1
for k, pointK in enumerate(data):
if k != i and k !=j:
if max(G[i, k], G[j,k]) < G[i, j]:
flag = 0
break
if flag:
#form an edge
RNG[i, j] = G[i, j]
#debug
#visualize RNG
plt.scatter(data[:, 0], data[:, 1])
x_, y_ = np.nonzero(RNG)
for x, y in zip(x_, y_):
if x < y:
plt.plot((data[x, 0], data[y, 0]), (data[x, 1], data[y, 1]))
plt.show()
##############################
data = np.hstack((data, np.zeros((len(data), 1))))
visited_nodes = np.zeros(N)
#dfs from now on
cluster_id = 1
for s in range(N):
        if visited_nodes[s] == 0:
visited_nodes[s] = 1
data[s, 2] = cluster_id
            # Traverse the RNG, not the complete graph G, so that clusters
            # correspond to connected components of the RNG.
            _dfs_util(RNG, s, visited_nodes, cluster_id, data)
cluster_id += 1
return data
def _dfs_util(MST, s, visited_nodes, cluster_id, data):
adj_nodes = np.nonzero(MST[s, :])
for node in adj_nodes[0]:
if visited_nodes[node] == 0:
visited_nodes[node] = 1
data[node, -1] = cluster_id
_dfs_util(MST, node, visited_nodes, cluster_id, data)
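# Hedged usage sketch (random 2-D points; values illustrative):
#
#     pts = np.random.rand(30, 2)
#     labelled = relative_neighborhood_graphs(pts)
#     # labelled[:, 2] holds the cluster id assigned to each point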
| null |
graph_theory/RNG.py
|
RNG.py
|
py
| 2,027 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.sqrt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.nonzero",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "numpy.hstack",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 70,
"usage_type": "call"
}
] |
198197657
|
"""Process an html delicious export and import them into bookie"""
import codecs
from datetime import datetime
from BeautifulSoup import BeautifulSoup
from sys import argv
import urllib
DELAPI = 'http://127.0.0.1:6543/delapi/posts/add?'
def process(fname):
"""Given a file, process it"""
bmark_file = codecs.open(fname, "r", "utf-8").read()
soup = BeautifulSoup(bmark_file)
for tag in soup.findAll('dt'):
# if we have a dd as next sibling, get it's content
if tag.nextSibling and tag.nextSibling.name == 'dd':
extended = tag.nextSibling.text
else:
extended = ""
link = tag.a
# now get the link tag
call_system(link, extended)
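# Sketch of the bookmark entry shape the parser expects (Netscape bookmark
# export format as produced by delicious; attribute values illustrative):
#
#     <DT><A HREF="http://example.com" ADD_DATE="1293683278"
#            TAGS="python,web">Example title</A>
#     <DD>optional extended description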
def call_system(link_tag, extended):
"""Given a parsed <a> tag, store this"""
date_fmt = "%Y-%m-%dT%H:%M:%SZ"
add_date = datetime.fromtimestamp(float(link_tag['add_date']))
prms = {
'url': link_tag['href'].encode('utf-8'),
'description': link_tag.text.encode('utf-8'),
'extended': extended.encode('utf-8'),
'tags': " ".join(link_tag['tags'].split(',')).encode('utf-8'),
'dt': add_date.strftime(date_fmt),
}
req_params = urllib.urlencode(prms)
call = urllib.urlopen(DELAPI + req_params)
call.close()
if __name__ == "__main__":
filename = argv[1]
process(filename)
| null |
Bookie/bookie/scripts/import_delicious.py
|
import_delicious.py
|
py
| 1,390 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "codecs.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "BeautifulSoup.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "urllib.urlencode",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 49,
"usage_type": "name"
}
] |
260923416
|
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import json
import os,sys
import CommonLibrary
import urllib.request
import zipcodes
import us
import requests
from bs4 import BeautifulSoup
home_path = os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), os.pardir)) + '/com.leafly'
current_data_path = os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), os.pardir)) + '/current_data'
previous_data_path = os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), os.pardir)) + '/previous_data'
upload_data_path = os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), os.pardir)) + '/upload_data'
CommonLibrary.CreateFolder('ZS_120')
CommonLibrary.DashboardUploadBegin('ZS_120')
CommonLibrary.CreateFolder('ZS_121')
CommonLibrary.DashboardUploadBegin('ZS_121')
CommonLibrary.CreateFolder('ZS_122')
CommonLibrary.DashboardUploadBegin('ZS_122')
CommonLibrary.CreateFolder('ZS_123')
CommonLibrary.DashboardUploadBegin('ZS_123')
CommonLibrary.CreateFolder('ZS_125')
CommonLibrary.DashboardUploadBegin('ZS_125')
CommonLibrary.CreateFolder('ZS_130')
CommonLibrary.DashboardUploadBegin('ZS_130')
CommonLibrary.CreateFolder('ZS_133')
CommonLibrary.DashboardUploadBegin('ZS_133')
if not os.path.exists(home_path + '/dispensary_url.uniq.txt'):  # skip scraping if the unique dispensary URL list already exists
url = 'https://www.leafly.com/finder'
options = Options()
options.headless = True
driver = webdriver.Chrome(options=options, executable_path=home_path + '/chromedriver')
#driver = webdriver.Chrome(executable_path=soures_path + '/chromedriver')
driver.get(url)
driver.maximize_window()
    elements = []  # default so the loop below is safe if the consent click fails
    try:
        driver.find_element_by_id("tou-continue").click()
        time.sleep(10)
        elements = driver.find_elements_by_tag_name('a')
    except NoSuchElementException:
        pass
for elem in elements:
href = elem.get_attribute("href")
if href is not None and 'dispensary-info' in href and not 'reviews' in href:
textfilerow = str(href) + '\n'
f = open(home_path + '/dispensary_url.txt', 'a+')
f.write(textfilerow)
f.close()
driver.quit()
    cmd = 'chmod 777 ' + home_path + '/dispensary_url.txt'  # set file permissions
    os.system(cmd)
    cmd = 'sort ' + home_path + '/dispensary_url.txt | uniq > ' + home_path + '/dispensary_url.uniq.txt'  # keep unique URLs only
    os.system(cmd)
    cmd = 'rm -r ' + home_path + '/dispensary_url.txt'  # remove the raw file so the next run starts fresh
    os.system(cmd)
lastrun = CommonLibrary.GetLastRun(home_path + '/LastRun_Dispensary.txt')
with open(home_path + '/dispensary_url.uniq.txt') as f:
content = f.readlines()
for x in range(len(content)):
if x >= int(lastrun):
url = content[x].strip()
print (url)
infopage = requests.get(content[x].strip())
soup_infopage = BeautifulSoup(infopage.content, 'html.parser')
if soup_infopage.find("script", id="__NEXT_DATA__"):
element = soup_infopage.find("script", id="__NEXT_DATA__")
data = json.loads(element.get_text())
print (data)
dispensary_name = ''
dispensary_type = ''
city = ''
state = ''
country = ''
longitude = ''
latitude = ''
brand_location= ''
product_type = ''
rating_count = ''
zip_code = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('zip')
if zip_code:
if len(zip_code) == 5 and zip_code.isdigit():
location_data = zipcodes.matching(zip_code)
for a in location_data:
city = a['city']
state = a['state']
country = a['country']
if not city:
city = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('city')
state = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('state')
country = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('country')
city = city.capitalize()
state = us.states.lookup(state)
state = str(state).replace('<State:', '')
state = state.replace('>', '')
if state == 'None':
state = ''
if city == 'None':
city = ''
if country == 'None':
country = ''
dispensary_name = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('name')
#print (data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('tags'))
#if len(data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('tags')) != 0:
dispensary_type = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('tags')[0]
dispensary_type = dispensary_type.capitalize()
rating_count = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('ratings', {}).get('averageRating')
latitude = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('mapMarkerLocations', {})[0]['latitude']
longitude = data.get('props', {}).get('pageProps', {}).get('dispensary', {}).get('mapMarkerLocations', {})[0]['longitude']
total_menu_items = data.get('props', {}).get('pageProps', {}).get('menu', {})
for one_menu in total_menu_items:
product_type = ''
product_category = ''
product = ''
brand = ''
product_category = one_menu['category']
product = one_menu['name']
brand = one_menu['brandName']
brand = str(brand).replace('None', '')
total_price_items = one_menu.get('variants', {})
for one_price in total_price_items:
price = ''
weight = ''
weight_type = ''
price = one_price['packagePrice']
weight = one_price['packageDisplayUnit']
if 'g' in weight:
weight_type = 'gram'
weight = str(weight).replace('g', '')
if 'oz' in weight:
weight_type = 'ounce'
weight = str(weight).replace('oz', '')
#####################################################
ZS_122 = []
ZS_122.append({
'country': country,
'city': city,
'state': state,
'dispensary': dispensary_name,
'product_category': product_category,
'brand': brand,
'product': product,
'price': price,
'weight_type': weight_type,
'weight': weight,
})
CommonLibrary.JsonFilesWrite('ZS_122', ZS_122)
#####################################################
if brand != '' and price != '' and product != '' and product_category != '':
ZS_123 = []
ZS_123.append({
'country': country,
'city': city,
'state': state,
'product_category': product_category,
'brand': brand,
'product': product,
'price': price,
'weight_type': weight_type,
'weight': weight,
})
CommonLibrary.JsonFilesWrite('ZS_123', ZS_123)
###################################################
if brand != '':
ZS_121 = []
ZS_121.append({
'country': country,
'city': city,
'state': state,
'product_category': product_category,
'brand': brand,
})
CommonLibrary.JsonFilesWrite('ZS_121', ZS_121)
###################################################
if brand != '' and rating_count != '':
ZS_130 = []
ZS_130.append({
'country': country,
'city': city,
'state': state,
'dispensary': dispensary_name,
'product_category': product_category,
'brand': brand,
'rating_count': rating_count,
})
CommonLibrary.JsonFilesWrite('ZS_130', ZS_130)
####################################################
if brand != '':
ZS_125 = []
ZS_125.append({
'country': country,
'city': city,
'state': state,
'dispensary': dispensary_name,
'product_category': product_category,
'brand': brand,
'product_type': product_type,
'latitude': latitude,
'longitude': longitude,
'brand_location': brand_location,
})
CommonLibrary.JsonFilesWrite('ZS_125', ZS_125)
####################################################
if brand != '' and product != '':
ZS_120 = []
ZS_120.append({
'country': country,
'city': city,
'state': state,
'dispensary': dispensary_name,
'product_category': product_category,
'brand': brand,
'product': product,
})
CommonLibrary.JsonFilesWrite('ZS_120', ZS_120)
#######################################################
if dispensary_type != '':
ZS_133 = []
ZS_133.append({
'country': country,
'city': city,
'state': state,
'dispensary': dispensary_name,
'dispensary_type': dispensary_type,
})
CommonLibrary.JsonFilesWrite('ZS_133', ZS_133)
####################################################
        CommonLibrary.UpdateLastRun('LastRun_Dispensary.txt', str(x + 1))  # persist progress after each URL so a restart can resume
CommonLibrary.LogFilesWrite('com.weedmaps', url)
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_120')
CommonLibrary.DashboardUploadFinish('ZS_120')
CommonLibrary.FilesCompare('ZS_120')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_121')
CommonLibrary.DashboardUploadFinish('ZS_121')
CommonLibrary.FilesCompare('ZS_121')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_122')
CommonLibrary.DashboardUploadFinish('ZS_122')
CommonLibrary.FilesCompare('ZS_122')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_123')
CommonLibrary.DashboardUploadFinish('ZS_123')
CommonLibrary.FilesCompare('ZS_123')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_125')
CommonLibrary.DashboardUploadFinish('ZS_125')
CommonLibrary.FilesCompare('ZS_125')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_130')
CommonLibrary.DashboardUploadFinish('ZS_130')
CommonLibrary.FilesCompare('ZS_130')
CommonLibrary.LogFilesComplete('com.weedmaps.ZS_133')
CommonLibrary.DashboardUploadFinish('ZS_133')
CommonLibrary.FilesCompare('ZS_133')
CommonLibrary.UpdateLastRun('LastRun_Dispensary.txt', '0')  # reset the counter so the next run harvests all data again
os.remove(home_path + '/dispensary_url.uniq.txt') # remove dispensary list for next time
CommonLibrary.StatesFlagUpdate(sys.argv[0])
| null |
data_process/com.leafly/com.leafly.py
|
com.leafly.py
|
py
| 15,962 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.CreateFolder",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadBegin",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.GetLastRun",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "zipcodes.matching",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "us.states.lookup",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "us.states",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.JsonFilesWrite",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.UpdateLastRun",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesWrite",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.LogFilesComplete",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.DashboardUploadFinish",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.FilesCompare",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.UpdateLastRun",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "CommonLibrary.StatesFlagUpdate",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 262,
"usage_type": "attribute"
}
] |
211436182
|
import os
import random
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, TemplateSendMessage, ButtonsTemplate, PostbackTemplateAction, MessageTemplateAction, URITemplateAction, ImageSendMessage
)
app = Flask(__name__)
line_bot_api = LineBotApi(os.environ.get('CHANNEL_ACCESS_TOKEN'))
handler = WebhookHandler(os.environ.get('CHANNEL_SECRET'))
Favorability = {}
cat_toy = {'普通的逗貓棒':['https://i.imgur.com/jtbU0Gi.png'], '一條魚':['https://i.imgur.com/ncK4QZL.png'], '一隻老鼠':['https://i.imgur.com/mb6Ws0g.png', 'https://i.imgur.com/wTJCm9H.png']}
cat_food = {'點心':'https://i.imgur.com/wLs0yHy.png', '罐頭':'https://i.imgur.com/g4iJv1x.png', '貓糧':'https://i.imgur.com/9ZqH3Rk.png'}
Emergencies = ['貓貓趴在你的電腦鍵盤上,偷偷看著你', '貓貓睡著了,請不要吵到他', '貓貓蹲在你背後,她感覺餓了', '貓貓坐在你腳上,蹭了你的肚子']
love = ['https://i.imgur.com/PzuAI3G.png', 'https://i.imgur.com/zOI0H0i.png']
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
cat_talk = ""
meow = len(event.message.text) * "喵"
if event.message.text == "餵食":
reply = TemplateSendMessage(
alt_text = 'Buttons template',
template = ButtonsTemplate(
thumbnail_image_url='https://i.imgur.com/oMAspmB.png',
title='餵食',
text='請選擇要餵的食物',
actions=[
MessageTemplateAction(
label='點心',
text='點心'
),
MessageTemplateAction(
label='罐頭',
text='罐頭'
),
MessageTemplateAction(
label='貓糧',
text='貓糧'
)
]
)
)
elif event.message.text == "逗貓":
reply = TemplateSendMessage(
alt_text = 'Buttons template',
template = ButtonsTemplate(
thumbnail_image_url='https://i.imgur.com/2YHXdZG.png',
title='逗貓',
text='請選擇一根逗貓棒',
actions=[
MessageTemplateAction(
label='普通的逗貓棒',
text='普通的逗貓棒'
),
MessageTemplateAction(
label='一條魚',
text='一條魚'
),
MessageTemplateAction(
label='一隻老鼠',
text='一隻老鼠'
)
]
)
)
elif event.message.text == "查看好感度":
if event.source.user_id not in Favorability:
Favorability[event.source.user_id] = 0
cat_talk = str(Favorability[event.source.user_id])
print(Favorability)
if event.message.text == "逗貓":
line_bot_api.reply_message(event.reply_token, reply)
elif event.message.text in cat_toy:
add = random.randint(-10,10)
if add <= 0:
cat_talk = random.choice(["去去,貓貓不想跟你玩了", "去去,奴才走"])
else:
cat_talk = random.choice(["我才沒有想跟你玩呢!(撲過去", "走開,我才沒有要跟你玩呢(偷喵"])
if event.source.user_id not in Favorability:
Favorability[event.source.user_id] = 0 + add
else:
Favorability[event.source.user_id] = Favorability[event.source.user_id] + add
        picture = random.choice(cat_toy[event.message.text])  # pick once so preview matches the original
        reply = [
            ImageSendMessage(
                original_content_url=picture,
                preview_image_url=picture
            ),
TextSendMessage(text=cat_talk)
]
line_bot_api.reply_message(event.reply_token, reply)
elif event.message.text == "餵食":
line_bot_api.reply_message(event.reply_token, reply)
elif event.message.text in cat_food:
add = random.randint(-15,30)
if add <= 0:
cat_talk = "貓貓覺得難吃"
else:
cat_talk = "奴才做得不錯嘛"
if event.source.user_id not in Favorability:
Favorability[event.source.user_id] = 0 + add
else:
Favorability[event.source.user_id] = Favorability[event.source.user_id] + add
reply = [
ImageSendMessage(
original_content_url=cat_food[event.message.text],
preview_image_url=cat_food[event.message.text]
),
TextSendMessage(text=cat_talk)
]
line_bot_api.reply_message(event.reply_token,reply)
    else:
        # Ensure an entry exists before reading it, otherwise a brand new
        # user would raise KeyError here.
        Favorability.setdefault(event.source.user_id, 0)
        if Favorability[event.source.user_id] >= 100:
picture = random.choice(love)
reply = [
ImageSendMessage(
original_content_url=picture,
preview_image_url=picture
),
TextSendMessage(text=cat_talk + meow)
]
line_bot_api.reply_message(event.reply_token,reply)
elif Favorability[event.source.user_id] >= 75:
if random.randint(0,100) // 5 == 0:
reply = [
TextSendMessage(text=random.choice(Emergencies)),
TextSendMessage(text=cat_talk + meow)
]
else:
reply = TextSendMessage(text=cat_talk + meow)
line_bot_api.reply_message(event.reply_token,reply)
else:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=cat_talk + meow)
)
if __name__ == "__main__":
app.run()
| null |
app.py
|
app.py
|
py
| 6,716 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "linebot.LineBotApi",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "linebot.WebhookHandler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request.headers",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.request.get_data",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "linebot.exceptions.InvalidSignatureError",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "linebot.models.TemplateSendMessage",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "linebot.models.ButtonsTemplate",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "linebot.models.TemplateSendMessage",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "linebot.models.ButtonsTemplate",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageTemplateAction",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "linebot.models.ImageSendMessage",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "linebot.models.ImageSendMessage",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "linebot.models.ImageSendMessage",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "linebot.models.MessageEvent",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "linebot.models.TextMessage",
"line_number": 44,
"usage_type": "name"
}
] |
535837331
|
import json
from annoying.decorators import render_to
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, get_object_or_404
import requests
from . import forms
from . import models
API = 'http://www.unisport.dk/api/sample/'
def import_products(request):
"""
    Simple synchronous import, sufficient for this sample project.
    In real life this should be asynchronous (a celery task or similar) with validation, logging, etc.
"""
for model in (models.Product, models.ProductSize):
model.objects.all().delete()
products = json.loads(requests.get(API).content).get('latest', [])
for product in products:
size_names = [size.strip() for size in product.pop('sizes', '').split(',')]
product['product_id'] = product.pop('id')
for field in ('price', 'price_old'):
product[field] = product.get(field, '0').replace(',', '.')
instance = models.Product.objects.create(**product)
instance.sizes.add(*[models.ProductSize.objects.get_or_create(name=size_name)[0] for size_name in size_names])
messages.info(request, '%d products were saved in the DB' % len(products))
return redirect(reverse('product_list'))
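# Sketch of the sample payload shape this import assumes (values illustrative,
# derived from the parsing above):
#
#     {"latest": [{"id": 1, "name": "...", "sizes": "S, M, L",
#                  "price": "199,00", "price_old": "299,00"}]}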
@render_to('products/list.html')
def product_list(request, **kwargs):
"""
Keyword arguments define filtering and ordering rules
Example: product_list(request, ordering=('name', ), kids=1) will return queryset for /products/kids/ page ordered by name
Default ordering is ('price', ) - cheapest first
"""
ordering = kwargs.pop('ordering', ('price', ))
paginator = Paginator(models.Product.objects.filter(**kwargs).order_by(*ordering), 10)
page = request.GET.get('page')
try:
products = paginator.page(page)
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
return {'products': products}
@render_to('products/detail.html')
def product_detail(request, pk):
return {'product': get_object_or_404(models.Product, pk=pk)}
@render_to('products/form.html')
def change_product(request, pk=None):
instance = get_object_or_404(models.Product, pk=pk) if pk else None
if request.method == 'POST':
form = forms.ProductForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
return redirect(instance.get_absolute_url())
elif pk:
# instance was already fetched above, so just bind the form to it
form = forms.ProductForm(instance=instance)
else:
form = forms.ProductForm()
return {'form': form}
def delete_product(request, pk=None):
instance = get_object_or_404(models.Product, pk=pk)
instance.delete()
messages.info(request, 'Product was successfully deleted')
return redirect(reverse('product_list'))
| null |
products/views.py
|
views.py
|
py
| 2,970 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "annoying.decorators.render_to",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "annoying.decorators.render_to",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "annoying.decorators.render_to",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 90,
"usage_type": "call"
}
] |
11518226
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Task
# Create your views here.
def index_form(request):
"""
main page
a list of tasks
"""
if request.method == "POST":
# check done form
done_id = request.POST.get("done", "")
if done_id:
task = Task.objects.filter(id=done_id)
if task[0].status == 1:
task.update(status=0)
else:
task.update(status=1)
return redirect("/")
# check filter
filter = request.POST.get("filter", "")
if filter == "end_date":
tasks = Task.objects.order_by('end_date')
return render(request, 'myapp/index.html', {'tasks':tasks})
elif filter == "start_data":
tasks = Task.objects.order_by('name')
return render(request, 'myapp/index.html', {'tasks':tasks})
elif filter == "priority":
tasks = Task.objects.order_by('priority')
return render(request, 'myapp/index.html', {'tasks':tasks})
elif filter == "dones":
tasks = Task.objects.filter(status=1)
return render(request, 'myapp/index.html', {'tasks':tasks})
elif filter == "remains":
tasks = Task.objects.filter(status=0)
return render(request, 'myapp/index.html', {'tasks':tasks})
else:
tasks = Task.objects.order_by('id')
return render(request, 'myapp/index.html', {'tasks':tasks})
else:
tasks = Task.objects.all()
return render(request, 'myapp/index.html', {'tasks':tasks})
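# Hedged refactoring sketch (editor addition, not part of the original app):
# the if/elif chain above can be collapsed into a lookup table. The filter
# keys come from the original code; the helper name and the decision to omit
# the "done" toggle here are assumptions.
FILTER_QUERYSETS = {
"end_date": lambda: Task.objects.order_by('end_date'),
"start_data": lambda: Task.objects.order_by('name'),
"priority": lambda: Task.objects.order_by('priority'),
"dones": lambda: Task.objects.filter(status=1),
"remains": lambda: Task.objects.filter(status=0),
}
def index_form_compact(request):
if request.method == "POST":
fetch = FILTER_QUERYSETS.get(request.POST.get("filter", ""),
lambda: Task.objects.order_by('id'))
return render(request, 'myapp/index.html', {'tasks': fetch()})
return render(request, 'myapp/index.html', {'tasks': Task.objects.all()})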
def delete(request, id):
"""
soft-delete a task by id (the row is kept; status is set to 1 / done)
"""
task = Task.objects.filter(id=id)
if request.method == "POST":
task.update(status=1)
return redirect("/")
return render(request, "myapp/delete.html")
def add_task(request):
"""
create a task
"""
if request.method == "POST":
name = request.POST.get("name","")
priority = request.POST.get("priority", "")
end_date = request.POST.get("end_date", "")
description = request.POST.get("description", "")
task = Task(name=name, priority=priority, description=description, end_date=end_date)
task.save()
return redirect("/")
else:
return render(request, "myapp/add.html")
| null |
todolist/myapp/views.py
|
views.py
|
py
| 2,421 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Task.objects.filter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.order_by",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.order_by",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.order_by",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.filter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.filter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.order_by",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.all",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.filter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.Task",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 74,
"usage_type": "call"
}
] |
507408094
|
#!/usr/bin/python3
import tkinter as tk
from tkinter.filedialog import askopenfilenames
from PIL import Image
import re
import os
class Application(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent, background="red")
self.parent = parent
self.actions = [{"display_text": "Change filetype",
"options": [{"text": "Convert to jpg",
"function": self.convert_to_jpg},
{"text": "Convert to png",
"function": self.convert_to_png}]},
{"display_text": "Change size",
"options": [{"text": "Size of all images smaller than a certain value.",
"function": self.all_images_size},
{"text": "Make every image x times smaller.",
"function": self.scale_images},
{"text": "Set maxium height or/and width of all images.",
"function": self.set_maximum_size}]},
{"display_text": "Apply filter",
"options": [{"text": "Sepia",
"function": self.sepia_filter},
{"text": "Black-white",
"function": self.black_and_white}]}]
self.chosen_files = []
self.initUI()
def initUI(self):
self.parent.title("Imagero Changero")
self.grid(row=0, column=0, rowspan=6, columnspan=10)
self.main_container = tk.Frame(self, relief=tk.RAISED, borderwidth=1, background="white")
self.main_container.grid(row=0, column=0, rowspan=5, columnspan=7)
action_label = tk.Label(self.main_container, text="Action:")
action_label.grid(row=0, column=0, sticky="w")
self.var = tk.IntVar()
action_radio = []
for i, action in enumerate(self.actions):
action_radio.append(tk.Radiobutton(self.main_container, text=action["display_text"],
variable=self.var, value=i,
command=self.update_option_list))
action_radio[i].grid(row=0, column=(2 * i + 1), columnspan=2)
self.option_list = tk.Listbox(self.main_container, bg="white", height=3, width=50)
self.update_option_list()
self.option_list.grid(row=1, column=0, columnspan=5, rowspan=3)
self.browse_files = tk.Button(self.main_container, text="Choose photos", command=self.choose_photos)
self.browse_files.grid(row=4, column=0, sticky="w")
ok_button = tk.Button(self, text="Ok", command=self.do_action)
ok_button.grid(row=5, column=6, sticky="e")
quit_button = tk.Button(self, text="Quit", command=self.quit)
quit_button.grid(row=5, column=5, sticky="e")
def do_action(self):
print("GO!")
print("wybrana opcja:", self.var.get())
print("wybrana czynnosc:", self.option_list.curselection())
if self.var.get() == 1:
print("dodatkowa opcja:", self.extra_option_spin1.get(), self.extra_option_spin2.get())
selection = self.option_list.curselection()
if not selection:
print("No option selected")
return
self.actions[self.var.get()]["options"][selection[0]]["function"]()
def choose_photos(self):
self.chosen_files = askopenfilenames()
def update_option_list(self):
self.option_list.delete(0, tk.END)
for option in self.actions[self.var.get()]["options"]:
self.option_list.insert(tk.END, option["text"])
if self.var.get() == 1:
self.extra_option_label1 = tk.Label(self.main_container, text="Extra1")
self.extra_option_label2 = tk.Label(self.main_container, text="Extra2")
self.extra_option_label1.grid(row=1, column=5, sticky="w", columnspan=2)
self.extra_option_label2.grid(row=2, column=5, sticky="w", columnspan=2)
self.extra_option_spin1 = tk.Spinbox(self.main_container, from_=0, to=100, width=5)
self.extra_option_spin2 = tk.Spinbox(self.main_container, from_=50, to=300, width=5)
self.extra_option_spin1.grid(row=1, column=7, sticky="w", columnspan=2)
self.extra_option_spin2.grid(row=2, column=7, sticky="w", columnspan=2)
else:
if hasattr(self, "extra_option_label1"):
self.extra_option_label1.destroy()
self.extra_option_label2.destroy()
self.extra_option_spin1.destroy()
self.extra_option_spin2.destroy()
def convert_to_jpg(self):
result = []
for old_path in self.chosen_files:
img = Image.open(old_path)
new_path = re.sub(r'\.png', '.jpg', old_path)
new_name = new_path.split('/')[-1]
result.append({"object": img, "name": new_name})
self.save_files(result)
def convert_to_png(self):
result = []
for old_path in self.chosen_files:
img = Image.open(old_path)
new_path = re.sub(r'\.jpg', '.png', old_path)
new_name = new_path.split('/')[-1]
result.append({"object": img, "name": new_name})
self.save_files(result)
def all_images_size(self):
print("make their size equal", self.extra_option_spin1.get())
max_size = int(self.extra_option_spin1.get()) * 1000000 #MB -> B
actual_size = 0
files_len = len(self.chosen_files)
for i in self.chosen_files:
actual_size += os.stat(i).st_size
if actual_size <= max_size - 1000000:
# already under the limit; nothing to resize or save
return
right_width, right_height = [], []
left_width, left_height = [0] * files_len, [0] * files_len
new_width, new_height = [0] * files_len, [0] * files_len
for img_path in self.chosen_files:
img = Image.open(img_path)
width, height = img.size
right_width.append(width)
right_height.append(height)
while actual_size < max_size - 1000000 or actual_size > max_size + 1000000:
print("SIZE:", actual_size/1000000, "LEFT[0]", left_height[0], "RIGHT[0]", right_height[0])
result = []
for i, old_path in enumerate(self.chosen_files):
img = Image.open(old_path)
name = old_path.split('/')[-1]
new_width[i] = (right_width[i] + left_width[i]) // 2
new_height[i] = (right_height[i] + left_height[i]) // 2
resized_img = img.resize((new_width[i], new_height[i]))
result.append({"object": resized_img, "name": name})
self.save_files(result)
actual_size = 0
for i in os.listdir():
actual_size += os.stat(i).st_size
for i in range(files_len):
if actual_size > max_size:
right_width[i] = new_width[i]
right_height[i] = new_height[i]
else:
left_width[i] = new_width[i]
left_height[i] = new_height[i]
print("Pliki tyle zajmuja:", actual_size)
def scale_images(self):
print("scale images", self.extra_option_spin1.get(), "times")
scale_num = int(self.extra_option_spin1.get())**0.5  # sqrt so the pixel area shrinks by the requested factor
result = []
for old_path in self.chosen_files:
img = Image.open(old_path)
name = old_path.split('/')[-1]
width, height = img.size
resized_img = img.resize((int(width / scale_num), int(height / scale_num)))
result.append({"object": resized_img, "name": name})
self.save_files(result)
def set_maximum_size(self):
print("max height:", self.extra_option_spin1.get(),
"max width:", self.extra_option_spin2.get())
max_height = int(self.extra_option_spin2.get())
max_width = int(self.extra_option_spin1.get())
result = []
for old_path in self.chosen_files:
img = Image.open(old_path)
name = old_path.split('/')[-1]
width, height = img.size
new_width = min(width, max_width)
scale_num = width / new_width
new_height = min(height, max_height)
scale_num = max(height / new_height, scale_num)
resized_img = img.resize((int(width / scale_num), int(height / scale_num)))
result.append({"object": resized_img, "name": name})
self.save_files(result)
def sepia_filter(self):
print("sepia image", self.chosen_files)
def black_and_white(self):
print("greyscale", self.chosen_files)
def save_files(self, images):
if os.getcwd().split(os.sep)[-1] != "result":
result_folder = os.path.join(os.getcwd(), "result")
if not os.path.exists(result_folder):
os.makedirs(result_folder)
os.chdir(result_folder)
for img in images:
print(img["name"])
img["object"].save(img["name"])
print("Done!")
def main():
root = tk.Tk()
root.geometry("640x280")
root.update()
app = Application(root)
root.mainloop()
if __name__ == "__main__":
main()
| null |
imagero.py
|
imagero.py
|
py
| 9,401 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tkinter.Frame",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame.__init__",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tkinter.RAISED",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tkinter.IntVar",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tkinter.Radiobutton",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tkinter.Listbox",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfilenames",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tkinter.END",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "tkinter.END",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tkinter.Spinbox",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tkinter.Spinbox",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 202,
"usage_type": "call"
}
] |
316037642
|
import logging
from celery.task import task
from report_metric import reporter
from report_metric import settings
logging.basicConfig(level=logging.DEBUG)
def setup_reporter(destination=None, source=None):
destination = destination or settings.get('METRICS_DESTINATION', 'librato')
source = source or settings.get('METRICS_SOURCE', None)
if destination == 'librato' and reporter.LIBRATO and settings.get('METRICS_LIBRATO_USER'):
return reporter.LibratoReport(username=settings.get('METRICS_LIBRATO_USER'),
api_key=settings.get('METRICS_LIBRATO_TOKEN'),
source=source)
elif destination == 'direct':
return reporter.DirectReport()
elif destination == 'dummy':
return reporter.DummyReport()
raise reporter.StatsReportException('No available/configured destination') # maybe not right exception
def gauge(name, number, **kwargs):
'''
Helper method for single call sending of a gauge
:param name: metric name
:param number: metric number
:param destination: optional, if not sending to default
:return:
'''
if settings.get('METRICS_USE_CELERY', False):
_report_gauge.delay(name, number, **kwargs)
else:
_report_gauge(name, number, **kwargs)
def counter(name, number=1, **kwargs):
'''
Helper method for single call sending of a counter
:param name: metric name
:param number: metric number
:param destination: optional, if not sending to default
:return:
'''
if settings.get('METRICS_USE_CELERY', False):
_report_counter.delay(name, number, **kwargs)
else:
_report_counter(name, number, **kwargs)
# There's got to be a more elegant way to conditionally wrap a function in a celery Task;
# for the moment both reporters are defined as tasks and only .delay()-ed when METRICS_USE_CELERY is set
@task(name='report_metric.gauge')
def _report_gauge(name, number, **kwargs):
try:
rep = setup_reporter(kwargs.get('destination', None), kwargs.get('source', None))
rep.gauge(name, number)
except reporter.StatsReportException as e:
logging.exception(str(e))
@task(name='report_metric.counter')
def _report_counter(name, number=1, **kwargs):
try:
setup_reporter(kwargs.get('destination', None), kwargs.get('source', None)).counter(name, number)
except reporter.StatsReportException as e:
logging.exception(str(e))
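if __name__ == '__main__':
# Hedged usage sketch (editor addition): exercise both helpers against the
# dummy destination so no external service is needed. The metric names are
# illustrative, and DummyReport is assumed to accept these calls.
counter('demo.counter', destination='dummy')
gauge('demo.gauge', 3.14, destination='dummy')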
| null |
src/report_metric/metric.py
|
metric.py
|
py
| 2,404 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "report_metric.settings.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.LIBRATO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "report_metric.reporter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.LibratoReport",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "report_metric.reporter",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.DirectReport",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "report_metric.reporter",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.DummyReport",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "report_metric.reporter",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.StatsReportException",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "report_metric.reporter",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "report_metric.settings.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "report_metric.settings",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "report_metric.reporter.StatsReportException",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "report_metric.reporter",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "logging.exception",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "celery.task.task",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "report_metric.reporter.StatsReportException",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "report_metric.reporter",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "logging.exception",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "celery.task.task",
"line_number": 66,
"usage_type": "call"
}
] |
480371716
|
from configparser import ConfigParser
from random import Random
from json import JSONEncoder
import paramiko
import os
import time
import eel
import json
import sys
class Eagle(object):
def __init__(self, config_filename, send_fn):
self.running = False
self.ssh = None
self.sftp = None
self.logs = {}
# Retrieve configuration from external config file
self.config = ConfigParser()
self.config.read(os.sep.join([os.getcwd(), config_filename]))
self.username = self.get_config('username')
self.domain = self.get_config('flndevdomain')
self.maxlines = int(self.get_config('maxlines'))
self.gaflogsdir = self.get_config('gaflogsdir')
self.thriftlogsdir = self.get_config('thriftlogsdir')
self.remotetmp = self.get_config('remotetmpdir')
self.tempfile = os.sep.join([self.remotetmp, 'log'+str(int(Random().random()*1e12))])
self.localdir = os.sep.join([os.getcwd(), 'logs'])
self.send_log_to_user = send_fn
def __del__(self):
self.close()
def get_config(self, option):
if self.config:
return self.config.get('eagle', option)
def get_files_info(self, path):
files_info = {}
if self.sftp:
for filename in self.sftp.listdir(path):
if filename.split('.')[-1] in ['json', 'log', 'error']:
fullpath = os.sep.join([path, filename])
filestat = self.sftp.stat(fullpath)
files_info[filename] = dict(
name=filename,
longname=fullpath,
date=filestat.st_mtime,
size=filestat.st_size,
mode=filestat.st_mode & 0x4,
start=1,
)
return files_info
def get_logs_info(self):
logs_info = {}
logs_info.update(self.get_files_info(self.gaflogsdir))
logs_info.update(self.get_files_info(self.thriftlogsdir))
return logs_info
def get_logs(self, log_info):
print(f"Found changes on {log_info['name']}...")
stdin, stdout, stderr = self.ssh.exec_command(
'sudo tail -n +%d -q %s | tail -n %d -q' % (
log_info['start'], log_info['longname'], self.maxlines
)
)
messages = [ str(line) for line in stdout.readlines() ]
encoder = JSONEncoder()  # avoid shadowing the imported json module
return encoder.encode(messages)
def send(self, log_info):
log_name = log_info['name']
contents = self.get_logs(log_info)
encoder = JSONEncoder()  # avoid shadowing the imported json module
self.send_log_to_user(encoder.encode(dict(name=log_name, value=contents)))
def watch(self):
print("Starting EAGLE EYES....")
try:
# initially send all the log files
print("Initial Loading of Logs...")
self.logs = self.get_logs_info()
for log_name in self.logs.keys():
self.send(self.logs[log_name])
print("Finished Initial Loading of Logs...")
while self.running:
print("Watching....")
eel.sleep(0.5)
logs_info = self.get_logs_info()
for log_key in logs_info:
if (
self.running and
logs_info[log_key]['size'] != 0 and (
logs_info[log_key]['size'] != self.logs[log_key]['size']
or
logs_info[log_key]['date'] != self.logs[log_key]['date']
)
):
# the remote file has changed, send the log
self.send(logs_info[log_key])
# update the cached log info
self.logs[log_key] = logs_info[log_key]
except Exception as e:
print(e)
print("Disconnecting")
self.running = False
self.close()
sys.exit()
def open(self, hostname):
if hostname:
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.load_system_host_keys()
self.ssh.connect(
hostname='.'.join([hostname, self.domain]), username=self.username
)
self.sftp = self.ssh.open_sftp()
self.sftp.chdir(self.gaflogsdir)
self.sftp.getcwd()
self.running = True
self.hostname = hostname
def close(self):
self.running = False
if self.sftp:
self.sftp.close()
self.sftp = None
if self.ssh:
self.ssh.close()
self.ssh = None
def sleep(self, seconds):
time.sleep(seconds)
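if __name__ == '__main__':
# Hedged usage sketch (editor addition): run the watcher standalone and
# print incoming payloads. The config filename and hostname are assumptions.
eagle = Eagle('config.ini', send_fn=print)
eagle.open('devbox01')
eagle.watch()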
| null |
src/eagle.py
|
eagle.py
|
py
| 4,878 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.sep.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.sep.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "random.Random",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.sep.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.sep.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "json.JSONEncoder",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.encode",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.JSONEncoder",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "json.encode",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "eel.sleep",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "paramiko.SSHClient",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "paramiko.AutoAddPolicy",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 152,
"usage_type": "call"
}
] |
151928741
|
import ast
import io
import mimetypes
import re
import socket
import sys
import traceback
from pathlib import Path
WEBROOT = "webroot"
def response_ok(body=b"This is a minimal response", mimetype=b"text/plain"):
"""
returns a basic HTTP response
Ex:
response_ok(
b"<html><h1>Welcome:</h1></html>",
b"text/html"
) ->
b'''
HTTP/1.1 200 OK\r\n
Content-Type: text/html\r\n
\r\n
<html><h1>Welcome:</h1></html>\r\n
'''
"""
try:
return b"""HTTP/1.1 200 OK\r
Content-Type: """ + mimetype + b"""\r
\r
""" + body
except TypeError:
return b"""HTTP/1.1 500 Internal Server Error
Content-Type: text/html
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an error and could not complete your request.</p>
""".replace(b'\n', b'\r\n')
def response_method_not_allowed():
"""Returns a 405 Method Not Allowed response"""
return b"""HTTP/1.1 405 Method Not Allowed
Content-Type: text/html
<title>405 Method Not Allowed</title>
<h1>Method Not Allowed</h1>
<p>The method is not allowed for the requested URL.</p>
""".replace(b'\n', b'\r\n')
def response_not_found():
"""Returns a 404 Not Found response"""
return b"""HTTP/1.1 404 Not Found
Content-Type: text/html
<title>404 Not Found</title>
<h1>Not Found</h1>
<p>The requested URL was not found on the server.</p>
""".replace(b'\n', b'\r\n')
def parse_request(request):
"""
Given the content of an HTTP request, returns the path of that request.
This server only handles GET requests, so this method shall raise a
NotImplementedError if the method of the request is not GET.
"""
match = re.match(r"([^\s]+) ([^\s]+) ([^\s]+)", request)
if match:
if match.group(1) == "GET":
return match.group(2)
raise NotImplementedError
def response_path(path):
"""
This method should return appropriate content and a mime type.
If the requested path is a directory, then the content should be a
plain-text listing of the contents with mimetype `text/plain`.
If the path is a file, it should return the contents of that file
and its correct mimetype.
If the path does not map to a real location, it should raise an
exception that the server can catch to return a 404 response.
Ex:
response_path('/a_web_page.html') -> (b"<html><h1>North Carolina...",
b"text/html")
response_path('/images/sample_1.png')
-> (b"A12BCF...", # contents of sample_1.png
b"image/png")
response_path('/') -> (b"images/, a_web_page.html, make_type.py,...",
b"text/plain")
response_path('/a_page_that_doesnt_exist.html') -> Raises a NameError
"""
file = Path(WEBROOT + path)
if file.is_dir():
content = file.absolute().name
for child in file.iterdir():
content += '\n - ' + child.name
content = content.encode()
mime_type = b"text/plain"
elif file.is_file():
content = file.read_bytes()
mime_type = mimetypes.guess_type(file)[0].encode()
else:
raise NameError
if mime_type == b'text/x-python':
# If we ended up productionizing this we would want to scrub the, um, contents of content
# to make sure that it's not going to do anything bad. Even though theoretically we're not
# taking inputs from untrusted sources here (we're /certainly/ not letting the user upload
# files..) we can't make a blanket trust statement here that the script isn't going to do
# anything nefarious like subprocess.Popen('rm -rf /'.split(' ')) or anything like that.
# Still, as an exercise:
# Create an IO buffer and set stdout to it -- we can compile the contents of file to
# bytecode and evaluate it with exec() or eval(), but it will print to stdout.
tmp = sys.stdout
eval_content = io.StringIO()
sys.stdout = eval_content
eval(compile(content, file.name, 'exec'))
content = eval_content.getvalue().encode()
sys.stdout = tmp
mime_type = b"text/html"
return content, mime_type
def server(log_buffer=sys.stderr):
address = ('127.0.0.1', 10000)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("making a server on {0}:{1}".format(*address), file=log_buffer)
sock.bind(address)
sock.listen(1)
try:
while True:
print('waiting for a connection', file=log_buffer)
conn, addr = sock.accept() # blocking
try:
print('connection - {0}:{1}'.format(*addr), file=log_buffer)
request = ''
while True:
data = conn.recv(1024)
request += data.decode('utf8')
if '\r\n\r\n' in request:
break
print("Request received:\n{}\n\n".format(request))
try:
content, mime_type = response_path(parse_request(request))
response = response_ok(
body=content,
mimetype=mime_type
)
except NotImplementedError:
response = response_method_not_allowed()
except NameError:
response = response_not_found()
conn.sendall(response)
except:
traceback.print_exc()
finally:
conn.close()
except KeyboardInterrupt:
sock.close()
return
except:
traceback.print_exc()
if __name__ == '__main__':
server()
sys.exit(0)
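# Hedged usage sketch (editor addition): exercising the server from another
# shell once it is listening on 127.0.0.1:10000. The paths are illustrative.
#
#   $ curl http://127.0.0.1:10000/                  # directory listing, text/plain
#   $ curl http://127.0.0.1:10000/a_web_page.html   # file contents, text/html
#   $ curl -X POST http://127.0.0.1:10000/          # -> 405 Method Not Allowed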
| null |
http_server.py
|
http_server.py
|
py
| 5,944 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.match",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_REUSEADDR",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 196,
"usage_type": "call"
}
] |
421865152
|
from pyspark import SparkContext
from itertools import islice, combinations, tee
from operator import add
from sys import argv
from time import time
def basket_mapper(element):
comma_location = element.find(',')
return element[:comma_location], element[(comma_location+1):]
def read_baskets():
# Spark Context intitialize
sc = SparkContext('local[*]', 'hw2_task_2')
sc.setLogLevel("OFF")
baskets = sc.textFile(argv[3]).mapPartitionsWithIndex(lambda i, element: islice(element, 1, None) if i == 0 else element).\
map(basket_mapper).groupByKey().\
map(lambda basket: basket[1]).map(set).filter(lambda basket: len(basket) > int(argv[1])).repartition(3)
# coalesce(12) #.persist()
return sc, baskets
def set_up_task2():
# create basket depending on case from command prompt
sc, baskets = read_baskets()
# used to derive the per-partition sample support in phase one
sample_support_vars = dict(num_baskets=baskets.count(), support=int(argv[2]))
return sc, baskets, sample_support_vars
def declare_son_vars():
# count of pass number
frequent_item_set_candidates = dict()
frequent_item_sets = dict()
return frequent_item_set_candidates, frequent_item_sets
def prune_baskets(basket, previous_frequent_item_sets):
return basket.intersection(set(previous_frequent_item_sets))
def prune_candidates(n_baskets_partition, sample_support_vars, candidate_ids_dict):
sample_support = sample_support_vars['support'] * n_baskets_partition / sample_support_vars['num_baskets']
frequent_item_set_candidates = tuple(k for k, v in candidate_ids_dict.items() if v >= sample_support)
return frequent_item_set_candidates
def update_item_set_dict(item_set_dict, item_set_candidate):
if item_set_candidate in item_set_dict:
item_set_dict[item_set_candidate] += 1
else:
item_set_dict[item_set_candidate] = 1
return item_set_dict
def phase_one(iterator, sample_support_vars, previous_frequent_item_sets, item_set_size):
possible_item_set_candidate_dict = dict()
n_baskets_partition = 0
if item_set_size > 2:
iterator = list(iterator)
n_baskets_partition = len(iterator)
# generate each candidate pair once, then count its support across every
# basket in the partition (candidate generation must not be repeated per
# basket, or the counts get inflated by the partition size)
for i in range(previous_frequent_item_sets.__len__()-1):
for j in range(i+1, previous_frequent_item_sets.__len__()):
if previous_frequent_item_sets[i][:(item_set_size-2)] == \
previous_frequent_item_sets[j][:(item_set_size-2)]:
possible_item_set_candidate = tuple(sorted(set(
previous_frequent_item_sets[i]).union(set(previous_frequent_item_sets[j]))))
for partitioned_basket in iterator:
if set(possible_item_set_candidate).issubset(partitioned_basket):
possible_item_set_candidate_dict = update_item_set_dict(possible_item_set_candidate_dict,
possible_item_set_candidate)
elif item_set_size == 2:
for partitioned_basket in iterator:
n_baskets_partition += 1
# go through all combinations of possible item sets by evaluating frequent singletons
for possible_item_set_candidate in map(tuple, combinations(previous_frequent_item_sets, item_set_size)):
if set(possible_item_set_candidate).issubset(partitioned_basket):
possible_item_set_candidate_dict = update_item_set_dict(possible_item_set_candidate_dict,
possible_item_set_candidate)
else:
# enumerate each item in each basket
for partitioned_basket in iterator:
partitioned_basket = list(partitioned_basket)
n_baskets_partition += 1
for possible_item_set_candidate in partitioned_basket:
possible_item_set_candidate_dict = update_item_set_dict(possible_item_set_candidate_dict,
possible_item_set_candidate)
frequent_item_set_candidates = prune_candidates(n_baskets_partition, sample_support_vars, possible_item_set_candidate_dict)
yield frequent_item_set_candidates
def phase_two(iterator, frequent_item_set_candidates, item_set_length):
# iterator, iterator_copy = tee(iterator, 2)
frequent_item_set_dict = dict()
if item_set_length > 1:
for basket in iterator:
for frequent_item_set_candidate in frequent_item_set_candidates:
if set(frequent_item_set_candidate).issubset(basket):
frequent_item_set_dict = update_item_set_dict(frequent_item_set_dict, frequent_item_set_candidate)
frequent_item_set = set((k, v) for k, v in frequent_item_set_dict.items())
else:
for basket in iterator:
for frequent_item_set_candidate in basket:
frequent_item_set_dict = update_item_set_dict(frequent_item_set_dict, frequent_item_set_candidate)
frequent_item_set = set((k, v) for k, v in frequent_item_set_dict.items())
yield frequent_item_set
def write_output_file(item_set_size, frequent_item_set_candidates, frequent_item_sets):
f = open(argv[4], 'w')
f.write('Candidates:\n')
for i in range(1, item_set_size):
if i == 2:
f.write('\n')
if i != 1:
f.write(str(frequent_item_set_candidates[i])[1:-1].replace('), ', '),') + '\n\n')
else:
singletons = frequent_item_set_candidates[i]
for j, frequent_singleton in enumerate(singletons):
if j != singletons.__len__() - 1:
f.write('(\'' + str(frequent_singleton) + '\'),')
else:
f.write('(\'' + str(frequent_singleton) + '\')\n')
f.write('Frequent Itemsets:\n')
for i in range(1, item_set_size):
if i == 2:
f.write('\n')
if i != 1:
if i != item_set_size - 1:
f.write(str(frequent_item_sets[i])[1:-1].replace('), ', '),') + '\n\n')
else:
f.write(str(frequent_item_sets[i])[1:-1].replace('),', '),'))
else:
singletons = frequent_item_sets[i]
for j, frequent_singleton in enumerate(singletons):
if j != singletons.__len__() - 1:
f.write('(\'' + str(frequent_singleton) + '\'),')
else:
f.write('(\'' + str(frequent_singleton) + '\')\n')
f.close()
def main():
start_time = time()
sc, baskets, sample_support_vars = set_up_task2()
# declare solution dictionaries
frequent_item_set_candidates, frequent_item_sets = declare_son_vars()
# track item set size... increases by 1 per iteration
item_set_size = 1
keep_looping = True
while keep_looping:
if item_set_size > 1:
previous_frequent_item_sets = frequent_item_sets[item_set_size - 1]
if previous_frequent_item_sets.__len__() == 0:
keep_looping = False
# uncomment break while looping
break
else:
previous_frequent_item_sets = None
frequent_item_set_candidates[item_set_size] = tuple(sorted(
baskets.mapPartitions(
lambda iterator: phase_one(iterator, sample_support_vars, previous_frequent_item_sets, item_set_size)).
flatMap(lambda element: element).distinct().collect()))
if frequent_item_set_candidates[item_set_size].__len__() == 0:
keep_looping = False
del frequent_item_set_candidates[item_set_size]
# uncomment break while looping
break
if item_set_size == 1:
baskets = baskets.map(lambda basket: prune_baskets(basket, frequent_item_set_candidates[item_set_size])).repartition(3).persist()
frequent_item_sets[item_set_size] = tuple(sorted(
baskets.mapPartitions(lambda iterator: phase_two(iterator, frequent_item_set_candidates[item_set_size], item_set_size)).
flatMap(lambda element: element).reduceByKey(add).
filter(lambda element: element[1] >= sample_support_vars['support']).
map(lambda element: element[0]).collect()))
if item_set_size == 1:
baskets = baskets.map(lambda basket: prune_baskets(basket, frequent_item_sets[item_set_size])).repartition(3).persist()
if frequent_item_sets[item_set_size].__len__() == 0:
keep_looping = False
del frequent_item_sets[item_set_size]
# uncomment break while looping
break
item_set_size += 1
write_output_file(item_set_size, frequent_item_set_candidates, frequent_item_sets)
print('Duration: '+str(time() - start_time))
if __name__ == '__main__':
main()
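# Hedged usage sketch (editor addition): expected invocation, matching the
# argv indices used above (min basket length, support threshold, input CSV,
# output path). The file names are illustrative.
#
#   $ spark-submit jacob_beaudin_task2.py 20 50 user_business.csv task2_output.txt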
| null |
HW2/jacob_beaudin_task2.py
|
jacob_beaudin_task2.py
|
py
| 9,145 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyspark.SparkContext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "itertools.islice",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "itertools.combinations",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 197,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 214,
"usage_type": "call"
}
] |
112345962
|
from TextToLif import TextToLif
import sys
sys.path.append('..')
from LifFileParser import LifFileParser
import corenlp
import json
"""
This is a wrapper around Stanford CoreNLP functions that saves the results in the LIF format.
"""
class StanfordTokenizer:
def __init__(self, lif_string=""):
self.lif_parser = LifFileParser(string=lif_string)
self.text = self.lif_parser.data['payload']['text']['@value']
def call_tokenizer(self):
token_annotations = []
port = 9000
success = False
while not success:
# rebuild the endpoint on each attempt so the port bump takes effect
address = "http://localhost:" + str(port)
try:
with corenlp.client.CoreNLPClient(annotators="tokenize".split(), endpoint=address) as client:
ann = client.annotate(self.text)
id = 0
for token in ann.sentencelessToken:
word = token.word
start = int(token.beginChar)
end = int(token.endChar)
new_ann = {
"id": "tok" + str(id),
"start": start,
"end": end,
"@type": "http://vocab.lappsgrid.org/Token",
"label": "http://vocab.lappsgrid.org/Token",
"features": {
"word": word
}
}
id += 1
token_annotations.append(new_ann)
success = True
break
except:
port += 1
success = False
metadata = {
"contains": {
"http://vocab.lappsgrid.org/Token": {
"producer": "org.anc.lapps.stanford.Tokenizer:2.0.0",
"type": "stanford"
}
}
}
view = {
"metadata": metadata,
"annotations": token_annotations
}
self.lif_parser.data["payload"]["views"].append(view)
class StanfordSentenceSplitter:
def __init__(self, lif_string=""):
self.lif_parser = LifFileParser(string=lif_string)
self.text = self.lif_parser.data['payload']['text']['@value']
def call_splitter(self):
sent_annotations = []
port = 9000
success = False
while not success:
# rebuild the endpoint on each attempt so the port bump takes effect
address = "http://localhost:" + str(port)
try:
with corenlp.client.CoreNLPClient(annotators="ssplit".split(), endpoint=address) as client:
ann = client.annotate(self.text)
id = 0
for sentence in ann.sentence:
length = len(sentence.token)
start = int(sentence.token[0].beginChar)
end = int(sentence.token[length-1].endChar)
ss = self.text[start:end]
new_ann = {
"id": "sent"+str(id),
"start": start,
"end": end,
"@type": "http://vocab.lappsgrid.org/Sentence",
"label": "Sentence",
"features":{
"sentence": ss
}
}
id += 1
sent_annotations.append(new_ann)
success = True
break
except:
port += 1
success = False
metadata = {
"contains": {
"http://vocab.lappsgrid.org/Sentence": {
"producer": "org.anc.lapps.stanford.SentenceSplitter:2.0.0",
"type": "sentence:stanford"
}
}
}
view = {
"metadata": metadata,
"annotations": sent_annotations
}
self.lif_parser.data["payload"]["views"].append(view)
class StanfordTagger:
def __init__(self, lif_string=""):
self.lif_parser = LifFileParser(string=lif_string)
self.text = self.lif_parser.data['payload']['text']['@value']
def call_pos(self):
sent_annotations = []
with corenlp.client.CoreNLPClient(annotators="pos".split()) as client:
ann = client.annotate(self.text)
print(ann)
if __name__ == "__main__":
converter = TextToLif("Gross Pathology\nThis content contains adenocarcinoma !? I expect it to be tagged.")
converter.convert_lif()
st = StanfordTokenizer(converter.lif_string)
st.call_tokenizer()
stanford_tokenizer_lif = json.dumps(st.lif_parser.data)
sentence = StanfordSentenceSplitter(stanford_tokenizer_lif)
sentence.call_splitter()
stanford_sentence_splitter_lif = json.dumps(sentence.lif_parser.data)
print(stanford_sentence_splitter_lif)
| null |
HLAFeatureLibrary/Training/stanford_wrapper.py
|
stanford_wrapper.py
|
py
| 3,724 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "LifFileParser.LifFileParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "corenlp.client.CoreNLPClient",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "corenlp.client",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "LifFileParser.LifFileParser",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "corenlp.client.CoreNLPClient",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "corenlp.client",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "LifFileParser.LifFileParser",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "corenlp.client.CoreNLPClient",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "corenlp.client",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "TextToLif.TextToLif",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 138,
"usage_type": "call"
}
] |
211228052
|
import requests
from bs4 import BeautifulSoup
url = 'http://www.lazada.com.ph/apple-macbook-pro-mf839zpa-8gb-intel-core-i5-13-540573.html'
request = requests.get(url)
content = request.content
soup = BeautifulSoup(content, "html.parser")
element = soup.find("span", {"itemprop": "price", "id": "special_price_box"})
string_price = element.text.strip()
string_price = string_price.replace(',', '')
price = float(string_price)
if price < 60000:
print("Within the budget, current price is {}".format(string_price))
else:
print("Do not buy its to expensive at the moment. Current price: {}".format(string_price))
# <span id="special_price_box" itemprop="price">61,899.00</span>
| null |
src/app.py
|
app.py
|
py
| 685 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
}
] |
257900350
|
from collections import namedtuple
TaskData = namedtuple('TaskData', (
# Time when the task was started
'startTime',
# Mesos' ID of the slave where task is being run
'slaveID',
# Mesos' ID of the executor running the task
'executorID'))
ResourceRequirement = namedtuple('ResourceRequirement', (
# Number of bytes (!) needed for a task
'memory',
# Number of CPU cores needed for a task
'cpu',
# Number of bytes (!) needed for task on disk
'disk'))
ToilJob = namedtuple('ToilJob', (
# A batchjob ID specific to this batch system implementation
'jobID',
# A ResourceRequirement tuple describing the resources needed by this batchjob
'resources',
# The command to be run on the worker node
'command',
# The resource object representing the user script
'userScript',
# The resource object representing the toil source tarball
'toilDistribution'))
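if __name__ == '__main__':
# Hedged usage sketch (editor addition): constructing the tuples above with
# purely illustrative values.
req = ResourceRequirement(memory=2 * 1024 ** 3, cpu=1, disk=4 * 1024 ** 3)
job = ToilJob(jobID='job-0001', resources=req, command='echo hello',
userScript=None, toilDistribution=None)
print(job)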
| null |
src/toil/batchSystems/mesos/__init__.py
|
__init__.py
|
py
| 934 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 19,
"usage_type": "call"
}
] |
109328546
|
"""
基本语法练习
1. 规范格式
1.1 构建执行图
1.2 加载持久化模型,两种方式
2. 完成作业
2.1 变量作用域,变量定义规范,变量重用
2.2 模型训练:图构建,执行图
2.3 模型预测,pyplot画图
"""""
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
"""
** 【实际项目模板】 - 构建图和执行图 **
"""
# with tf.Graph().as_default():
# # 一、基于业务知识,构建执行图对象
# # 1. 数据的特征工程
#
# # 2. 模型构建
#
# # 二、运行
# with tf.Session() as sess:
# # 1. 加载数据
#
# # 2. 图执行
#
# # 3. 图效果查看
#
# # 4. 持久化
# pass
"""
** 【实际项目模板】 - 加载持久化模型一 **
"""
# with tf.Graph().as_default():
# # 1. 构建和训练过程完全一样的执行图
#
# # 2. 运行
# with tf.Session() as sess:
# # 1. 加载模型
#
# # 2. 使用模型预测,获取预测结果
# pass
"""
** 【实际项目模板】 - 加载持久化模型二 **
"""
# with tf.Graph().as_default():
# with tf.Session() as sess:
# # 1. 加载模型,同时加载执行图信息(从持久化信息中获取)
#
# # 2. 使用模型预测,获取预测结果
# pass
"""
作业:y = wx + b
要求:
1. 模拟图构建与执行图
2. 模型持久化
3. 模型加载与预测(可视化)
"""
"""
1. 模拟图构建与执行图
"""
def build_and_exec_model():
with tf.Graph().as_default():
# I. Build the execution-graph objects based on business knowledge
# 2. Model construction
# a. Define the input variables x and y
# trainable: whether the variable is updated during training
"""
Variable vs. get_variable
1. Same: both define variables.
2. Different:
2.1 In the same graph, Variable with an existing name creates a new variable; TF silently uniquifies the name (e.g. input_y_1).
2.2 In the same graph, get_variable checks whether the name exists: if not, it creates the variable; if it does, it raises an error.
To reuse an existing variable, pass a reuse argument or set a reusable variable scope.
"""
input_x = tf.Variable(name="input_x", dtype=tf.float32,
initial_value=np.array(
[[2, 3, 4, 5, 6], [8, 3, 5, 5, 5], [3, 6, 9, 5, 8]]).reshape(
(5, 3)))
input_y = tf.Variable(name="input_y", dtype=tf.float32,
initial_value=[[0], [1], [1], [0], [1]])
# input_y_test = tf.Variable(name="input_y", dtype=tf.float32,
# initial_value=[[0], [1], [1], [0], [1]])
#
# # <tf.Variable 'input_x:0' shape=(5, 3) dtype=float32_ref>
# # <tf.Variable 'input_y:0' shape=(5, 1) dtype=float32_ref>
# # <tf.Variable 'input_y_1:0' shape=(5, 1)
# print(input_x, input_y, input_y_test)
print(input_x, input_y)
# Model parameters
# w: a 3x1 matrix initialized from a Gaussian with mean 0.0 and stddev 1.0
w = tf.get_variable(name="w", dtype=tf.float32, shape=[3, 1], trainable=True,
initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0))
# b: a length-1 bias vector initialized to zeros
b = tf.get_variable(name="b", shape=[1], dtype=tf.float32, trainable=True,
initializer=tf.zeros_initializer())
''' Reuse method one: tf.get_variable_scope().reuse_variables() '''
# tf.get_variable_scope().reuse_variables()
# b_test = tf.get_variable(name="b", shape=[1], dtype=tf.float32, trainable=True,
# initializer=tf.zeros_initializer())
''' Reuse method two: a variable_scope with a reuse flag '''
# reuse=True alone is problematic, so use reuse=tf.AUTO_REUSE
# with tf.variable_scope("reuse", reuse=tf.AUTO_REUSE,
#                        initializer=tf.constant_initializer(18)):
#     # if the variable_scope defines an initializer and the variable does not, the scope's initializer is used by default inside the with block
# b_test_2 = tf.get_variable(name="b", shape=[1], dtype=tf.float32, trainable=True,
# initializer=tf.zeros_initializer())
# ValueError: Variable b already exists, disallowed.
# Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
# print(w, b, b_test)
print(w, b)
# Predicted values
y_ = tf.matmul(a=input_x, b=w) + b
print(y_)
# Loss function (mean squared error: sigma((y - y_) ** 2) / m)
# reduce_mean: aggregate to the mean (wraps up a bit of boilerplate)
loss = tf.reduce_mean(tf.square(input_y - y_))
# Define the optimizer (how the loss-minimizing model parameters <variables> are found)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# Define the training operation
"""
How model parameters are updated (wrapped inside GradientDescentOptimizer)
Summary: from the given loss (a Tensor), walk backwards through the graph to every variable and update it so that the loss decreases.
NOTE:
1. All variables in the graph are collected and, once initialized, updated so as to minimize the loss.
2. To let only the model parameters (w, b) be affected, keep just those two as variables and make everything else a constant (constant, placeholder),
or exclude other variables from training with trainable=False.
3. minimize() bundles "find the variables" and "update their values".
"""
# Update rule: take the given loss, compute gradients, and update the model parameters (i.e. the variables)
tran_op = optimizer.minimize(loss)
# == equivalent to ==
# # fetch the trainable variables
# var_list = tf.trainable_variables()
# # compute the gradients
# grads_and_vars = optimizer.compute_gradients(loss, var_list)
# # apply the gradients (update the model parameters)
# tran_op = optimizer.apply_gradients(grads_and_vars)
# Get the variable-initialization operation
# init_op = tf.initialize_all_variables() # return global_variables_initializer()
init_op = tf.global_variables_initializer()
# II. Run
with tf.Session() as sess:
# 1. Load data
# Initialize the variables first, otherwise:
# FailedPreconditionError: Attempting to use uninitialized value input_y
sess.run(init_op)
# 2. Execute the graph
for i in range(20):
loss_, w_, b_, input_x_, input_y_, tran_op_ = sess.run(
[loss, w, b, input_x, input_y, tran_op])
print("损失函数:{}".format(loss_))
print("w:{}".format(w_))
print("b:{}".format(b_))
print("input_x:{}".format(input_x_))
print("input_y:{}".format(input_y_))
print("*" * 20)
# 3. Inspect the results
# 4. Persist the model
pass
def build_and_exec_model_2():
# DIMEN: dimensionality of the input features
# DIMEN = 3
DIMEN = 1
with tf.Graph().as_default():
# I. Build the execution-graph objects based on business knowledge
# 2. Model construction
# a. Define the input placeholders x and y
# trainable: whether a variable participates in training updates
input_x = tf.placeholder(name="input_x", dtype=tf.float32, shape=[None, DIMEN])
input_y = tf.placeholder(name="input_y", dtype=tf.float32, shape=[None, 1])
print(input_x, input_y)
# Model parameters
# w: a DIMEN x 1 matrix initialized from a Gaussian with mean 0.0 and stddev 1.0
w = tf.get_variable(name="w", dtype=tf.float32, shape=[DIMEN, 1],
initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0))
# b: a length-1 bias vector initialized to zeros
b = tf.get_variable(name="b", shape=[1], dtype=tf.float32,
initializer=tf.zeros_initializer())
print(w, b)
# Predicted values
y_ = tf.matmul(a=input_x, b=w) + b
print(y_)
# Loss function (mean squared error: sigma((y - y_) ** 2) / m)
# reduce_mean: aggregate to the mean (wraps up a bit of boilerplate)
loss = tf.reduce_mean(tf.square(input_y - y_))
# Define the optimizer (how the loss-minimizing model parameters <variables> are found)
# Commonly used learning rates: 0.001, 0.0001
# Conservative starting points: 1e-8, 1e-6
""" If NaNs show up, the learning rate and/or the number of iterations need adjusting """
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-4)
# Define the training operation
"""
模型参数更新方式(GradientDescentOptimizer底层封装)
总结:通过给定的损失函数(loss:Tensor对象),反向查找到此图中的所有变量进行更新,使得损失函数最小
NOTE:
1. 获取图中所有变量,在满足初始化前提条件下,适当更新变量,使得损失函数最小
2. 若想只受模型参数(w,b)影响,则需保留两个变量(w,b),其他为常量(constant,placeholder)
或者让其他变量不参与模型训练,即trainable=False
3. minimize方法中封装了,找出变量,更新变量值的方法
"""
train_op = optimizer.minimize(loss)
# Get the variable-initialization operation
# init_op = tf.initialize_all_variables() # return global_variables_initializer()
init_op = tf.global_variables_initializer()
# II. Run
with tf.Session() as sess:
# 1. Load data
""" a. Variable initialization """
# FailedPreconditionError: Attempting to use uninitialized value input_y
sess.run(init_op)
""" b. Generate/obtain the training data (randomly via numpy; a fixed dataset also works) """
"""
np.random.normal (draws samples from a Gaussian distribution)
Parameters
----------
loc : float or array_like of floats
Mean ("centre") of the distribution.
scale : float or array_like of floats
Standard deviation (spread or "width") of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``loc`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
np.random.randn (draws a matrix of standard-normal samples)
-------
"""
# a 1-D arithmetic sequence of N points over [0, 6] plus N Gaussian-noise samples (mean 0, stddev 2)
N = 300
""" 此处多维不好画图,统一使用一维 """
b = np.array([8])
# 3x1
# w = np.array([0.2, 0.8, 0.3]).reshape((-1, DIMENSION)).T
# 1x1
w = np.array([12]).reshape((-1, DIMEN)).T
# N x DIMEN
train_x = (np.linspace(0, 6, N) + np.random.normal(0, 2, N)).reshape((-1, DIMEN))
# (N x DIMEN) dot (DIMEN x 1) -> N x 1, plus the bias and N x 1 Gaussian noise
train_y = np.dot(train_x, w) + b + np.random.normal(0, 2, N // DIMEN).reshape((-1, 1))
            # 2. Execute the graph
            """ c. Train the model """
for step in range(1000):
                # Run one training step
loss_, tran_op_ = sess.run(
[loss, train_op], feed_dict={
input_x: train_x,
input_y: train_y
})
print("第{}次训练后模型的损失函数:{}".format(step, loss_))
# 3. 图效果查看
""" d. 构造一个测试数据集或者使用训练数据集,得到该数据对应的预测值,做一个可视化操作 """
predict = sess.run(y_, feed_dict={
input_x: train_x
})
            # training data as red dots
            plt.plot(train_x, train_y, "ro")
            # predictions as a green line
plt.plot(train_x, predict, 'g-')
plt.show()
            # 4. Persistence
pass
if __name__ == '__main__':
# build_and_exec_model()
build_and_exec_model_2()
| null |
2019/AI/DeepLearning/TensorFlow/01_practice/03_basic_grammer.py
|
03_basic_grammer.py
|
py
| 12,858 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.Graph",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.random_normal_initializer",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.zeros_initializer",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "tensorflow.Graph",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.random_normal_initializer",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.zeros_initializer",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.GradientDescentOptimizer",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 295,
"usage_type": "name"
}
] |
73396807
|
import numpy as np
from numba import njit, float32
class WeightMetTrigger(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def begin(self, event):
self.cats = sorted([nmu for nmu in self.correction_files.keys()])
self.bins = []
self.corr = []
self.corr_up = []
self.corr_down = []
for nmuon in self.cats:
bins, corr, corr_up, corr_down = read_file(self.correction_files[nmuon])
self.bins.append(bins)
self.corr.append(corr)
self.corr_up.append(corr_up)
self.corr_down.append(corr_down)
def event(self, event):
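        # Muon multiplicity per event: difference of the jagged-array start/stop offsets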
nmuons = event.MuonSelection.pt.stops - event.MuonSelection.pt.starts
met = event.METnoX.pt
corrs, corrs_up, corrs_down = get_correction(
self.cats, self.bins, self.corr, self.corr_up, self.corr_down,
nmuons, met,
)
event.Weight *= corrs
event.Weight_metTrigSFUp = np.divide(corrs_up, corrs,
out=np.zeros_like(corrs_up),
where=corrs!=0)
event.Weight_metTrigSFDown = np.divide(corrs_down, corrs,
out=np.zeros_like(corrs_down),
where=corrs!=0)
@njit
def get_correction(cats, bins, incorr, incorr_up, incorr_down, nmuons, met):
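    # For each event, pick the correction table matching its muon multiplicity and
    # look up the MET bin, returning nominal/up/down SFs (1.0 when nothing matches).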
nev = nmuons.shape[0]
corrs = np.ones(nev, dtype=float32)
corrs_up = np.ones(nev, dtype=float32)
corrs_down = np.ones(nev, dtype=float32)
for iev in range(nev):
if nmuons[iev] not in cats:
continue
cat = cats.index(nmuons[iev])
for ibin in range(bins[cat].shape[0]):
if bins[cat][ibin,0] <= met[iev] < bins[cat][ibin,1]:
corrs[iev] = incorr[cat][ibin]
corrs_up[iev] = incorr_up[cat][ibin]
corrs_down[iev] = incorr_down[cat][ibin]
break
return corrs, corrs_up, corrs_down
def read_file(path, overflow=True):
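    # Parse a whitespace-delimited correction file, skipping '#' comments and the
    # header row; judging by the indexing, columns are [label, lo, hi, corr, down, up].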
with open(path, 'r') as f:
lines = [l.split()
for l in f.read().splitlines()
if l.strip()[0]!="#"][1:]
    bins = np.array([list(map(float, l[1:3])) for l in lines])
corr = np.array([float(l[3]) for l in lines])
corr_up = np.array([1.+float(l[5]) for l in lines])
corr_down = np.array([1.+float(l[4]) for l in lines])
if overflow:
bins[-1,-1] = np.infty
return bins, corr, corr_up, corr_down
| null |
sequence/Modules/WeightMetTrigger.py
|
WeightMetTrigger.py
|
py
| 2,559 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.divide",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numba.float32",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numba.float32",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numba.float32",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numba.njit",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.infty",
"line_number": 68,
"usage_type": "attribute"
}
] |
50700885
|
#!/usr/bin/python3
import sys, json
from collections import defaultdict
# This script was designed to allow users to add TTP:keyword associations
# The input parameters for the script are as follows:
# - python add_user_keywords.py [enterprise|mobile] "Txxxx:keywords,to add,here:Txxxx:other,keywords to add,etc."
# - The "Txxxx" in this case represents the TTP ID, i.e. T1234
# This is the relative path from the script to the files it needs
relative_path = sys.path[0] + '/ttpkeywords/'
# These are the output files for the user-specified keywords
# I decided against using additional_enterprise_keywords.json and additional_mobile_keywords.json as the output files
# The files mentionned above allow for multiple keywords to be added to ttps with similar name contents
# While this script allows adding keywords to a specified TTP ID
enterprise_file = relative_path + 'user_enterprise_keywords.json'
mobile_file = relative_path + 'user_mobile_keywords.json'
# Setting up the variables according to the provided parameters when running the script
if sys.argv[1] == 'enterprise':
selected_file = enterprise_file
elif sys.argv[1] == 'mobile':
selected_file = mobile_file
else:
print('Invalid matrix selected!')
print('Please run the script using the following parameters:')
print('python add_user_keywords.py [mobile|enterprise] "T1234:keywords to add,separated,by,commas:T5678:same,thing,here,etc."')
sys.exit()
print(sys.path[0])
# Adding the keywords
with open(selected_file, 'r+') as file:
keyword_dict = json.load(file)
file.seek(0)
input_list = sys.argv[2].split(':')
# Incrementing by 2 here because of how the input is formatted
# Even index numbers represent TTP IDs, while odd index numbers represents the keywords
for i in range(0, len(input_list), 2):
keyword_dict.setdefault(input_list[i],[]).extend(input_list[i+1].split(','))
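        # Deduplicate the keyword list for this TTP ID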
keyword_dict[input_list[i]] = list(set(keyword_dict[input_list[i]]))
    json.dump(keyword_dict, file, indent = 4)
    # Truncate in case the rewritten JSON is shorter than the original file contents
    file.truncate()
| null |
add_user_keywords.py
|
add_user_keywords.py
|
py
| 2,027 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 44,
"usage_type": "call"
}
] |
484428340
|
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: Alexey Kazantsev <[email protected]>
"""
"""
Tests some of the api in cuda4py.cudnn package.
"""
import cuda4py as cu
import cuda4py.cudnn as cudnn
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.old_env = os.environ.get("CUDA_DEVICE")
if self.old_env is None:
os.environ["CUDA_DEVICE"] = "0"
self.ctx = cu.Devices().create_some_context()
self.cudnn = cudnn.CUDNN(self.ctx)
self.path = os.path.dirname(__file__)
if not len(self.path):
self.path = "."
def tearDown(self):
if self.old_env is None:
del os.environ["CUDA_DEVICE"]
else:
os.environ["CUDA_DEVICE"] = self.old_env
del self.old_env
del self.cudnn
del self.ctx
gc.collect()
def test_constants(self):
self.assertEqual(cudnn.CUDNN_STATUS_SUCCESS, 0)
self.assertEqual(cudnn.CUDNN_STATUS_NOT_INITIALIZED, 1)
self.assertEqual(cudnn.CUDNN_STATUS_ALLOC_FAILED, 2)
self.assertEqual(cudnn.CUDNN_STATUS_BAD_PARAM, 3)
self.assertEqual(cudnn.CUDNN_STATUS_INTERNAL_ERROR, 4)
self.assertEqual(cudnn.CUDNN_STATUS_INVALID_VALUE, 5)
self.assertEqual(cudnn.CUDNN_STATUS_ARCH_MISMATCH, 6)
self.assertEqual(cudnn.CUDNN_STATUS_MAPPING_ERROR, 7)
self.assertEqual(cudnn.CUDNN_STATUS_EXECUTION_FAILED, 8)
self.assertEqual(cudnn.CUDNN_STATUS_NOT_SUPPORTED, 9)
self.assertEqual(cudnn.CUDNN_STATUS_LICENSE_ERROR, 10)
self.assertEqual(cudnn.CUDNN_DATA_FLOAT, 0)
self.assertEqual(cudnn.CUDNN_DATA_DOUBLE, 1)
self.assertEqual(cudnn.CUDNN_DATA_HALF, 2)
self.assertEqual(cudnn.CUDNN_TENSOR_NCHW, 0)
self.assertEqual(cudnn.CUDNN_TENSOR_NHWC, 1)
self.assertEqual(cudnn.CUDNN_CONVOLUTION, 0)
self.assertEqual(cudnn.CUDNN_CROSS_CORRELATION, 1)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 1)
self.assertEqual(
cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, 0)
self.assertEqual(
cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, 1)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_ALGO_GEMM, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_ALGO_DIRECT, 3)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_ALGO_FFT, 4)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING, 5)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE, 0)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 1)
self.assertEqual(
cudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, 0)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1, 1)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3, 3)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE, 0)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 1)
self.assertEqual(
cudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, 0)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_1, 1)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT, 2)
self.assertEqual(cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING, 3)
self.assertEqual(cudnn.CUDNN_POOLING_MAX, 0)
self.assertEqual(cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING, 1)
self.assertEqual(cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, 2)
def test_errors(self):
idx = cu.CU.ERRORS[cudnn.CUDNN_STATUS_NOT_INITIALIZED].find(" | ")
self.assertGreater(idx, 0)
def test_version(self):
logging.debug("CUDNN version is %d", self.cudnn.version)
self.assertEqual(self.cudnn.version, int(self.cudnn.version))
def test_tensor_descriptor(self):
d = cudnn.TensorDescriptor()
self.assertIsNotNone(d.handle)
for dt in (cudnn.CUDNN_DATA_DOUBLE, cudnn.CUDNN_DATA_FLOAT):
for fmt in (cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_TENSOR_NHWC):
d.set_4d(fmt, dt, 100, 50, 217, 215)
del d
def test_filter_descriptor(self):
d = cudnn.FilterDescriptor()
self.assertIsNotNone(d.handle)
for dt in (cudnn.CUDNN_DATA_DOUBLE, cudnn.CUDNN_DATA_FLOAT):
d.set_4d(dt, 64, 3, 11, 12)
del d
def test_convolution_descriptor(self):
d = cudnn.ConvolutionDescriptor()
self.assertIsNotNone(d.handle)
for mode in (cudnn.CUDNN_CROSS_CORRELATION, cudnn.CUDNN_CONVOLUTION):
d.set_2d(1, 2, 3, 4, 1, 1, mode)
del d
def _init_descriptors(self, include_out=False):
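        # Shared fixture: build a 2-D convolution descriptor plus matching input and
        # filter tensor descriptors (and, optionally, the derived output descriptor).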
conv = cudnn.ConvolutionDescriptor()
conv.set_2d(5, 4, 2, 1)
inp = cudnn.TensorDescriptor()
inp.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
100, 8, 208, 224)
filter = cudnn.FilterDescriptor()
filter.set_4d(cudnn.CUDNN_DATA_FLOAT, 64, 8, 11, 7)
if not include_out:
return conv, inp, filter
n, c, h, w = cudnn.CUDNN.get_convolution_2d_forward_output_dim(
conv, inp, filter)
out = cudnn.TensorDescriptor()
out.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT, n, c, h, w)
return conv, inp, filter, out
def test_get_convolution_2d_forward_output_dim(self):
conv, inp, filter = self._init_descriptors()
n, c, h, w = cudnn.CUDNN.get_convolution_2d_forward_output_dim(
conv, inp, filter)
self.assertEqual(n, 100)
self.assertEqual(c, 64)
self.assertEqual(h, 104)
self.assertEqual(w, 226)
def test_get_convolutional_forward_algorithm(self):
logging.debug("ENTER: test_get_convolutional_forward_algorithm")
conv, inp, filter, out = self._init_descriptors(True)
algo = self.cudnn.get_convolution_forward_algorithm(
inp, filter, conv, out)
self.assertGreaterEqual(algo, 0)
logging.debug("Fastest algo is %d", algo)
algo = self.cudnn.get_convolution_forward_algorithm(
inp, filter, conv, out,
cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
512 * 1024 * 1024)
logging.debug("With 512 Mb limit: %d", algo)
logging.debug("EXIT: test_get_convolutional_forward_algorithm")
def test_get_convolution_forward_workspace_size(self):
logging.debug("ENTER: test_get_convolution_forward_workspace_size")
conv, inp, filter, out = self._init_descriptors(True)
algo = self.cudnn.get_convolution_forward_algorithm(
inp, filter, conv, out)
for a in (algo, cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM,
cudnn.CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
cudnn.CUDNN_CONVOLUTION_FWD_ALGO_DIRECT):
try:
sz = self.cudnn.get_convolution_forward_workspace_size(
inp, filter, conv, out, a)
except cu.CUDARuntimeError as e:
self.assertEqual(e.code, cudnn.CUDNN_STATUS_NOT_SUPPORTED)
continue
self.assertGreaterEqual(sz, 0)
logging.debug("algo=%d size=%d", a, sz)
logging.debug("EXIT: test_get_convolution_forward_workspace_size")
def test_convolution_forward(self):
logging.debug("ENTER: test_convolution_forward")
conv_desc = cudnn.ConvolutionDescriptor()
conv_desc.set_2d(5, 4, 2, 1)
inp_data = numpy.zeros((100, 8, 104, 112), dtype=numpy.float32)
inp_data[:] = 0.1
inp_desc = cudnn.TensorDescriptor()
inp_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*inp_data.shape)
inp_buf = cu.MemAlloc(self.ctx, inp_data)
filter_data = numpy.zeros((64, 8, 11, 7), dtype=numpy.float32)
filter_data[:] = 0.3
filter_desc = cudnn.FilterDescriptor()
filter_desc.set_4d(cudnn.CUDNN_DATA_FLOAT, *filter_data.shape)
filter_buf = cu.MemAlloc(self.ctx, filter_data)
n, c, h, w = cudnn.CUDNN.get_convolution_2d_forward_output_dim(
conv_desc, inp_desc, filter_desc)
out_data = numpy.zeros((n, c, h, w), dtype=numpy.float32)
out_desc = cudnn.TensorDescriptor()
out_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*out_data.shape)
out_buf = cu.MemAlloc(self.ctx, out_data)
workspace = cu.MemAlloc(self.ctx, 512 * 1024 * 1024)
algo = self.cudnn.get_convolution_forward_algorithm(
inp_desc, filter_desc, conv_desc, out_desc,
cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace.size)
alpha = numpy.ones(1, dtype=numpy.float32)
beta = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.convolution_forward(
alpha, inp_desc, inp_buf, filter_desc, filter_buf, conv_desc,
algo, workspace, workspace.size, beta, out_desc, out_buf)
out_buf.to_host(out_data)
self.assertEqual(numpy.count_nonzero(out_data), out_data.size)
logging.debug("EXIT: test_convolution_forward")
def test_convolution_backward_bias(self):
logging.debug("ENTER: test_convolution_backward_bias")
bperr_data = numpy.zeros((100, 64, 104, 226), dtype=numpy.float32)
bperr_data[:] = 0.1
bperr_desc = cudnn.TensorDescriptor()
bperr_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*bperr_data.shape)
bperr_buf = cu.MemAlloc(self.ctx, bperr_data)
gd_data = numpy.zeros(64, dtype=numpy.float32)
gd_desc = cudnn.TensorDescriptor()
gd_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
1, gd_data.size, 1, 1)
gd_buf = cu.MemAlloc(self.ctx, gd_data)
alpha = numpy.ones(1, dtype=numpy.float32)
beta = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.convolution_backward_bias(alpha, bperr_desc, bperr_buf,
beta, gd_desc, gd_buf)
gd_buf.to_host(gd_data)
self.assertEqual(numpy.count_nonzero(gd_data), gd_data.size)
logging.debug("EXIT: test_convolution_backward_bias")
def test_convolution_backward_filter(self):
logging.debug("ENTER: test_convolution_backward_filter")
conv_desc = cudnn.ConvolutionDescriptor()
conv_desc.set_2d(5, 5, 1, 1)
inp_data = numpy.zeros((100, 8, 96, 96), dtype=numpy.float32)
inp_data[:] = 0.1
inp_desc = cudnn.TensorDescriptor()
inp_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*inp_data.shape)
inp_buf = cu.MemAlloc(self.ctx, inp_data)
filter_data = numpy.zeros((64, 8, 11, 11), dtype=numpy.float32)
filter_data[:] = 0.1
filter_desc = cudnn.FilterDescriptor()
filter_desc.set_4d(cudnn.CUDNN_DATA_FLOAT, *filter_data.shape)
filter_buf = cu.MemAlloc(self.ctx, filter_data)
bperr_data = numpy.zeros((100, 64, 96, 96), dtype=numpy.float32)
bperr_data[:] = 0.1
bperr_desc = cudnn.TensorDescriptor()
bperr_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*bperr_data.shape)
bperr_buf = cu.MemAlloc(self.ctx, bperr_data)
gd_data = numpy.zeros_like(filter_data)
gd_desc = cudnn.FilterDescriptor()
gd_desc.set_4d(cudnn.CUDNN_DATA_FLOAT, *gd_data.shape)
gd_buf = cu.MemAlloc(self.ctx, gd_data)
alpha = numpy.ones(1, dtype=numpy.float32)
beta = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.convolution_backward_filter(
alpha, inp_desc, inp_buf, bperr_desc, bperr_buf, conv_desc,
beta, gd_desc, gd_buf)
gd_buf.to_host(gd_data)
self.assertEqual(numpy.count_nonzero(gd_data), gd_data.size)
if self.cudnn.version >= 4000:
algo = self.cudnn.get_convolution_backward_filter_algorithm(
inp_desc, bperr_desc, conv_desc, gd_desc)
logging.debug("Fastest algo is %d", algo)
sz = self.cudnn.get_convolution_backward_filter_workspace_size(
inp_desc, bperr_desc, conv_desc, gd_desc, algo)
logging.debug("Workspace size for it is %d", sz)
algo = self.cudnn.get_convolution_backward_filter_algorithm(
inp_desc, bperr_desc, conv_desc, gd_desc,
cudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
512 * 1024 * 1024)
logging.debug("With 512 Mb limit: %d", algo)
workspace = cu.MemAlloc(self.ctx, 512 * 1024 * 1024)
gd_buf.memset32_async()
self.cudnn.convolution_backward_filter(
alpha, inp_desc, inp_buf, bperr_desc, bperr_buf, conv_desc,
beta, gd_desc, gd_buf, algo, workspace, workspace.size)
gd_buf.to_host(gd_data)
self.assertEqual(numpy.count_nonzero(gd_data), gd_data.size)
logging.debug("EXIT: test_convolution_backward_filter")
def test_convolution_backward_data(self):
logging.debug("ENTER: test_convolution_backward_data")
conv_desc = cudnn.ConvolutionDescriptor()
conv_desc.set_2d(5, 5, 1, 1)
inp_data = numpy.zeros((100, 8, 96, 96), dtype=numpy.float32)
inp_desc = cudnn.TensorDescriptor()
inp_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*inp_data.shape)
inp_buf = cu.MemAlloc(self.ctx, inp_data)
filter_data = numpy.zeros((64, 8, 11, 11), dtype=numpy.float32)
filter_data[:] = 0.1
filter_desc = cudnn.FilterDescriptor()
filter_desc.set_4d(cudnn.CUDNN_DATA_FLOAT, *filter_data.shape)
filter_buf = cu.MemAlloc(self.ctx, filter_data)
bperr_data = numpy.zeros((100, 64, 96, 96), dtype=numpy.float32)
bperr_data[:] = 0.1
bperr_desc = cudnn.TensorDescriptor()
bperr_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*bperr_data.shape)
bperr_buf = cu.MemAlloc(self.ctx, bperr_data)
alpha = numpy.ones(1, dtype=numpy.float32)
beta = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.convolution_backward_data(
alpha, filter_desc, filter_buf, bperr_desc, bperr_buf, conv_desc,
beta, inp_desc, inp_buf)
inp_buf.to_host(inp_data)
self.assertEqual(numpy.count_nonzero(inp_data), inp_data.size)
if self.cudnn.version >= 4000:
algo = self.cudnn.get_convolution_backward_data_algorithm(
filter_desc, bperr_desc, conv_desc, inp_desc)
logging.debug("Fastest algo is %d", algo)
sz = self.cudnn.get_convolution_backward_data_workspace_size(
filter_desc, bperr_desc, conv_desc, inp_desc, algo)
logging.debug("Workspace size for it is %d", sz)
algo = self.cudnn.get_convolution_backward_data_algorithm(
filter_desc, bperr_desc, conv_desc, inp_desc,
cudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
512 * 1024 * 1024)
logging.debug("With 512 Mb limit: %d", algo)
workspace = cu.MemAlloc(self.ctx, 512 * 1024 * 1024)
inp_buf.memset32_async()
self.cudnn.convolution_backward_data(
alpha, filter_desc, filter_buf, bperr_desc, bperr_buf,
conv_desc, beta, inp_desc, inp_buf,
algo, workspace, workspace.size)
inp_buf.to_host(inp_data)
self.assertEqual(numpy.count_nonzero(inp_data), inp_data.size)
logging.debug("EXIT: test_convolution_backward_data")
def test_transform_tensor(self):
logging.debug("ENTER: test_transform_tensor")
sh_interleaved = (2, 5, 5, 3)
sh_splitted = (2, 3, 5, 5)
inp_data = numpy.arange(numpy.prod(sh_interleaved),
dtype=numpy.float32).reshape(sh_interleaved)
inp_desc = cudnn.TensorDescriptor()
inp_desc.set_4d(cudnn.CUDNN_TENSOR_NHWC, cudnn.CUDNN_DATA_FLOAT,
*sh_splitted)
inp_buf = cu.MemAlloc(self.ctx, inp_data)
out_data = numpy.zeros(sh_splitted, dtype=numpy.float32)
out_desc = cudnn.TensorDescriptor()
out_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*sh_splitted)
out_buf = cu.MemAlloc(self.ctx, out_data)
alpha = numpy.ones(1, dtype=numpy.float32)
beta = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.transform_tensor(alpha, inp_desc, inp_buf,
beta, out_desc, out_buf)
out_buf.to_host(out_data)
max_diff = numpy.fabs(out_data - inp_data.transpose(0, 3, 1, 2)).max()
self.assertEqual(max_diff, 0.0)
logging.debug("EXIT: test_transform_tensor")
def test_pooling(self):
logging.debug("ENTER: test_pooling")
input_data = numpy.zeros((5, 96, 64, 48), dtype=numpy.float32)
input_data[:] = numpy.random.rand(input_data.size).reshape(
input_data.shape) - 0.5
input_desc = cudnn.TensorDescriptor()
input_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*input_data.shape)
input_buf = cu.MemAlloc(self.ctx, input_data)
pooling_desc = cudnn.PoolingDescriptor()
pooling_desc.set_2d((5, 3), (2, 1), (3, 2))
output_shape = cudnn.CUDNN.get_pooling_2d_forward_output_dim(
pooling_desc, input_desc)
self.assertEqual(len(output_shape), 4)
logging.debug("Output shape is %s", output_shape)
output_data = numpy.zeros(output_shape, dtype=numpy.float32)
output_data[:] = numpy.nan
output_desc = cudnn.TensorDescriptor()
output_desc.set_4d(cudnn.CUDNN_TENSOR_NCHW, cudnn.CUDNN_DATA_FLOAT,
*output_data.shape)
output_buf = cu.MemAlloc(self.ctx, output_data)
np_one = numpy.ones(1, dtype=numpy.float32)
np_zero = numpy.zeros(1, dtype=numpy.float32)
self.cudnn.pooling_forward(
pooling_desc, np_one, input_desc, input_buf,
np_zero, output_desc, output_buf)
output_buf.to_host(output_data)
self.assertEqual(numpy.count_nonzero(numpy.isnan(output_data)), 0)
diff_desc = output_desc
diff_buf = cu.MemAlloc(self.ctx, output_data)
grad_desc = input_desc
grad_data = input_data.copy()
grad_data[:] = numpy.nan
grad_buf = cu.MemAlloc(self.ctx, grad_data)
self.cudnn.pooling_backward(
pooling_desc, np_one, output_desc, output_buf,
diff_desc, diff_buf, input_desc, input_buf, np_zero,
grad_desc, grad_buf)
grad_buf.to_host(grad_data)
self.assertEqual(numpy.count_nonzero(numpy.isnan(grad_data)), 0)
logging.debug("EXIT: test_pooling")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| null |
tests/test_cudnn.py
|
test_cudnn.py
|
py
| 21,419 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.Devices",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_SUCCESS",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_NOT_INITIALIZED",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_ALLOC_FAILED",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_BAD_PARAM",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_INTERNAL_ERROR",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_INVALID_VALUE",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_ARCH_MISMATCH",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_MAPPING_ERROR",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_EXECUTION_FAILED",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_NOT_SUPPORTED",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_LICENSE_ERROR",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_DOUBLE",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_HALF",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NHWC",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CROSS_CORRELATION",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_NO_WORKSPACE",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_PREFER_FASTEST",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_FFT",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_0",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_1",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_POOLING_MAX",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "cuda4py.CU",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_NOT_INITIALIZED",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_DOUBLE",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NHWC",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_DOUBLE",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.ConvolutionDescriptor",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CROSS_CORRELATION",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.ConvolutionDescriptor",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN.get_convolution_2d_forward_output_dim",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.CUDNN.get_convolution_2d_forward_output_dim",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "cuda4py.CUDARuntimeError",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.CUDNN_STATUS_NOT_SUPPORTED",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.ConvolutionDescriptor",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN.get_convolution_2d_forward_output_dim",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.ConvolutionDescriptor",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.ConvolutionDescriptor",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.FilterDescriptor",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NHWC",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 420,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "numpy.fabs",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.PoolingDescriptor",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN.get_pooling_2d_forward_output_dim",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn.CUDNN",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn.TensorDescriptor",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_TENSOR_NCHW",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.cudnn",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "cuda4py.cudnn.CUDNN_DATA_FLOAT",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 475,
"usage_type": "attribute"
},
{
"api_name": "cuda4py.MemAlloc",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 489,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 490,
"usage_type": "call"
}
] |
86173917
|
import cv2
import numpy as np
image=cv2.imread('messi.jpg')
cv2.imshow("Original",image)
cv2.waitKey(0)
B,G,R=cv2.split(image)
# now creating a zero matrix to extract each RGB component separately
zeros=np.zeros(image.shape[:2],dtype="uint8")
#for Blue Color
cv2.imshow("Blue",cv2.merge([B,zeros,zeros]))
cv2.waitKey(0)
#for Green Color
cv2.imshow("Green",cv2.merge([zeros,G,zeros]))
cv2.waitKey(0)
#for Red Color
cv2.imshow("Red",cv2.merge([zeros,zeros,R]))
cv2.waitKey(0)
cv2.destroyAllWindows()
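# An equivalent sketch without cv2.split/cv2.merge (an addition, not part of the
# original flow): OpenCV stores channels in BGR order, so zeroing the unwanted
# channels with NumPy slicing yields the same single-colour views.
blue_only = image.copy()
blue_only[:, :, 1:] = 0  # keep B, zero out G and R
# cv2.imshow("Blue (slicing)", blue_only) would display the same result as above.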
| null |
open cv/7.How to extract RGB Color Space/rgb_cs.py
|
rgb_cs.py
|
py
| 497 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 18,
"usage_type": "call"
}
] |
32693150
|
# ~ OPEN SOURCE ~
# CODED BY FADIL YAZID
# FB : FADIL ID
import os
import sys
import time
import requests
def mulai():
print
print('\t\x1b[1;92m [ \x1b[1;97mSPAM SMS MAPCLUB \x1b[1;92m]\x1b[1;97m')
print('\t <~~~~~~~~~~~~~~~~~~>')
print('\t Author : Fadil ID')
print
print ('( Ex : 0822*** )')
Nomor = raw_input('Nomor Target : \x1b[1;92m')
if len(Nomor) < 9:
print
print('\x1b[1;97m[\x1b[1;91m ! \x1b[1;97m] Nomor Invalid')
time.sleep(1)
os.system('clear')
mulai()
else:
pass
Nomor = int(Nomor)
print
print ('\x1b[1;97m(Ex : 5)')
Jumlah = input('Jumlah : \x1b[1;92m')
if Jumlah > 15:
print
print('\x1b[1;97m[\x1b[1;91m !\x1b[1;97m ] Jangan terlalu banyak \nKalo Ga Mau Tools Ini Coid :v')
print
sys.exit()
else:
pass
print
print('Mulai Mengirim...')
print
MapClub(Nomor, Jumlah)
def MapClub(Nomor, Jumlah):
for _ in range(Jumlah):
time.sleep(5)
headers = {
'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9',
'Referer' : 'https://mapclub.com/id/user/signup'
}
r = requests.post('https://cmsapi.mapclub.com/api/signup-otp', data = {'phone' : Nomor}, allow_redirects = True)
if 'error' in r.text:
print('\x1b[1;97m[\x1b[1;91m*\x1b[1;97m] Gagal Mengirim Sms Ke Nomor \x1b[1;92m' + str(Nomor))
else:
print('\x1b[1;97m[\x1b[1;92m*\x1b[1;97m] Berhasil Mengirim Sms Ke Nomor \x1b[1;92m' + str(Nomor))
if __name__ == '__main__':
os.system('clear')
mulai()
| null |
spam.py
|
spam.py
|
py
| 1,689 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 64,
"usage_type": "call"
}
] |
595872240
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^polls/$', 'polls.views.index'),
url(r'^polls/(?P<artistid>\d+)/song_view/$', 'polls.views.song_view'),
url(r'^polls/(?P<songid>\d+)/snippet_view/$', 'polls.views.snippet_view'),
url(r'^polls/post/$', 'post'),
url(r'^polls/search-form/$', 'polls.views.search_form'),
url(r'^polls/search/$', 'polls.views.search'),
url(r'^admin/', include(admin.site.urls)),
)
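# Note: `patterns()` and string view references were removed in Django 1.10+.
# A rough, untested sketch of the modern equivalent would be:
#   from django.urls import include, path
#   from polls import views
#   urlpatterns = [
#       path('polls/', views.index),
#       path('admin/', admin.site.urls),
#   ]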
| null |
mysite_hipopathy/mysite/urls.py
|
urls.py
|
py
| 580 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.patterns",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 14,
"usage_type": "name"
}
] |
364668724
|
import re
import requests
import csv
link = []
title = []
watchs = []
num = []
day = []
up_space = []
up_names = []
message = input("Enter the content you want to search for: ")
op = int(input("How many pages would you like to scrape? (max 50) "))
for i in range(1,op+1):
try:
url = 'https://search.bilibili.com/all?keyword=' + str(message)+"&from_source=nav_search_new&page="+str(i)
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'}
response = requests.get(url,headers=headers)
response.encoding = 'utf-8'
html = response.text
pattern = re.compile('<li class="video-item matrix"><a href="//(.*?)" title="(.*?)" target="_blank" class="img-anchor"><div class="img"><div class="lazy-img"><img alt="" src=""></div><span class="so-imgTag_rb">.*?</span><div class="watch-later-trigger watch-later"></div><span class="mask-video"></span></div><!----></a><div class="info"><div class="headline clearfix"><!----><!----><span class="type hide">.*?</div><div class="tags"><span title="观看" class="so-icon watch-num"><i class="icon-playtime"></i>(.*?)</span><span title="弹幕" class="so-icon hide"><i class="icon-subtitle"></i>(.*?)</span><span title="上传时间" class="so-icon time"><i class="icon-date"></i>(.*?)</span><span title="up主" class="so-icon"><i class="icon-uper"></i><a href="//(.*?)" target="_blank" class="up-name">(.*?)</a></span></div></div></li>',re.S)
items = re.findall(pattern,html)
print("正在爬取第",str(i),"页")
for it in items:
link.append(it[0].strip())
title.append(it[1].strip())
watchs.append(it[2].strip())
num.append(it[3].strip())
day.append(it[4].strip())
up_space.append(it[5].strip())
up_names.append(it[6].strip())
except:
pass
with open('output.csv', mode='w', newline='', encoding='gb18030') as csv_file:
fieldnames = ['video_link', 'title', 'views', 'danmaku_count', 'up_space', 'up']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
n = 0
for li in link:
try:
writer.writerow({'video_link': link[n], 'title': title[n], 'views': watchs[n], 'danmaku_count': num[n], 'up_space': up_space[n], 'up': up_names[n]})
n = n + 1
except:
pass
print("爬取完成!")
| null |
spider/B站_re_csv.py
|
B站_re_csv.py
|
py
| 2,532 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 39,
"usage_type": "call"
}
] |
183828008
|
# -*- coding: utf8 -*-
import json, requests
import datetime
train_text=""
final_count = 0
near_list = ['Chicago', 'Manchester', 'London', 'Liverpool', 'Oxford']
for k in range(len(near_list)):
# Get recommended restaurants when searching with a keyword
params = dict(
client_id='',
client_secret='',
v='20170801',
near = near_list[k],
#near='Chicago',
#section = 'food',
query='meal',
limit=50
)
#EXPLORE = get venue recommendations
url = 'https://api.foursquare.com/v2/venues/explore'
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
res_id = []
res_name = []
for i in range(len(data['response']['groups'][0]['items'])):
res_id.append(data['response']['groups'][0]['items'][i]['venue']['id'])
res_name.append(data['response']['groups'][0]['items'][i]['venue']['name'])
count = 0
url1 = 'https://api.foursquare.com/v2/venues/'
url3 = '/tips'
for i in range(len(res_name)):
url2 = str(res_id[i])
url = url1+url2+url3
params2 = dict(
venue_id = url2,
client_id='FN5PS1FYKITOSMYDRGMK3UGQVAVZD5GNTCPGBDTFDHJLMC42',
client_secret='PXPLYT5M23203GXN4ABRCZEKC5ETYJX3EU3SQZKDF20FNSXC',
sort = 'popular', # 'popular' or 'recent' are allowed
limit= 1000,
offset=1,
v='20170101'
)
resp = requests.get(url = url, params=params2)
data = json.loads(resp.text)
print('----------------------------------------')
print('Collecting reviews for restaurant:', res_name[i])
print('----------------------------------------')
print('Number of reviews:', data['response']['tips']['count'])
for j in range(len(data['response']['tips']['items'])):
print(j+1, " ",data['response']['tips']['items'][j]['text'])
train_text = train_text + str(data['response']['tips']['items'][j]['text'])
train_text = train_text + " :flag: "
count += 1
when = data['response']['tips']['items'][j]['createdAt']
when = datetime.datetime.fromtimestamp(when)
print('  review written on:', when)
print('---------------------------------------------')
print(near_list[k], 'region - reviews collected:', count)
print('---------------------------------------------')
final_count += count
print('Total reviews collected:', final_count)
# SAVE REVIEW
with open('./train_reviews.txt', 'w', encoding='utf-8') as f:
f.write(train_text)
| null |
Foursquare_API/get_res_reviews.py
|
get_res_reviews.py
|
py
| 2,395 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 67,
"usage_type": "attribute"
}
] |
291849764
|
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
# from mat4py import loadmat
# #from torchsummary import summary
# from graphviz import Digraph
# from torchviz import make_dot
# from graphviz import Source
import time
HOME = 0
if torch.cuda.is_available() and HOME == 0:
from google.colab import drive
drive.mount("/content/gdrive", force_remount=True)
c = 3 * 10 ** 8
dt = 10 ** (-7)
Ts = 0.8000e-06
L = int(Ts / dt)
T = 400
class BuildGRUStack(nn.Module):
def __init__(self, input_size, rnn_size, num_layers):
super(BuildGRUStack, self).__init__()
self.input_size = input_size
self.rnn_size = rnn_size
self.num_layers = num_layers
l_i2h_lst = [nn.Linear(self.input_size, 3 * self.rnn_size)]
l_h2h_lst = [nn.Linear(self.rnn_size, 3 * self.rnn_size)]
# l_bn_lst = [nn.BatchNorm1d(3 * self.rnn_size)]
# self.l_do = nn.Dropout(0.25)
for L in range(1, self.num_layers):
l_i2h_lst.append(nn.Linear(self.rnn_size, 3 * self.rnn_size))
l_h2h_lst.append(nn.Linear(self.rnn_size, 3 * self.rnn_size))
# l_bn_lst.append(nn.BatchNorm1d(3 * self.rnn_size))
self.l_i2h = nn.ModuleList(l_i2h_lst)
self.l_h2h = nn.ModuleList(l_h2h_lst)
# self.l_bn = nn.ModuleList(l_bn_lst)
def forward(self, x, prev_hs):
self.x_size = []
self.prev_h = 0
self.next_hs = []
self.i2h = []
self.h2h = []
for L in range(self.num_layers):
self.prev_h = prev_hs[L]
if L == 0:
self.x = x
else:
self.x = self.next_hs[L - 1]
# self.i2h.append(self.l_do(self.l_bn[L](self.l_i2h[L](self.x))))
# self.h2h.append(self.l_do(self.l_bn[L](self.l_h2h[L](self.prev_h))))
self.i2h.append(self.l_i2h[L](self.x))
self.h2h.append(self.l_h2h[L](self.prev_h))
Wx1, Wx2, Wx3 = self.i2h[L].chunk(3, dim=1) # it should return 4 tensors self.rnn_size
Uh1, Uh2, Uh3 = self.h2h[L].chunk(3, dim=1)
zt = torch.sigmoid(Wx1 + Uh1)
rt = torch.sigmoid(Wx2 + Uh2)
h_candidate = torch.tanh(Wx3 + rt * Uh3)
ht = (1-zt) * self.prev_h + zt * h_candidate
self.next_hs.append(ht)
return torch.stack(self.next_hs)
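# For reference, the gate algebra implemented above (a standard GRU, with this
# code's convention that z_t gates the candidate state):
#   z_t = sigmoid(W_z x_t + U_z h_{t-1})
#   r_t = sigmoid(W_r x_t + U_r h_{t-1})
#   h_cand_t = tanh(W_h x_t + r_t * (U_h h_{t-1}))
#   h_t = (1 - z_t) * h_{t-1} + z_t * h_cand_t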
class BuildGRUUnrollNet(nn.Module):
def __init__(self, num_unroll, num_layers, rnn_size, input_size):
super(BuildGRUUnrollNet, self).__init__()
self.num_unroll = num_unroll
self.num_layers = num_layers
self.rnn_size = rnn_size
self.input_size = input_size
self.outputs = []
self.output = []
self.now_h = []
self.buildGRUstack_lst = []
for i in range(0, self.num_unroll):
self.buildGRUstack_lst.append(BuildGRUStack(self.input_size, self.rnn_size, self.num_layers))
self.buildGRUstack = nn.ModuleList(self.buildGRUstack_lst)
def forward(self, x, init_states_input):
self.init_hs = []
self.now_hs = []
self.outputs = []
init_states = init_states_input.reshape((init_states_input.size(0), self.num_layers * 2, self.rnn_size))
init_states_lst = list(init_states.chunk(self.num_layers * 2, 1))
for i in range(self.num_layers):
self.init_hs.append(init_states_lst[2 * i].reshape(init_states_input.size(0), self.rnn_size))
self.now_hs.append(torch.stack(self.init_hs))
for i in range(self.num_unroll):
self.now_h = self.buildGRUstack[i](x[:, torch.arange(i*2*T,(i+1)*2*T)], self.now_hs[i])
self.now_hs.append(self.now_h)
self.outputs.append(self.now_hs[i + 1][-1])
# for L in range(self.num_layers):
# setattr(self, 'hid_%d_%d' %(i, L), self.now_hs[i][L])
# setattr(self, 'cell_%d_%d' %(i, L), self.now_cs[i][L])
for i in range(1, self.num_unroll):
for j in range(self.num_layers):
self.buildGRUstack[i].l_i2h[j].weight.data = self.buildGRUstack[0].l_i2h[j].weight.data
self.buildGRUstack[i].l_h2h[j].weight.data = self.buildGRUstack[0].l_h2h[j].weight.data
self.buildGRUstack[i].l_i2h[j].bias.data = self.buildGRUstack[0].l_i2h[j].bias.data
self.buildGRUstack[i].l_h2h[j].bias.data = self.buildGRUstack[0].l_h2h[j].bias.data
self.buildGRUstack[i].l_i2h[j].weight.grad = self.buildGRUstack[0].l_i2h[j].weight.grad
self.buildGRUstack[i].l_h2h[j].weight.grad = self.buildGRUstack[0].l_h2h[j].weight.grad
self.buildGRUstack[i].l_i2h[j].bias.grad = self.buildGRUstack[0].l_i2h[j].bias.grad
self.buildGRUstack[i].l_h2h[j].bias.grad = self.buildGRUstack[0].l_h2h[j].bias.grad
self.output = self.outputs[0]
for i in range(1, self.num_unroll):
self.output = torch.cat((self.output, self.outputs[i]), 1)
return self.output
class GetGRUNet(nn.Module):
def __init__(self, num_unroll, num_layers, rnn_size, output_size, input_size):
super(GetGRUNet, self).__init__()
self.num_unroll, self.num_layers, self.rnn_size, self.output_size, self.input_size = num_unroll, num_layers, rnn_size, output_size, input_size
self.l_pred_l = nn.Linear(self.num_unroll * self.rnn_size, self.output_size)
self.GRUnet = BuildGRUUnrollNet(self.num_unroll, self.num_layers, self.rnn_size, self.input_size)
self.l_pred_bn = nn.BatchNorm1d(self.output_size)
# setattr(self, 'GRUNetLinear', self.l_pred_l)
def forward(self, x, init_states_input):
self.GRU_output = self.GRUnet(x, init_states_input)
self.pred = self.l_pred_bn(self.l_pred_l(self.GRU_output))
return self.pred
###########Usage#######################################
# input_size = 20
# output_size = 50
# rnn_size = 10
# num_layers = 2
# num_unroll = 3
# # graph of net
# x = torch.rand(3, input_size)
# z = torch.zeros(3, rnn_size * num_layers * 2)
# model = BuildGRUStack(input_size, rnn_size, num_layers)
# init_hs = []
# init_cs = []
# init_states = z.reshape((z.size(0),num_layers * 2, rnn_size))
# init_states_lst = list(init_states.chunk(num_layers * 2,1))
# for i in range(num_layers):
# init_hs.append(init_states_lst[2*i].reshape(num_layers,rnn_size))
# init_cs.append(init_states_lst[2*i+1].reshape(num_layers,rnn_size))
# now_hs, now_cs = model(x, init_hs, init_cs)
# temp = make_dot((now_hs[2], now_cs[2]), params=dict(list(model.named_parameters())))
# s = Source(temp, filename="BuildGRUStack.gv", format="png")
# s.view()
#
# model = BuildGRUUnrollNet(num_unroll, num_layers, rnn_size, input_size)
# out = model(x, z)
# temp = make_dot(out, params=dict(list(model.named_parameters())+ [('x', x)]+ [('z', z)]))
# s = Source(temp, filename="BuildGRUUnrollNet.gv", format="png")
# s.view()
#
# model = GetGRUNet(num_unroll, num_layers, rnn_size, output_size, input_size)
# output = model(x,z)
# for i in range(1, num_unroll):
# for j in range(num_layers):
# model.GRUnet.buildGRUstack[i].l_i2h[j].weight = model.GRUnet.buildGRUstack[0].l_i2h[j].weight
# model.GRUnet.buildGRUstack[i].l_h2h[j].weight = model.GRUnet.buildGRUstack[0].l_h2h[j].weight
# model.GRUnet.buildGRUstack[i].l_i2h[j].bias = model.GRUnet.buildGRUstack[0].l_i2h[j].bias
# model.GRUnet.buildGRUstack[i].l_h2h[j].bias = model.GRUnet.buildGRUstack[0].l_h2h[j].bias
# print(model)
# temp = make_dot(output, params=dict(list(model.named_parameters())+ [('x', x)]+ [('z', z)]))
# s = Source(temp, filename="test.gv", format="png")
# s.view()
# modell = nn.Sequential()
# modell.add_module('W0', nn.Linear(8, 16))
# modell.add_module('tanh', nn.Tanh())
# modell.add_module('W1', nn.Linear(16, 1))
#
# x = torch.randn(1,8)
#
# temp = make_dot(modell(x), params=dict(modell.named_parameters()))
#
# s = Source(temp, filename="test.gv", format="png")
# s.view()
class MultiClassNLLCriterion(torch.nn.Module):
def __init__(self):
super(MultiClassNLLCriterion, self).__init__()
self.lsm = nn.LogSoftmax(dim=1)
self.nll = nn.NLLLoss()
self.output = 0
self.outputs = 0
def forward(self, inputs, target):
self.output = self.lsm(inputs)
shape = target.shape
self.outputs = 0
# print(self.output.shape)
# print(target.shape)
for i in range(0, shape[1]):
self.outputs = self.outputs + self.nll(self.output, target[:, i].squeeze())
return self.outputs # /shape[1]
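# Usage sketch (hypothetical shapes): with batch size 4, 10 classes and 3 target
# indices per sample, the criterion sums the NLL over the target columns:
#   crit = MultiClassNLLCriterion()
#   loss = crit(torch.randn(4, 10), torch.randint(0, 10, (4, 3)))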
# match number
def AccS(label, pred_prob):
num_nonz = label.shape[1]
_, pred = pred_prob.topk(num_nonz) # ?!
pred = pred.float()
t_score = torch.zeros(label.shape).to(device)
# print(label.get_device())
# print(pred.get_device())
for i in range(0, num_nonz):
for j in range(0, num_nonz):
t_score[:, i].add_(label[:, i].float().eq(pred[:, j]).float())
return t_score.mean()
# loose match
def AccL(label, pred_prob):
num_nonz = label.shape[1]
_, pred = pred_prob.topk(20) # ?!
pred = pred.float()
t_score = torch.zeros(label.shape).to(device)
for i in range(0, num_nonz):
for j in range(0, 20):
t_score[:, i].add_(
label[:, i].float().eq(pred[:, j]).float()) # t_score[:,i].add(label[:,i].eq(pred[:,j])).float()
return t_score.mean()
# strict match
def AccM(label, pred_prob):
num_nonz = label.shape[1]
_, pred = pred_prob.topk(num_nonz) # ?!
pred = pred.float()
t_score = torch.zeros(label.shape).to(device)
for i in range(0, num_nonz):
for j in range(0, num_nonz):
t_score[:, i].add_(
label[:, i].float().eq(pred[:, j]).float()) # t_score[:,i].add(label[:,i].eq(pred[:,j])).float()
return t_score.sum(1).eq(num_nonz).sum().item() * 1. / pred.shape[0]
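# In short: AccS scores label hits within the top-num_nonz predictions, AccL
# loosens this to the top 20, and AccM counts only exact full-set matches.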
gpu = 1 # gpu id
if torch.cuda.is_available() and HOME == 0:
batch_size = 256 # training batch size
else:
batch_size = 5 # 600000 #
lr = 0.002 # basic learning rate
lr_decay_startpoint = 250 # learning rate from which epoch
num_epochs = 200 # total training epochs
max_grad_norm = 5.0
clip_gradient = 4.0
N = 8 # the number of receivers
M = 3 # the number of transmitters
K = 3 # the number of targets
# task related parameters
# task: y = Ax, given A recovery sparse x from y
dataset = 'uniform' # type of non-zero elements: uniform ([-1,-0.1]U[0.1,1]), unit (+-1)
# num_nonz = K*N*M*2 # number of non-zero elements to recover: 3,4,5,6,7,8,9,10
num_nonz = K # number of non-zero elements to recover: 3,4,5,6,7,8,9,10
input_size = T*2 # dimension of observation vector y
output_size = 13*13 # dimension of sparse vector x
# model hyper parameters
rnn_size = 200 # number of units in RNN cell
num_layers = 2 # number of stacked RNN layers
num_unroll = N # number of RNN unrolled time steps
# torch.set_num_threads(16)
# manualSeed = torch.randint(1,10000,(1,))
# print("Random seed " + str(manualSeed.item()))
# torch.set_default_tensor_type(torch.FloatTensor)
device = torch.device("cuda:0" if torch.cuda.is_available() and HOME == 0 else "cpu")
print(device)
if torch.cuda.is_available():
train_size = int(batch_size*800) #
valid_size = int(batch_size*200) # #
else:
train_size = 100 # 600000 #
valid_size = 10 # 100000 #
print(train_size)
print(valid_size)
print(batch_size)
print(N*input_size)
valid_data = torch.zeros(valid_size, N*input_size).to(device)
valid_label = torch.zeros(valid_size, num_nonz).type(torch.LongTensor).to(device)
batch_data = torch.zeros(batch_size, N*input_size).to(device)
batch_label = torch.zeros(batch_size, num_nonz).to(device) # for MultiClassNLLCriterion LOSS
batch_zero_states = torch.zeros(batch_size, num_layers * rnn_size * 2).to(device) # init_states for GRU
# AccM, AccL, Accs = 0, 0, 0
err = 0
model_all = "model_l_" + str(num_layers) + "t_" + str(num_unroll) + '_gru_mimo_' + str(rnn_size)
logger_file = model_all + str(dataset) + "_" + str(num_nonz) + '.log'
if torch.cuda.is_available():
logger_file = "/content/gdrive/My Drive/" + logger_file # or torch.save(net, PATH)
else:
logger_file = "./" + logger_file
logger = open(logger_file, 'w')
# for k,v in pairs(opt) do logger:write(k .. ' ' .. v ..'\n') end
# logger:write('network have ' .. paras:size(1) .. ' parameters' .. '\n')
# logger:close()
# torch.manual_seed(10)
# if torch.cuda.is_available():
# mat_A = torch.load("/content/gdrive/My Drive/mat_A.pt").to(device)
# else:
# mat_A = torch.load("./mat_A.pt").to(device)
def gen_mimo_samples(SNR_dB, M, N, K, NOISE, H):
DB = 10. ** (0.1 * SNR_dB)
# N = 8 # the number of receivers
# M = 1 # the number of transmitters
# K = 1 # the number of targets
# np.random.seed(15)
# Position of receivers
x_r = np.array([1000, 2000, 2500, 2500, 2000, 1000, 500, 500])#*np.random.rand(1)# + 500 * (np.random.rand(N) - 0.5)) # \
# 1500,3000,500,2500,1000,1500,500,3000,\
# 2500,3500,1000,3500,2000,4000,3000,3000]+500*(np.random.rand(N)-0.5))
y_r = np.array([500, 500, 1000, 2000, 2500, 2500, 2000, 1500])#*np.random.rand(1)# + 500 * (np.random.rand(N) - 0.5)) # \
# 3500,3500,500,4000,4000,2500,3000,500,\
# 3500,3000,2000,1000,2000,500,4000,1500]+500*(np.random.rand(N)-0.5))
# Position of transmitters
x_t = np.array([0, 4000, 4000, 0, 1500, 0, 4000, 2000])#+500*np.random.rand(1)
y_t = np.array([0, 0, 4000, 4000, 4000, 1500, 1500, 0])#+500*np.random.rand(1)
# NOISE = 1 # on/off noise
# H = 1 # on/off reflection coefficients
rk = np.zeros([K, M, N]);
tk = np.zeros([K, M, N]);
tau = np.zeros([K, M, N]);
if H == 0:
h = np.ones([K, M, N])
else:
h = (np.random.randn(K, M, N) + 1j * np.random.randn(K, M, N)) / np.sqrt(2)
s = np.zeros([M, L]) + 1j * np.zeros([M, L])
for m in range(M):
s[m] = np.exp(1j * 2 * np.pi * (m) * np.arange(L) / M) / np.sqrt(L);#np.sqrt(0.5)*(np.random.randn(1,L)+1j*np.random.randn(1,L))/np.sqrt(L);#
#
# Ls = 875
# Le = Ls + 125 * 6
# dx = 125
Ls = 0
Le = Ls + 4000
dx = 333
dy = dx
x_grid = np.arange(Ls, Le, dx)
y_grid = np.arange(Ls, Le, dy)
size_grid_x = len(x_grid)
size_grid_y = len(y_grid)
grid_all_points = [[i, j] for i in x_grid for j in y_grid]
grid_all_points_a = np.array(grid_all_points)
r = np.zeros(size_grid_x * size_grid_y * M * N)
k_random_grid_points = np.array([])
# Position of targets
x_k = np.zeros([K])
y_k = np.zeros([K])
for kk in range(K):
x_k[kk] = np.random.randint(Ls,Le)+np.random.rand(1)
y_k[kk] = np.random.randint(Ls,Le)+np.random.rand(1)
k_random_grid_points_i = np.array([])
k_random_grid_points = np.array([])
for k in range(K):
calc_dist = np.sqrt((grid_all_points_a[range(size_grid_x * size_grid_y), 0] - x_k[k]) ** 2 \
+ (grid_all_points_a[range(size_grid_x * size_grid_y), 1] - y_k[k]) ** 2)
# grid_all_points_a[calc_dist.argmin()]
k_random_grid_points_i = np.append(k_random_grid_points_i, calc_dist.argmin())
# Time delays
for k in range(K):
for m in range(M):
for n in range(N):
tk[k, m, n] = np.sqrt((x_k[k] - x_t[m]) ** 2 + (y_k[k] - y_t[m]) ** 2)
rk[k, m, n] = np.sqrt((x_k[k] - x_r[n]) ** 2 + (y_k[k] - y_r[n]) ** 2)
tau[k, m, n] = (tk[k, m, n] + rk[k, m, n]) / c
r_glob = np.zeros([size_grid_x * size_grid_y * M * N]) + 1j * np.zeros([size_grid_x * size_grid_y * M * N])
for m in range(M):
for n in range(N):
for k in range(K):
r_glob[k_random_grid_points_i[k].astype(int)] = DB[k] * h[k, m, n] * \
np.sqrt(200000000000) * (1 / tk[k, m, n]) * (1 / rk[k, m, n])
k_random_grid_points = np.append(k_random_grid_points,k_random_grid_points_i)
k_random_grid_points_i = k_random_grid_points_i + size_grid_x * size_grid_y
# for m in range(M):
# for n in range(N):
# k_random_grid_points = np.append(k_random_grid_points,k_random_grid_points[-1] + size_grid_x * size_grid_y)
r[k_random_grid_points.astype(int)] = 1
if NOISE == 0:
x = np.zeros([N, T]) + 1j * np.zeros([N, T])
else:
x = (np.random.randn(N, T) + 1j * np.random.randn(N, T)) / np.sqrt(2)
for k in range(K):
for m in range(M):
for n in range(N):
l = np.floor(tau[k, m, n] / dt)
l = l.astype(int)
x[n, range(l, l + L)] = x[n, range(l, l + L)] + DB[k] * s[m, :] * h[k, m, n] * \
np.sqrt(200000000000) * (1 / tk[k, m, n]) * (1 / rk[k, m, n])
x_flat = x[0, :].transpose();
for n in range(1, N):
x_flat = np.concatenate([x_flat, x[n, :].transpose()], axis=0)
return x_flat, r, r_glob, k_random_grid_points
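# Signal model recap (as implemented above): each receiver n observes the sum over
# targets k and transmitters m of the waveform s_m, delayed by
# tau[k, m, n] = (||target_k - tx_m|| + ||target_k - rx_n||) / c and scaled by the
# SNR term, the reflection coefficient h and the two-way 1/distance attenuations,
# plus complex Gaussian noise when NOISE == 1.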
def gen_batch(batch_size, num_nonz, N, M, K, NOISE, H):
# NOISE = 1
# H = 1
SNR_dB = np.random.rand(3)
y, rr, rr_glob, label = gen_mimo_samples(SNR_dB, M, N, K, NOISE, H)
batch_data = torch.zeros(batch_size, 2*y.shape[0])
# batch_label = torch.zeros(batch_size, 2*label.shape[0]).to(device)
batch_label = torch.zeros(batch_size, label[range(num_nonz)].shape[0])
r1 = 40
r2 = 20
for i in range(batch_size):
# SNR_dB = ((r1 - r2) * torch.rand((1,)) + r2).item()
for k in range(K):
SNR_dB[k] = 20 # ((r1 - r2) * np.random.rand(1) + r2)
y, rr, rr_glob, label = gen_mimo_samples(SNR_dB, M, N, K, NOISE, H)
batch_data[i] = torch.cat([torch.from_numpy(y.real),torch.from_numpy(y.imag)])
# batch_data[i] = torch.cat([torch.from_numpy(np.abs(y))]).to(device)
# batch_label[i] = torch.cat([torch.from_numpy(label),torch.from_numpy(label+M*N*36)]).to(device)
batch_label[i] = torch.cat([torch.from_numpy(label[range(num_nonz)])])
return batch_label.type(torch.LongTensor).to(device), batch_data.to(device)
print("building validation set")
for i in range(0, valid_size, batch_size):
# mat_A = torch.rand(output_size, input_size).to(device)
batch_label, batch_data = gen_batch(batch_size, num_nonz, N, M, K, 1, 1)
# print(batch_label.shape)
# print("batch_data shape = " + str(batch_data.shape))
# print("valid_data shape = " + str(valid_data.shape))
# print(range(i,i+batch_size-1))
valid_data[range(i, i + batch_size), :] = batch_data
valid_label[range(i, i + batch_size), :] = batch_label
print('done')
best_valid_accs = 0
base_epoch = lr_decay_startpoint
base_lr = lr
optimState = {'learningRate': 0.001, 'weightDecay': 0.0001}
net = GetGRUNet(num_unroll, num_layers, rnn_size, output_size, input_size)
# print(net)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
# summary(net,[(num_layers,input_size),(num_layers,rnn_size * num_layers * 2)])
# summary(net,[(batch_size, input_size),(batch_size, num_layers * rnn_size * 2)])
# create a stochastic gradient descent optimizer
# optimizer = optim.RMSprop(params=net.parameters(), lr=0.001, alpha=0.9, eps=1e-04, weight_decay=0.0001, momentum=0, centered=False)
# create a loss function
LOSS = MultiClassNLLCriterion()
optimizer = optim.RMSprop(params=net.parameters(), lr=optimState['learningRate'], \
alpha=0.9, eps=1e-05, weight_decay=optimState['weightDecay'], momentum=0.0, centered=False)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3,6,9,12,15], gamma=0.25)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=4,
# verbose=True, threshold=0.0001, threshold_mode='rel', \
# cooldown=0, min_lr=0, eps=1e-08)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5)
if torch.cuda.is_available():
checkpoint = torch.load("/content/gdrive/My Drive/" + model_all + "_" + str(num_nonz) + ".pth") # or torch.save(net, PATH)
else:
checkpoint = torch.load("./" + model_all + "_" + str(num_nonz) + ".pth") # or torch.save(net, PATH)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
epoch = checkpoint['epoch'] + 1
loss = checkpoint['loss']
# epoch = 0
print(net)
print(device)
# mat_A = torch.rand(output_size, input_size).to(device)
for epoch in range(epoch, num_epochs):
for param_group in optimizer.param_groups:
print(param_group['lr'])
# learning rate self-adjustment
# if(epoch > 250):
# optimState['learningRate'] = base_lr / (1 + 0.06 * (epoch - base_epoch))
# if(epoch % 50 == 0): base_epoch = epoch; base_lr= base_lr * 0.25
logger = open(logger_file, 'a')
# train
train_accs = 0
train_accl = 0
train_accm = 0
train_err = 0
nbatch = 0
net.train()
start = time.time()
for i in range(0, train_size, batch_size):
batch_label, batch_data = gen_batch(batch_size, num_nonz, N, M, K, 1, 1)
batch_label.to(device)
optimizer.zero_grad()
pred_prob = net(batch_data, batch_zero_states).to(device) # 0 or 1?!
err = LOSS(pred_prob, batch_label.to(device))
err.backward()
with torch.no_grad():
for name, param in net.named_parameters():
# print(name)
# print(param.grad.data)
param.grad.clamp_(-4.0, 4.0)
gnorm = param.grad.norm()
if (gnorm > max_grad_norm):
param.grad.mul_(max_grad_norm / gnorm)
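# Note: this per-parameter clamp-and-rescale is roughly what
# torch.nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm) does, except
# the built-in rescales by the global norm across all parameters at once.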
optimizer.step()
# print(pred_prob.get_device())
# print(batch_label.get_device())
batch_accs = AccS(batch_label[:, range(0, num_nonz)], pred_prob.to(device).float())
batch_accl = AccL(batch_label[:, range(0, num_nonz)], pred_prob.to(device).float())
batch_accm = AccM(batch_label[:, range(0, num_nonz)], pred_prob.to(device).float())
train_accs = train_accs + batch_accs.item()
train_accl = train_accl + batch_accl.item()
train_accm = train_accm + batch_accm
train_err = train_err + err.item()
nbatch = nbatch + 1
if (nbatch) % 255 == 1:
print("Epoch " + str(epoch) + " Batch " + str(nbatch) + " {:.4} {:.4} {:.4} loss = {:.4}".format(batch_accs,
batch_accl,
batch_accm,
err.item()))
# for param_group in optimizer.param_groups:
# print(param_group['lr'])
end = time.time()
print("Train [{}] Time {} s-acc {:.4} l-acc {:.4} m-acc {:.4} err {:.4}".format(epoch, end - start, \
train_accs / nbatch,
train_accl / nbatch, \
train_accm / nbatch,
train_err / nbatch))
logger.write("Train [{}] Time {:.4} s-acc {:.4} l-acc {:.4} m-acc {:.4} err {:.4}\n".format(epoch, end - start, \
train_accs / nbatch,
train_accl / nbatch, \
train_accm / nbatch,
train_err / nbatch))
# eval
nbatch = 0
valid_accs = 0
valid_accl = 0
valid_accm = 0
valid_err = 0
start = time.time()
net.eval()
for i in range(0, valid_size, batch_size):
batch_data = valid_data[range(i, i + batch_size), :]
batch_label[:, range(0, num_nonz)] = valid_label[range(i, i + batch_size), :]
pred_prob = net(batch_data, batch_zero_states)
err = LOSS(pred_prob, batch_label)
batch_accs = AccS(batch_label[:, range(0, num_nonz)], pred_prob.float())
batch_accl = AccL(batch_label[:, range(0, num_nonz)], pred_prob.float())
batch_accm = AccM(batch_label[:, range(0, num_nonz)], pred_prob.float())
valid_accs = valid_accs + batch_accs.item()
valid_accl = valid_accl + batch_accl.item()
valid_accm = valid_accm + batch_accm
valid_err = valid_err + err.item()
nbatch = nbatch + 1
# scheduler.step(valid_err / nbatch)
scheduler.step()
# if (nbatch+99) % 100 == 0:
# print("Eval Epoch " + str(epoch) + " Batch " + str(nbatch) + " {:.4} {:.4} {:.4} loss = {:.4}".format(batch_accs, batch_accl,
# batch_accm, err.item()))
end = time.time()
print("Valid [{}] Time {} s-acc {:.4} l-acc {:.4} m-acc {:.4} err {:.4}".format(epoch, end - start, \
valid_accs / nbatch,
valid_accl / nbatch, \
valid_accm / nbatch,
valid_err / nbatch))
logger.write("Valid [{}] Time {} s-acc {:.4} l-acc {:.4} m-acc {:.4} err {:.4}\n".format(epoch, end - start, \
valid_accs / nbatch,
valid_accl / nbatch, \
valid_accm / nbatch,
valid_err / nbatch))
# if(valid_accs > best_valid_accs):
# best_valid_accs = valid_accs
# print("saving model")
# logger.write('saving model\n')
# checkpoint = {'epoch': epoch,
# 'model_state_dict': net.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': err.item()}
# # torch.save(checkpoint, 'checkpoint.pth')
# torch.save(checkpoint, "./checkpoints/"+model_all+"_"+str(num_nonz)+".pth") #or torch.save(net, PATH)
# #net.load_state_dict(torch.load(PATH)) # or the_model = torch.load(PATH)
# if(epoch % 2 == 0):
print("saving model")
logger.write('saving model\n')
checkpoint = {'epoch': epoch, \
'model_state_dict': net.state_dict(), \
'optimizer_state_dict': optimizer.state_dict(), \
'scheduler_state_dict': scheduler.state_dict(), \
'loss': err.item()}
if torch.cuda.is_available():
torch.save(checkpoint, "/content/gdrive/My Drive/" + model_all + "_" + str(num_nonz) + ".pth") # or torch.save(net, PATH)
else:
torch.save(checkpoint, "./" + model_all + "_" + str(num_nonz) + ".pth") # or torch.save(net, PATH)
logger.close()
| null |
model_gru_mimo_0.py
|
model_gru_mimo_0.py
|
py
| 28,077 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "google.colab.drive.mount",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "google.colab.drive",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.sigmoid",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.stack",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm1d",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.RMSprop",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 631,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 634,
"usage_type": "call"
}
] |
14336982
|
import argparse
def printParsers(subparsers_actions):
# but better safe than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("Subparser '{}'".format(choice))
print(subparser.format_help())
# create the top-level parser
parser = argparse.ArgumentParser(prog='PARSING PROGRAM')
parser.add_argument('--foo', action='store_true', help='foo help')
subparsers = parser.add_subparsers(help='sub-command help')
# create the parser for the "A" command
parser_a = subparsers.add_parser('A', help='A help')
parser_a.add_argument('--bar', type=int, help='bar help')
# create the parser for the "B" command
parser_b = subparsers.add_parser('B', help='B help')
parser_b.add_argument('--baz', '-b', choices='XYZ', help='baz help') #choices='XYZ'
# print main help
# create the parser for the command "KEY"
parser_key = subparsers.add_parser('KEY', help='KEY help')
parser_key.add_argument('-k', dest='key', help='Insert key(s): [YAML format]')
print(parser.format_help())
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
#printParsers(subparsers_actions)
#________________________________
print('OLOLO')
options = parser.parse_args()
#if options.bar:
# print('[From parser A]', 'bar = ', options.bar)
if options.baz:
print('[From parser B]', 'baz = ', options.baz)
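# Example invocations (hypothetical, run from a shell):
#   python parser.py --foo A --bar 3
#   python parser.py B -b X
#   python parser.py KEY -k "mykey: myvalue"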
| null |
FF-Tool/src_core/parser.py
|
parser.py
|
py
| 1,551 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse._SubParsersAction",
"line_number": 35,
"usage_type": "attribute"
}
] |
493718179
|
import random
import wikilocation
from wikipedia import wikipedia
def get_articles(lat, lon):
"""
:type lat: str
:type lon: str
:return: list of dicts representing articles
"""
# Use really large radius, in case very far away from somewhere.
# Results are sorted by distance and limited so that works fine.
radius = 20000 # Upper limit
landmark_articles = wikilocation.articles(lat, lon, radius, 10, "landmark")
# event_articles = wikilocation.articles(lat, lon, radius, 5, "event")
if len(landmark_articles) == 0:
OLD_STREET_ROUNDABOUT = ("51.525603","-0.087558")
lat, lon = OLD_STREET_ROUNDABOUT
landmark_articles = wikilocation.articles(lat, lon, radius, 10, "landmark")
# wikilocation_articles = event_articles + landmark_articles
# wikilocation_articles = random.sample(wikilocation_articles, 5)
# wikilocation_articles = _interleave(landmark_articles, event_articles)
wikilocation_articles = landmark_articles
wikilocation_articles = _remove_lists(wikilocation_articles)
articles = []
for wikilocation_article in wikilocation_articles:
article = {}
title = wikilocation_article["title"]
article["title"] = title
# first_sentence = wikipedia.summary(title, sentences=1)
page = wikipedia.page(title)
# article["first_sentence"] = first_sentence
article["summary"] = page.summary
article["image"] = "http://upload.wikimedia.org/wikipedia/commons/3/3c/Stonehenge2007_07_30.jpg"
article["url"] = page.url
articles.append(article)
return articles
# def _interleave(l1, l2):
# return [val for pair in zip(l1, l2) for val in pair]
def _remove_lists(articles):
def not_list(article):
return "list" not in article["title"].lower()
return list(filter(not_list, articles))
| null |
model.py
|
model.py
|
py
| 1,875 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wikilocation.articles",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wikilocation.articles",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "wikipedia.wikipedia.page",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "wikipedia.wikipedia",
"line_number": 39,
"usage_type": "name"
}
] |
189195499
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from rdmo.core.validators import UniqueURIValidator
class OptionSetUniqueURIValidator(UniqueURIValidator):
app_label = 'options'
model_name = 'optionset'
def get_uri(self, model, data):
uri = model.build_uri(data.get('uri_prefix'), data.get('key'))
return uri
class OptionUniqueURIValidator(UniqueURIValidator):
app_label = 'options'
model_name = 'option'
def get_uri(self, model, data):
if 'key' not in data:
raise ValidationError({
'key': _('This field is required')
})
elif 'optionset' not in data:
raise ValidationError({
'optionset': _('This field is required')
})
else:
path = model.build_path(data.get('key'), data.get('optionset'))
uri = model.build_uri(data.get('uri_prefix'), path)
return uri
| null |
rdmo/options/validators.py
|
validators.py
|
py
| 999 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rdmo.core.validators.UniqueURIValidator",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "rdmo.core.validators.UniqueURIValidator",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 29,
"usage_type": "call"
}
] |
337938218
|
#necessary imports
import pandas_datareader as pdr
from pandas_datareader.data import DataReader as dr
import datetime as dt
from datetime import date
from yahoo_fin import stock_info as si
from yahoo_fin import options
import pandas as pd
import yfinance as yf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import DeltaPI
from DeltaPI.AutoDiff import AutoDiff
from DeltaPI.AutoDiff.AutoDiff import *
# first we define a Black-Scholes function that pulls up-to-date financial data and calculates delta and vega
def BS_Delta(ticker, exp_year, exp_month, exp_day, op_type, strike):
""" Obtains uptodate financial data for stock price, option price, volatility, risk free rate
and calculates delta and vega from Black Scholes equation. Returns delta, vega, time to expiration,
most uptodate share price and corresponding option price.
INPUTS:
======
ticker: accepts both upper and lower case (string)
exp_year: year of expiration (int)
exp_month: month of expiration (int)
exp_day: day of expiration (int)
opt_type: 'puts' for put and 'calls' for call(string)
strike: price at which the option can be exercised (int)
OUTPUT:
======
delta_bs: delta calculated using the Black-Scholes equation (partial derivative w.r.t. share price)
vega: vega calculated using the Black-Scholes equation (partial derivative w.r.t. share price volatility)
T_t: time to option expiration as fraction of the year
S: share price
C: option price
NOTES:
======
This function pulls the most up-to-date financial data from Yahoo Finance. The option values are
pulled during market hours (9:30am-4pm, Mon-Fri). When the market is closed, the function
will ask the user to input the option price. Also, just like the Black-Scholes equation, this function
relies on static volatility measured as the historical s.d. of the share price over the past year (i.e. we
pull in a year of share prices and take their standard deviation for sigma).
This function also requires the following packages for data retrieval, in addition to pandas, numpy and
datetime:
import sys
!{sys.executable} -m pip install yahoo_fin
!{sys.executable} -m pip install requests_html
!{sys.executable} -m pip install yfinance
EXAMPLE:
=======
We are not writing a formal doc test here because the outputs are not static. This illustration should
help the user understand how the package works.
Let's suppose you wanted to run a GE call expiring on Jan 17, 2020, with strike 12.
ticker='GE'
exp_year=2020
exp_month=1
exp_day=17
exp_date=str(str(exp_month)+'/'+str(exp_day)+'/'+str(exp_year))
op_type='calls'
strike=12
delta_bs, vega, T_t, S, C = BS_Delta(ticker, exp_year, exp_month, exp_day, op_type, strike)
0.2803917681986156 0.009142607098052206 0.10684931506849316 11.100000381469727 0.4027907848942922
!! since the function pulls live data, this example output will keep changing !!
"""
#convert to exp_date format
exp_date=str(str(exp_month)+'/'+str(exp_day)+'/'+str(exp_year))
#get current risk free rate
#we are using 10tyear treasury which is industry standard
tbills = ['DGS10']
yc = dr(tbills, 'fred')
risk_free=np.array(yc)[-1][0]/100
#get current date
today=date.today()
#get stock price
stock = yf.Ticker(ticker)
S=stock.history(period='min')['Open'][0]
#get price of option closest to the same date/strike as our option
try:
table=options.get_options_chain(ticker,exp_date )
strikes=table[op_type]['Strike']
closest_strike=min(strikes, key=lambda x: abs(x-strike))
index=table[op_type][table[op_type]['Strike'].isin([closest_strike])==True].index[0]
C=np.mean(table[op_type][['Bid','Ask']].iloc[index])
except:
print('Could not find live option price, will calculate implied price instead')
C=0 #place holder value, we will calculate black scholes implied value below
#print('could not find live price for this option type. please enter best available estimate:')
#C=float(input(""))
def vol_est(today, ticker):
#get volatility of underlying assets
end = today
start=dt.datetime.now() - dt.timedelta(days=365)
prices=list(pdr.get_data_yahoo(ticker, start, end)['Adj Close'])
returns=[]
i=0
while i < (len(prices)-1):
r=np.log(prices[i+1]/prices[i])
returns.append(r)
i=i+1
vol=np.std(returns)*(250**(1/2))
return vol
#setting up all the inputs for the Black Scholes formula
vol=vol_est(today, ticker)
T=dt.date(exp_year, exp_month, exp_day,)
t=today
r=risk_free
T_t=(T-t).days/365
#first we will calculate d1 and d2
d1=(np.log(S/strike)+(r+vol**2/2)*(T_t))/(vol*((T_t)**(1/2)))
d2=d1-vol*(T_t)**(1/2)
if C==0:
if op_type == 'calls':
C=S*(1/(2*np.pi)**(1/2))*np.e**(-d1)*d1 - strike*np.e**(-r*(T_t))*(1/(2*np.pi)**(1/2))*np.e**(-d1)*d1
else:
if op_type =='puts':
C=strike*np.e**(-r*T_t)*(1/(2*np.pi)**(1/2))*np.e**(d2)*(-d2)-S*(1/(2*np.pi)**(1/2))*np.e**(d1)*(-d1)
#now we will calculate delta depending on option type
if op_type=='calls':
delta_bs=1/((2*np.pi)**(1/2))*np.e**(-d1**2/2)
else:
delta_bs=1/((2*np.pi)**(1/2))*np.e**(-d1**2/2)-1
#calculate vega (same formula for puts and calls)
vega=S/100*np.e**(-T_t)*(T_t**(1/2))*delta_bs
return delta_bs, vega, T_t, S, C
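# Hedged usage sketch (left commented out: BS_Delta opens live yahoo finance
# connections, and the ticker/strike below are illustrative assumptions only):
# delta_bs, vega, T_t, S, C = BS_Delta('GE', 2020, 1, 17, 'calls', 12)
# print(delta_bs, vega)  # sensitivities w.r.t. share price and volatility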
#now we build a volatility surface plot function
def Volatility_Surface(ticker, exp_year, exp_month, exp_day, op_type, strike, price):
""" This function calculates and plots 2 volatility 3D surface plots of stock volatlity as calculated
by Bharadia and Corrado approximations vs. time and share price of the underlying asset.
INPUTS:
======
ticker: accepts both upper and lower case (string)
exp_year: year of expiration (int)
exp_month: month of expiration (int)
exp_day: day of expiration (int)
    op_type: 'puts' for put and 'calls' for call (string)
    strike: price at which the option can be exercised (int)
price: option price (float)
OUTPUT:
======
    3Dplot Bharadia: surface plot of estimated stock price volatility over the time-to-expiration and share price space
    3Dplot Corrado: surface plot of estimated stock volatility over the time-to-expiration and share price space
NOTES:
======
    This function pulls live stock price data and requires the following packages to run properly:
pandas
numpy
mpl_toolkits
matplotlib.pyplot
datetime
yahoo_fin
requests_html
yfinance
"""
#current data
today=date.today()
T=dt.date(exp_year, exp_month, exp_day,)
#cacl time to expiration
t=today
T_t=(T-t).days/365
#calculate volatility
def vol_est(today, ticker):
#get volatility of underlying assets
end = today
start=dt.datetime.now() - dt.timedelta(days=365)
prices=list(pdr.get_data_yahoo(ticker, start, end)['Adj Close'])
sd=np.std(prices)
return sd
vol=vol_est(today, ticker)
stock = yf.Ticker(ticker)
S=stock.history(period='min')['Open'][0]
#create a range of stock prices based on a + - 2 standard dev band (95% CI)
S_low=int(S-2*vol)
S_high=int(S+2*vol)
S_range=list(range(S_low, S_high))
#create an entire range of days upto the expriation date
days_range=list(range(1,int((T-t).days)))
C=price
vol_s=[]
vol_c=[]
#get range of volatlities for both methods
for i, t in enumerate(days_range):
vol_t=[]
vol_tc=[]
for i, s in enumerate(S_range):
vol_simple=(2*np.pi/(t/365))**(1/2)*((C-(s-strike))/2)/(s-(s-strike)/2)
vol_t.append(vol_simple)
vol_complex=((2*np.pi/(t/365))**2)**(1/2)*1/(s+strike)*(((C-(s-strike)/2+((C-(s-strike)/2))**2-(s-strike)**2/np.pi)**2)**(1/2))**(1/2)
vol_tc.append(vol_complex)
vol_s.append(vol_t)
vol_c.append(vol_tc)
data = np.array(vol_s)
#prepare the data for plotting
#plot Bharadia approximation
x, y = np.meshgrid(S_range,days_range)
fig = plt.figure(figsize=(12,10))
ax = plt.axes(projection='3d')
ax.plot_surface(x, y, data, rstride=1, cstride=1,cmap='viridis', edgecolor='none')
ax.set_title('Volatility Surface Simple (Bharadia et al. )')
ax.set_xlabel('stock price')
ax.set_ylabel('days to expiration')
ax.set_zlabel('volatility')
ax.view_init(30, 35)
    plt.show()
data = np.array(vol_c)
#plot Corrado approximation
x, y = np.meshgrid(S_range,days_range)
fig = plt.figure(figsize=(12,10))
ax = plt.axes(projection='3d')
ax.plot_surface(x, y, data, rstride=1, cstride=1,cmap='viridis', edgecolor='none')
ax.set_title('Volatility Surface Complex (Corrado et al. )')
ax.set_xlabel('stock price')
ax.set_ylabel('days to expiration')
ax.set_zlabel('volatility')
ax.view_init(30, 35)
    plt.show()
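# Hedged usage sketch (left commented out because it opens network connections
# and matplotlib windows; the premium 0.40 is an illustrative assumption):
# Volatility_Surface('GE', 2020, 1, 17, 'calls', 12, 0.40)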
def OptionsRun():
""" This an interactive enclosing functionn for two nested functions that prompts the user
to enter the details about their option position and returns 3 deltas calculated with Black
Scholes formula, Bharadia and Corrado estimation. The closure also calculates the amounnt of
stock that the user needs to buy or sell short to hedge their position and plots 2 volatlity surfaces.
INPUTS: (user prompted to enter these in the following order)
======
option type: the user is prompted to enter option type or exit
    ticker: accepts both upper and lower case (string), checks if it's valid
    strike: price at which the option can be exercised (int)
    exp_year: year of expiration (int), checks if it's valid
    exp_month: month of expiration (int), checks if it's valid
    exp_day: day of expiration (int), checks if it's valid
position: number of long/short puts or calls for this stock that are on the books
OUTPUT:
======
    delta Black Scholes: deploying the BS_Delta function above, the Black Scholes delta is returned
    delta Bharadia: deploying the AutoDiff package, partial derivatives of implied vol are calculated
                 and a delta estimate is returned
    delta Corrado: deploying the AutoDiff package, partial derivatives of implied vol are calculated
                 and a delta estimate is returned
    hedging instructions: whether the trader should buy or sell short and how much of the underlying asset.
    two plots: volatility surface plots for the Bharadia and Corrado methods respectively
NOTES:
======
    This package relies on the yahoo_fin and yfinance packages for live stock prices and option price data, as well as the stock's
    historical standard deviation calculated from the share price data pulled for the previous year.
    If there are any issues with the yahoo_fin and/or yfinance packages being able to pull this data due to yahoo-specific
    glitches, this extension package will not run. In a 'real world' application, this package would be
    linked to a more reliable platform like Bloomberg, which is what most traders use but is very expensive.
    Yahoo finance is free but slow and not always reliable.
"""
def ObtainInputs():
#nested function to collect data from the user
print("Please Select Type of Option to Evaluate")
print("1) Exit")
print("2) Puts")
print("3) Calls")
try:
option=int(input(""))
except:
print("Please enter number from the options above: ")
option=int(input(""))
#collect option type variable
while option not in [1,2,3]:
option=int(input("Please enter number from the options above: "))
if option==3:
op_type='calls'
else:
if option==2:
op_type='puts'
else:
print('Thank you. Goodbye!')
return
#collect ticker variable
print("Please Enter Ticker")
try:
value = str(input(''))
stock = yf.Ticker(value)#checking if the ticker is valid
S=stock.history(period='min')['Open'][0]
#si.get_live_price(value) #use this if yfinance is not working
except ValueError:
print ("Sorry, {} is not a valid ticker, try again".format(value))
ObtainInputs()
ticker=value
#collect strike value
print("Please Enter Strike")
try:
option=int(input(""))
        except: raise ValueError("Please enter an integer value")
strike=option
def Date():
        #nested function to collect and validate date values
today=date.today()
print("Please Enter Expiration Year")
try:
option=int(input(""))
if option<today.year or option > (today.year+2):
print("Please enter a valid listed year")
option=int(input(""))
            except: raise ValueError("Please enter an integer value")
exp_year=option
print("Please Enter Expiration Month")
try:
value=int(input(""))
            except: raise ValueError("Please enter a valid date")
if value>12 or value<1:
print("Invalid input for Month. Please try again")
value=int(input(""))
if exp_year==today.year and value<today.month:
print("Invalid input for Month. Please try again")
value=int(input(""))
exp_month=value
print("Please Enter Expiration Day")
try:
option=int(input(""))
exp_day=option
            except: raise ValueError("Please enter a valid date")
if exp_month in [1,3,5,7,8,10,12]:
if option>31 or option <1:
print("Invalid input for day. Please try again")
option=int(input(""))
else:
if exp_month==2 and exp_year==2020:
if option>29 or option <1:
print("Invalid input for day. Please try again")
option=int(input(""))
else:
if exp_month in [4,6,9,11]:
if option>30 or option<1:
print("Invalid input for day. Please try again")
option=int(input(""))
            exp_day=option
exp_date=str(str(exp_month)+'/'+str(exp_day)+'/'+str(exp_year))
return exp_year, exp_month, exp_day, exp_date
#obtain the valid dates in the right format
exp_year, exp_month, exp_day, exp_date = Date()
#calculate black scholes delta
delta_bs, vega, T_t, S, C = BS_Delta(ticker, exp_year, exp_month, exp_day, op_type, strike)
#print output
print("Black Scholes Delta: ", delta_bs)
K = AutoDiff(strike, 1) #####################--AUTODIFF USED HERE
simple_implied = np.sqrt(2*np.pi/T_t) * ( ( C -(S - K)/2 ) / ( S - (S - K)/2 ) )
deltaPi_simple_implied= simple_implied.derv
complex_implied = np.sqrt(2*np.pi/T_t) * (1/(S + K)) * ( C - ((S - K)/2)\
+ np.sqrt( (C - (S-K)/2)**2 - (S -K)**2/np.pi ))
deltaPi_complex_implied = complex_implied.derv
delta_simple=delta_bs+vega*deltaPi_simple_implied
delta_complex=delta_bs+vega*deltaPi_complex_implied
#print deltas after calculating them
if delta_complex is None:
delta_complex=delta_simple
print('Could not approximate Corrado due to complex numbers ')
print("Bharadia delta: ", delta_simple)
print("Corrado delta: ", delta_complex)
#check if the user would like to get delta hedging advice
print("Would You Like to Delta Hedge Your Position: y/n?")
try:
option=str(input(""))
except: raise ValueError("please entery 'y' or 'n'")
if option=='y':
print("How many units? Enter + values for long and - values for short")
try:
option=int(input(""))
except: raise ValueError('please enter an integer value')
units=option
if option >0 and op_type=='puts':
action = 'buy'
elif option>0 and op_type=='calls':
action = 'sell short'
elif option <0 and op_type=='calls':
action ='buy'
elif option <0 and op_type=='puts':
action = 'sell short'
recomend=abs(int(units*delta_bs))
recomend1=abs(int(units*delta_simple))
recomend2=abs(int(units*delta_complex))
print("According to Black Scholes you need to ", action, "",abs(int(recomend)), " shares of ",ticker )
print("According to Bharadia apporach you need to ", action, "",abs(int(recomend1)), " shares of ", ticker)
print("Accoding to Corrado approach you need to ", action, "", abs(int(recomend2)), " shares of ", ticker)
#plot 3D vol plots
Volatility_Surface(ticker, exp_year, exp_month, exp_day, op_type, strike,C)
return
else:
            #plot 3D vol plots
Volatility_Surface(ticker, exp_year, exp_month, exp_day, op_type, strike, C)
return
return
ObtainInputs()
OptionsRun()
| null |
build/lib/DeltaPI/Extension/DeltaHedgePi.py
|
DeltaHedgePi.py
|
py
| 19,253 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas_datareader.data.DataReader",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "yfinance.Ticker",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "yahoo_fin.options.get_options_chain",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "yahoo_fin.options",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.get_data_yahoo",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.get_data_yahoo",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "yfinance.Ticker",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "yfinance.Ticker",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "DeltaPI.AutoDiff.AutoDiff",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 398,
"usage_type": "attribute"
}
] |
491245277
|
import os.path
import sqlite3
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import pandas as pd
class Template:
"""PCR template.
Parameters
----------
seqrecord : Bio.SeqRecord
        Typically an element from a Bio.SeqIO.parse iterator.
Attributes
----------
seq : Bio.Seq
Unambiguous DNA/RNA sequence. Gap supported.
description : str
FASTA header line.
id : str
Unique sequence identifier.
taxonomy : str
Semicolon-separated, hierarchical taxonomic classification.
"""
def __init__(self, seqrecord):
if not isinstance(seqrecord, SeqRecord):
raise TypeError
self._seqrecord = seqrecord
self.seq = self._seqrecord.seq
self.description = self._seqrecord.description
self.id = self._seqrecord.id
self.taxonomy = self.description.split(' ', 1)[1]
class Primer(Seq):
"""PCR primer.
A copy of Bio.Seq class.
"""
pass
class PrimerPool:
"""PCR primer pool.
A collection of DNA primers for one strand. Must share the same
starting and ending coordinates.
Parameters
----------
name : str
Primer pool name.
start : int
1-based genomic coordinate indicating where the primer starts.
end : int
1-based genomic coordinate indicating where the primer ends.
primers : list of Primer or list of Seq, or None, default None
Primers for one strand. Specified upon instance creation or by
instance method later.
"""
def __init__(self, name, start, end, primers):
if not isinstance(start, int):
raise TypeError
if not isinstance(end, int):
raise TypeError
if not isinstance(primers, list):
raise TypeError
if any(map(lambda x: not isinstance(x, Primer), primers)):
raise TypeError
self.name = name
self.start = start
self.end = end
self.primers = primers
class PCRMatch:
def __init__(self, id, taxonomy, fw_match, rv_match, is_amplified):
self.id = id
self.taxonomy = taxonomy
self.fw_match = fw_match
self.rv_match = rv_match
self.is_amplified = is_amplified
class PCR:
"""in silico PCR.
Simulate PCR on a template sequence with forward primer pool and reverse
primer pool following certain rules defining primer/template match.
Parameters
----------
template : Template
PCR template. Support gapped/ungapped DNA/RNA sequence.
fw_primer_pool : PrimerPool
Forward primer pool containing one or multiple primers.
rv_primer_pool : PrimerPool
Reverse primer pool containing one or multiple primers.
find_match : function
Function to determine if primer could bind to template. Only
return boolean value.
"""
def __init__(self, template, fw_primer_pool, rv_primer_pool, find_match):
if not isinstance(template, Template):
raise TypeError
if not isinstance(fw_primer_pool, PrimerPool):
raise TypeError
if not isinstance(rv_primer_pool, PrimerPool):
raise TypeError
self.template = template
self.fw_primer_pool = fw_primer_pool
self.rv_primer_pool = rv_primer_pool
self._find_match = find_match
def cut_forward_primer_from_template(self):
start = self.fw_primer_pool.start - 1
end = self.fw_primer_pool.end
return self.template.seq[start:end]
def cut_reverse_primer_from_template(self):
start = self.rv_primer_pool.start - 1
end = self.rv_primer_pool.end
return self.template.seq[start:end]
def run(self):
"""Run in silico PCR.
"""
GAP = '-' # gap placeholder
UNKNOWN = '.' # leading and trailing placeholder
# cut primer from template and back transcribe to DNA
fw_tmplt = self.cut_forward_primer_from_template().back_transcribe()
rv_tmplt = self.cut_reverse_primer_from_template().back_transcribe()
# ungap
fw_tmplt = fw_tmplt.ungap(UNKNOWN).ungap(GAP)
rv_tmplt = rv_tmplt.ungap(UNKNOWN).ungap(GAP)
# test match
        # use for/else so the flag is only cleared when no primer matched;
        # a plain assignment after the loop would overwrite a successful match
        for fw in self.fw_primer_pool.primers:
            if self._find_match(str(fw), str(fw_tmplt)):
                fw_match = True
                break
        else:
            fw_match = False
        for rv in self.rv_primer_pool.primers:
            if self._find_match(str(rv.reverse_complement()), str(rv_tmplt)):
                rv_match = True
                break
        else:
            rv_match = False
is_amplified = fw_match and rv_match
return PCRMatch(self.template.id, self.template.taxonomy,
fw_match, rv_match, is_amplified)
class PCRArray:
def __init__(self, fasta_path, fw_path, rv_path):
self._fasta_path = fasta_path
self._fw_path = fw_path
self._rv_path = rv_path
@staticmethod
def parse_primer_pool(path):
"""Parse file containing primer pool.
Assuming primers stored in this format:
#515f 11895-13861
GTGCCAGCAGTCGCGGTAA
GTGCCAGCAGGAGCGGTAA
GTGCCACCAGCCGCGGTAA
GTGCCAGAAGTCTCGGTAA
GTGCCAGAAGCGTCGGTAA
GTGCCAGAAGCCTCGGTAA
"""
with open(path, 'r') as handle:
lines = [line.strip() for line in handle if line.strip()]
header = lines[0].lstrip('#').strip()
        primers = list(map(Primer, lines[1:]))  # PrimerPool requires a concrete list, not a map object
name, position = header.split()
start, end = map(int, position.split('-'))
return PrimerPool(name, start, end, primers)
@property
def fw_primer_pool(self):
return PCRArray.parse_primer_pool(self._fw_path)
@property
def rv_primer_pool(self):
return PCRArray.parse_primer_pool(self._rv_path)
def iter(self):
for seqrecord in SeqIO.parse(self._fasta_path, 'fasta'):
template = Template(seqrecord)
pcr = PCR(template, self.fw_primer_pool,
self.rv_primer_pool, simple_match)
yield pcr.run()
def to_df(self):
data = [pcrmatch.__dict__ for pcrmatch in self.iter()]
return pd.DataFrame(data)
def to_sql(self, filename, out_dir):
df = self.to_df()
with sqlite3.connect(os.path.join(out_dir, filename)) as conn:
df.to_sql('testprimer', conn, if_exists='replace',
index=False)
return
def simple_match(seq1, seq2):
return seq1.upper() == seq2.upper()
def pcr(fasta_path, fw_path, rv_path, filename, out_dir):
'''Main module entrance'''
pcrarray = PCRArray(fasta_path, fw_path, rv_path)
pcrarray.to_sql(filename, out_dir)
return
| null |
testprimer/pcr.py
|
pcr.py
|
py
| 6,860 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Bio.SeqRecord.SeqRecord",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "Bio.Seq.Seq",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 218,
"usage_type": "name"
}
] |
166017370
|
import importlib
from typing import List
from django.conf import settings
from django.db.models import prefetch_related_objects
from rest_access_policy import AccessPolicyException
from rest_framework import permissions
class AccessPolicy(permissions.BasePermission):
statements = []
id = None
group_prefix = "group:"
id_prefix = "id:"
def has_permission(self, request, view) -> bool:
action = self._get_invoked_action(view)
statements = self.get_policy_statements(request, view)
if len(statements) == 0:
return False
return self._evaluate_statements(statements, request, view, action)
def get_policy_statements(self, request, view) -> List[dict]:
return self.statements
def get_user_group_values(self, user) -> List[str]:
if user.is_anonymous:
return []
prefetch_related_objects([user], "groups")
return [g.name for g in user.groups.all()]
@classmethod
def scope_queryset(cls, request, qs):
return qs.none()
def _get_invoked_action(self, view) -> str:
"""
If a CBV, the name of the method. If a regular function view,
the name of the function.
"""
if hasattr(view, "action"):
return view.action
elif hasattr(view, "__class__"):
return view.__class__.__name__
raise AccessPolicyException("Could not determine action of request")
def _evaluate_statements(
self, statements: List[dict], request, view, action: str
) -> bool:
statements = self._normalize_statements(statements)
matched = self._get_statements_matching_principal(request, statements)
matched = self._get_statements_matching_action(request, action, matched)
matched = self._get_statements_matching_context_conditions(
request, view, action, matched
)
denied = [_ for _ in matched if _["effect"] != "allow"]
if len(matched) == 0 or len(denied) > 0:
return False
return True
def _normalize_statements(self, statements=[]) -> List[dict]:
for statement in statements:
if isinstance(statement["principal"], str):
statement["principal"] = [statement["principal"]]
if isinstance(statement["action"], str):
statement["action"] = [statement["action"]]
if "condition" not in statement:
statement["condition"] = []
elif isinstance(statement["condition"], str):
statement["condition"] = [statement["condition"]]
return statements
def _get_statements_matching_principal(
self, request, statements: List[dict]
) -> List[dict]:
user = request.user
user_roles = None
matched = []
for statement in statements:
principals = statement["principal"]
found = False
if "*" in principals:
found = True
elif "authenticated" in principals:
found = not user.is_anonymous
elif "anonymous" in principals:
found = user.is_anonymous
elif self.id_prefix + str(user.pk) in principals:
found = True
else:
if not user_roles:
user_roles = self.get_user_group_values(user)
for user_role in user_roles:
if self.group_prefix + user_role in principals:
found = True
break
if found:
matched.append(statement)
return matched
def _get_statements_matching_action(
self, request, action: str, statements: List[dict]
):
"""
Filter statements and return only those that match the specified
action.
"""
matched = []
SAFE_METHODS = ("GET", "HEAD", "OPTIONS")
http_method = "<method:%s>" % request.method.lower()
for statement in statements:
if action in statement["action"] or "*" in statement["action"]:
matched.append(statement)
elif http_method in statement["action"]:
matched.append(statement)
elif (
"<safe_methods>" in statement["action"]
and request.method in SAFE_METHODS
):
matched.append(statement)
return matched
def _get_statements_matching_context_conditions(
self, request, view, action: str, statements: List[dict]
):
"""
Filter statements and only return those that match all of their
custom context conditions; if no conditions are provided then
the statement should be returned.
"""
matched = []
for statement in statements:
if len(statement["condition"]) == 0:
matched.append(statement)
continue
fails = 0
for condition in statement["condition"]:
passed = self._check_condition(condition, request, view, action)
if not passed:
fails += 1
break
if fails == 0:
matched.append(statement)
return matched
def _check_condition(self, condition: str, request, view, action: str):
"""
Evaluate a custom context condition; if method does not exist on
the access policy class, then return False.
Condition value can contain a value that is passed to method, if
formatted as `<method_name>:<arg_value>`.
"""
parts = condition.split(":", 1)
method_name = parts[0]
arg = parts[1] if len(parts) == 2 else None
method = self._get_condition_method(method_name)
if arg is not None:
result = method(request, view, action, arg)
else:
result = method(request, view, action)
if type(result) is not bool:
raise AccessPolicyException(
"condition '%s' must return true/false, not %s"
% (condition, type(result))
)
return result
def _get_condition_method(self, method_name: str):
if hasattr(self, method_name):
return getattr(self, method_name)
if hasattr(settings, "DRF_ACCESS_POLICY"):
module_path = settings.DRF_ACCESS_POLICY.get("reusable_conditions")
if module_path:
module = importlib.import_module(module_path)
if hasattr(module, method_name):
return getattr(module, method_name)
raise AccessPolicyException(
"condition '%s' must be a method on the access policy or be defined in the 'reusable_conditions' module"
% method_name
)
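

# Minimal usage sketch (an illustration, not part of this module): a subclass
# wires statements to principals, actions and a custom condition method. The
# action and group names below are assumptions for the example only.
class ArticleAccessPolicy(AccessPolicy):
    statements = [
        {
            "action": ["list", "retrieve"],
            "principal": ["authenticated"],
            "effect": "allow",
        },
        {
            "action": ["destroy"],
            "principal": ["group:admins"],
            "effect": "allow",
            "condition": "is_happy_hour",
        },
    ]

    def is_happy_hour(self, request, view, action) -> bool:
        # toy condition using the `<method_name>` form described above;
        # a real check would inspect request/view state instead of the clock
        import datetime
        return 17 <= datetime.datetime.now().hour < 19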
| null |
rest_access_policy/access_policy.py
|
access_policy.py
|
py
| 6,942 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.prefetch_related_objects",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "rest_access_policy.AccessPolicyException",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "rest_access_policy.AccessPolicyException",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "django.conf.settings",
"line_number": 197,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings.DRF_ACCESS_POLICY.get",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DRF_ACCESS_POLICY",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "rest_access_policy.AccessPolicyException",
"line_number": 206,
"usage_type": "call"
}
] |
316333306
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import c, mu_0, epsilon_0
class Gaussian(object):
def __init__(self, dt, center_wv, spread, pick_pos, dtype):
self.dt = dt
self.dtype = dtype
self.wvlenc = center_wv
self.spread = spread
self.pick_pos = pick_pos
self.freqc = c / self.wvlenc
self.w0 = 2 * np.pi * self.freqc
self.ws = self.spread * self.w0
self.ts = 1./self.ws
self.tc = self.pick_pos * self.dt
def pulse_re(self,step,pick_pos):
pulse_re = np.exp((-.5) * (((step*self.dt-self.tc)*self.ws)**2)) * np.cos(self.w0*(step*self.dt-self.tc))
return pulse_re
def pulse_im(self,step,pick_pos):
pulse_im = np.exp((-.5) * (((step*self.dt-self.tc)*self.ws)**2)) * np.sin(self.w0*(step*self.dt-self.tc))
return pulse_im
def plot_pulse(self, tsteps, freqs, savedir):
nax = np.newaxis
time_domain = np.arange(tsteps, dtype=self.dtype)
t = time_domain * self.dt
self.freqs = freqs
self.wvlens = c / self.freqs[::-1]
pulse_re = np.exp((-.5) * (((t-self.tc)*self.ws)**2)) * np.cos(self.w0*(t-self.tc))
pulse_im = np.exp((-.5) * (((t-self.tc)*self.ws)**2)) * np.sin(self.w0*(t-self.tc))
pulse_re_ft = (self.dt * pulse_re[nax,:]* np.exp(1j*2*np.pi*self.freqs[:,nax]*t[nax,:])).sum(1) / np.sqrt(2*np.pi)
pulse_im_ft = (self.dt * pulse_im[nax,:]* np.exp(1j*2*np.pi*self.freqs[:,nax]*t[nax,:])).sum(1) / np.sqrt(2*np.pi)
pulse_re_ft_amp = abs(pulse_re_ft)**2
pulse_im_ft_amp = abs(pulse_im_ft)**2
fig = plt.figure(figsize=(21,7))
ax1 = fig.add_subplot(1,3,1)
ax2 = fig.add_subplot(1,3,2)
ax3 = fig.add_subplot(1,3,3)
ax1.plot(time_domain, pulse_re, color='b', label='real')
ax1.plot(time_domain, pulse_im, color='r', label='imag', linewidth='1.5', alpha=0.5)
ax2.plot(self.freqs/10**12, pulse_re_ft_amp, color='b', label='real')
ax2.plot(self.freqs/10**12, pulse_im_ft_amp, color='r', label='imag', linewidth='1.5', alpha=0.5)
ax3.plot(self.wvlens/1e-6, pulse_re_ft_amp, color='b', label='real')
ax3.plot(self.wvlens/1e-6, pulse_im_ft_amp, color='r', label='imag', linewidth='1.5', alpha=0.5)
ax1.set_xlabel('time step')
ax1.set_ylabel('Amp')
ax1.legend(loc='best')
ax1.grid(True)
#ax1.set_xlim(4000,6000)
ax2.set_xlabel('freq(THz)')
ax2.set_ylabel('Amp')
ax2.legend(loc='best')
ax2.grid(True)
ax2.set_ylim(0,None)
ax3.set_xlabel('wavelength(um)')
ax3.set_ylabel('Amp')
ax3.legend(loc='best')
ax3.grid(True)
ax3.set_ylim(0,None)
fig.savefig(savedir+"/graph/src_input.png")
class Sine(object):
def __init__(self, dt, dtype):
self.dt = dt
self.dtype = dtype
def set_freq(self, freq):
self.freq = freq
self.wvlen = c / self.freq
self.omega = 2 * np.pi * self.freq
self.wvector = 2 * np.pi / self.wvlen
def set_wvlen(self, wvlen):
self.wvlen = wvlen
self.freq = c / self.wvlen
self.omega = 2 * np.pi * self.freq
self.wvector = 2 * np.pi / self.wvlen
def signal(self, tstep):
pulse_re = np.sin(self.omega * tstep * self.dt)
return pulse_re
class Cosine(object):
def __init__(self, dt, dtype):
self.dt = dt
self.dtype = dtype
def set_freq(self, freq):
self.freq = freq
self.wvlen = c / self.freq
self.omega = 2 * np.pi * self.freq
self.wvector = 2 * np.pi / self.wvlen
def set_wvlen(self, wvlen):
self.wvlen = wvlen
self.freq = c / self.wvlen
self.omega = 2 * np.pi * self.freq
self.wvector = 2 * np.pi / self.wvlen
def signal(self, tstep):
pulse_re = np.cos(self.omega * tstep * self.dt)
return pulse_re
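

# Hedged usage sketch (illustrative values only; the dt, wavelength and
# frequency band below are assumptions, not values from any simulation setup):
if __name__ == '__main__':
    dt = 1e-17
    src = Gaussian(dt, center_wv=600e-9, spread=0.3, pick_pos=2000, dtype=np.float64)
    freqs = np.linspace(400e12, 600e12, 100)
    print(src.pulse_re(2000, 2000))  # peak of the real-valued pulse
    # src.plot_pulse(4000, freqs, savedir='.')  # would write ./graph/src_input.png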
| null |
pyctypes/FDTD.real.diel.CPML.MPI/source.py
|
source.py
|
py
| 4,067 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scipy.constants.c",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scipy.constants.c",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "scipy.constants.c",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.c",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "scipy.constants.c",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.c",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 138,
"usage_type": "call"
}
] |
414349889
|
'''
Plot the grayscale histogram of an image
-----------------------
First we read the image as img, then convert it to a grayscale image gray.
We then flatten gray into a one-dimensional array with numpy's ravel function and feed it to plt.hist.
See the numpy.ravel documentation for details.
The result is the grayscale histogram of the image.
'''
from matplotlib import pyplot as plt
import numpy as np
import cv2
# img = cv2.imread('little_chess.png')
img = cv2.imread('dao_roi.png')
if img is None:
print("图片读入失败")
exit()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
'''
1Darray: a one-dimensional array; here gray.ravel() flattens the grayscale image into one.
bins: the number of statistic intervals; 256 splits the range into 256 bins — change this value to see different histograms.
range: the value range over which the statistics are computed.
'''
plt.hist(gray.ravel(), bins=256, range=[0, 256])
plt.show()
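# A minimal alternative sketch: cv2.calcHist computes the same 256-bin grayscale
# histogram without flattening the image first (left commented out):
# hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
# plt.plot(hist)
# plt.show()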
| null |
TeachCode代码教学区/CH4_ROI颜色统计与图像二值化/CH4.2_DrawImgGrayscaleStat.py
|
CH4.2_DrawImgGrayscaleStat.py
|
py
| 871 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
}
] |
633139889
|
"""
filesync.py
Copy files from an repo directory to an full environment. This script ties in
nicely to vim's plugin neomake which will call this script every time a file
saves. This script will copy an entire OpenNet copy over to an environment, or
just a single file.
"""
import shutil
from distutils import dir_util
import re
import os
import argparse
import smart_dest  # local helper module assumed to provide smart_dest() for 'auto' destinations
def copytree(src, dst, symlinks=False, ignore=None):
"""
Copy the whole OpenNet folder
Keyword arguments:
src -- the directory we want to copy from
dst -- the directory we want to copy to
    symlinks -- if true, preserve symlinks; if false, copy symlink contents
ignore -- ignore specified directories
returns: none
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
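# Hedged usage sketch (paths are illustrative assumptions):
# copytree('/home/user/repos/OpenNet/', '/home/user/envs/dev1/')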
def can_copy_to_dest(dest_file, targetdir):
"""
Determine if we are able to copy to the specified destination. If a
different OpenNet copy has been copied over to the dest, prompt the user on
whether or not it is ok to copy.
returns: bool
"""
# get src directory
r = re.search(".*?(Prod1|Prod2|Prod3)/(.*?)/", dest_file)
print(dest_file)
if r:
directory = r.group(2)
print(r.group(2))
# check src_file
src_file_path = targetdir + 'src.fs'
if os.path.isfile(src_file_path):
src_file = open(src_file_path, 'r')
src = src_file.read().splitlines()
if src:
src = src[0]
if not src == directory:
print("The last file sync to this destination used a different source. Would you like to continue? y/n")
answer = ""
                while answer != "y":
                    answer = input()
                    if answer == "n":
                        return False
                    if answer == "y":
src_file.close()
#delete src file
os.remove(targetdir + 'src.fs')
src_file = open(src_file_path, "w")
src_file.write(directory)
src_file.close()
return True
print("valid answers are \"y\" and \"n\"")
else:
            #TODO: store a (product, path) pair in the src_file so we can write from multiple
            # projects without having to switch
src_file = open(src_file_path, 'w')
src_file.write(directory)
src_file.close()
return True
def main():
parser = argparse.ArgumentParser(description='filesync', prog="filesync.py")
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.0.1.0')
parser.add_argument('-f', help='Sync single file, by default all files are copied')
parser.add_argument('--dest_file', help='Absolute path to the destination file. This is required')
args = parser.parse_args()
if args.f:
r = re.search("(.*Prod1/.*?/)", args.f)
if r:
dest_file = r.group(1) + "/dest.fs"
else:
r = re.search("(.*Open[a-zA-Z]+/.*?/)", args.f)
if r:
dest_file = r.group(1) + "dest.fs"
else:
dest_file = args.dest_file
try:
config_file = open(dest_file, 'r')
config_file = config_file.read().splitlines()
config = config_file[0]
except FileNotFoundError:
print("dest.fs does not exist. Please configure before using")
exit()
if config == "auto":
#use auto matching to determine which directory to copy to
targetdir = smart_dest.smart_dest(dest_file)
else:
targetdir = '/home/jared/D/osi/' + config + '/'
# check if targetdir exists. If not complain and exit
if not os.path.isdir(targetdir):
print(targetdir)
print("Environment provided in dest.fs does not exist. Check configuration and try again")
exit()
# check is src.fs exists in dest, if so make sure we are allowed to copy from this directory
if not args.f:
        can_copy_to_dest(dest_file, targetdir)
if args.f:
#file path
r = re.search(".*Prod1/.*?/(.*)", args.f)
if r:
file_path = r.group(1)
target = targetdir + file_path
else:
r = re.search(".*Open[a-zA-Z]+/.*?/(.*)", args.f)
if r:
file_path = r.group(1)
target = targetdir + file_path
print("src: " + args.f)
print("target: " + target)
shutil.copy(args.f, target)
print("copy complete!")
else:
#src path
r = re.search("(.*Prod1/.*?/)", dest_file)
if r:
src_path = r.group(1)
else:
r = re.search("(.*Open[a-zA-Z]+/.*?/)", dest_file)
if r:
src_path = r.group(1)
print("src: " + src_path)
print("target: " + targetdir)
dir_util.copy_tree(src_path, targetdir)
print("copy complete!")
if __name__ == '__main__':
main()
| null |
environment_sync/src/filesync/filesync.py
|
filesync.py
|
py
| 5,098 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "shutil.copytree",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "shutil.copy2",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "distutils.dir_util.copy_tree",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "distutils.dir_util",
"line_number": 163,
"usage_type": "name"
}
] |
154353794
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import random
name = "Python blog tracker"
version = "0.1"
depends = ['beautifulsoup4']
waiter = True
def get_news():
url = "https://www.python.org/blogs/"
data = urlopen(url).read().decode('utf8')
bs = BeautifulSoup(data, features="html.parser")
date = bs.find(
'ul', class_='list-recent-posts').li.p.time.attrs['datetime']
title = bs.find(
'ul', class_='list-recent-posts').li.h3.text.replace('!', '')\
.replace('.', '')
if datetime.datetime.strptime(date, '%Y-%m-%d').day == \
datetime.date.today().day \
and \
datetime.datetime.strptime(date, '%Y-%m-%d').month == \
datetime.date.today().month:
return {'title': 'New post in Python blog!',
'text': f'It\'s titled "{title}". '
f'Check it out on python.org/blogs/ now!'}
return None
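

# Hedged usage sketch: get_news() performs a live request to python.org, so it
# is kept out of import time; it returns a dict only for a same-day post.
if __name__ == '__main__':
    news = get_news()
    print(news['title'] if news else 'No new Python blog post today.')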
| null |
newsportal/reporters/reporter_4.py
|
reporter_4.py
|
py
| 981 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 27,
"usage_type": "attribute"
}
] |
143256848
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from shutil import rmtree
from tempfile import mkdtemp
from os import listdir
from types import GeneratorType
from os.path import join, isfile, abspath, dirname, splitext
from skbio.util import remove_files
from skbio.parse.sequences import parse_fasta
from skbio.alignment import SequenceCollection
from skbio import DNA
from biom import load_table
import logging
from deblur.workflow import (dereplicate_seqs,
remove_chimeras_denovo_from_seqs,
remove_artifacts_seqs,
create_otu_table,
get_files_for_table,
trim_seqs,
multiple_sequence_alignment,
launch_workflow,
split_sequence_file_on_sample_ids_to_files,
build_index_sortmerna,
start_log)
class workflowTests(TestCase):
""" Test deblur pipeline and individual methods functionality """
def setUp(self):
""" Create working directory and two FASTA input
files corresponding to two samples (s1 and s2).
Each input file contains 120 sequences, of which
100 are 16S amplicons (ART simulator), 10 are
chimeric sequences (Grinder) and 10 are PhiX
artifacts (ART). The 100 amplicon sequences
intend to evenly represent a community of 10
species.
"""
# test output can be written to this directory
self.working_dir = mkdtemp()
# the data directory for the workflow test files
self.test_data_dir = join(dirname(abspath(__file__)), 'data')
self.seqs_s1_fp = join(self.test_data_dir, 'seqs_s1.fasta')
self.seqs_s2_fp = join(self.test_data_dir, 'seqs_s2.fasta')
self.seqs_s3_fp = join(self.test_data_dir, 'seqs_s3.fasta')
self.orig_s1_fp = join(self.test_data_dir, 'simset.s1.fasta')
self.orig_s2_fp = join(self.test_data_dir, 'simset.s2.fasta')
self.orig_s3_fp = join(self.test_data_dir, 'simset.s3.fasta')
self.files_to_remove = []
logfilename = join(self.working_dir, "log.txt")
start_log(level=logging.DEBUG, filename=logfilename)
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.working_dir)
def test_trim_seqs(self):
seqs = [("seq1", "tagggcaagactccatggtatga"),
("seq2", "cggaggcgagatgcgtggta"),
("seq3", "tactagcaagattcctggtaaagga"),
("seq4", "aggatgcgagatgcgtg"),
("seq5", "gagtgcgagatgcgtggtgagg"),
("seq6", "ggatgcgagatgcgtggtgatt"),
("seq7", "agggcgagattcctagtgga--")]
obs = trim_seqs(seqs, 20)
self.assertTrue(isinstance(obs, GeneratorType))
exp = [("seq1", "tagggcaagactccatggta"),
("seq2", "cggaggcgagatgcgtggta"),
("seq3", "tactagcaagattcctggta"),
("seq5", "gagtgcgagatgcgtggtga"),
("seq6", "ggatgcgagatgcgtggtga"),
("seq7", "agggcgagattcctagtgga")]
self.assertEqual(list(obs), exp)
def test_dereplicate_seqs_remove_singletons(self):
""" Test dereplicate_seqs() method functionality with
removing singletons
"""
seqs = [("seq1", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq2", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq3", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq4", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCT"),
("seq5", "TACCAGCCCCTTAAGTGGTAGGGACGATTATTTGGCCTAAAGCGTCCG"),
("seq6", "CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT"),
("seq7", "CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT")]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
output_fp = join(self.working_dir, "seqs_derep.fasta")
dereplicate_seqs(seqs_fp=seqs_fp,
output_fp=output_fp)
self.assertTrue(isfile(output_fp))
exp = [("seq1;size=3;",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq6;size=2;",
"CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT")]
with open(output_fp, 'U') as out_f:
act = [item for item in parse_fasta(out_f)]
self.assertEqual(act, exp)
def test_dereplicate_seqs(self):
""" Test dereplicate_seqs() method functionality,
keep singletons
"""
seqs = [("seq1", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq2", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq3", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq4", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCT"),
("seq5", "TACCAGCCCCTTAAGTGGTAGGGACGATTATTTGGCCTAAAGCGTCCG"),
("seq6", "CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT"),
("seq7", "CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT")]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
output_fp = join(self.working_dir, "seqs_derep.fasta")
dereplicate_seqs(seqs_fp=seqs_fp,
output_fp=output_fp,
min_size=1)
self.assertTrue(isfile(output_fp))
exp = [("seq1;size=3;",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("seq6;size=2;",
"CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT"),
("seq4;size=1;",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCT"),
("seq5;size=1;",
"TACCAGCCCCTTAAGTGGTAGGGACGATTATTTGGCCTAAAGCGTCCG")]
with open(output_fp, 'U') as out_f:
act = [item for item in parse_fasta(out_f)]
self.assertEqual(act, exp)
def test_remove_artifacts_seqs(self):
""" Test remove_artifacts_seqs() function for removing
sequences not matching to a reference database
using SortMeRNA. This test forces a new index
construction for the reference sequences.
"""
seqs = [("seq1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq2", "CCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("seq3", "TCGCTATTATTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCC"),
("seq4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq5", "CTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATAGGGTC"),
("seq6", "TTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAAT"),
("phix1", "TCTAAAGGTAAAAAACGTTCTGGCGCTCGCCCTGGTCGTCCGCAGCC"),
("phix2", "CTGGCGCTCGCCCTGGTCGTCCGCAGCCGTTGCGAGGTACTAAAGGC"),
("phix3", "GCGCATAAATTTGAGCAGATTTGTCGTCACAGGTTGCGCCGCCAAAA")]
exp_seqs = ["seq1", "seq2", "seq3", "seq4", "seq5", "seq6"]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
ref = [("ref1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTA"
"GTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref2", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref3", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref5", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATAGGGT"),
("ref6", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT")]
ref_fp = join(self.working_dir, "ref2.fasta")
with open(ref_fp, 'w') as ref_f:
for seq in ref:
ref_f.write(">%s\n%s\n" % seq)
self.files_to_remove.append(ref_fp)
ref_db_fp = build_index_sortmerna(
ref_fp=(ref_fp,),
working_dir=self.working_dir)
output_fp = remove_artifacts_seqs(seqs_fp=seqs_fp,
ref_fp=(ref_fp,),
working_dir=self.working_dir,
ref_db_fp=ref_db_fp,
negate=False,
threads=1)
obs_seqs = []
with open(output_fp, 'U') as output_f:
for label, seq in parse_fasta(output_f):
obs_seqs.append(label)
self.assertEqual(obs_seqs, exp_seqs)
def test_remove_artifacts_seqs_index_prebuilt(self):
""" Test remove_artifacts_seqs() function for removing
sequences not matching to a reference database
using SortMeRNA. This test passes a built index.
"""
seqs = [("seq1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq2", "CCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("seq3", "TCGCTATTATTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCC"),
("seq4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq5", "CTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATAGGGTC"),
("seq6", "TTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAAT"),
("phix1", "TCTAAAGGTAAAAAACGTTCTGGCGCTCGCCCTGGTCGTCCGCAGCC"),
("phix2", "CTGGCGCTCGCCCTGGTCGTCCGCAGCCGTTGCGAGGTACTAAAGGC"),
("phix3", "GCGCATAAATTTGAGCAGATTTGTCGTCACAGGTTGCGCCGCCAAAA")]
exp_seqs = ["seq1", "seq2", "seq3", "seq4", "seq5", "seq6"]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
ref = [("ref1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTA"
"GTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref2", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref3", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref5", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATAGGGT"),
("ref6", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT")]
ref_fp = join(self.working_dir, "ref3.fasta")
with open(ref_fp, 'w') as ref_f:
for seq in ref:
ref_f.write(">%s\n%s\n" % seq)
self.files_to_remove.append(ref_fp)
# build index
sortmerna_db = build_index_sortmerna([ref_fp], self.working_dir)
output_fp = join(self.working_dir, "seqs_filtered.fasta")
output_fp = remove_artifacts_seqs(seqs_fp=seqs_fp,
ref_fp=(ref_fp,),
working_dir=self.working_dir,
ref_db_fp=sortmerna_db,
negate=False,
threads=1)
obs_seqs = []
with open(output_fp, 'U') as output_f:
for label, seq in parse_fasta(output_f):
obs_seqs.append(label)
self.assertEqual(obs_seqs, exp_seqs)
def test_remove_artifacts_seqs_negate(self):
""" Test remove_artifacts_seqs() function for removing
sequences matching to a reference database
using SortMeRNA (negate option).
"""
seqs = [("seq1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq2", "CCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("seq3", "TCGCTATTATTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCC"),
("seq4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCC"),
("seq5", "CTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATAGGGTC"),
("seq6", "TTGAGCCTAAAACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAAT"),
("phix1", "TCTAAAGGTAAAAAACGTTCTGGCGCTCGCCCTGGTCGTCCGCAGCC"),
("phix2", "CTGGCGCTCGCCCTGGTCGTCCGCAGCCGTTGCGAGGTACTAAAGGC"),
("phix3", "GCGCATAAATTTGAGCAGATTTGTCGTCACAGGTTGCGCCGCCAAAA")]
# seq5 is 80% similar, so should be kept for 0.95 default similarity
# to artifacts
exp_seqs = ["seq5", "phix1", "phix2", "phix3"]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
ref = [("ref1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTA"
"GTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref2", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref3", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref5", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATAGGGT"),
("ref6", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT")]
ref_fp = join(self.working_dir, "ref4.fasta")
with open(ref_fp, 'w') as ref_f:
for seq in ref:
ref_f.write(">%s\n%s\n" % seq)
self.files_to_remove.append(ref_fp)
ref_db_fp = build_index_sortmerna([ref_fp], self.working_dir)
output_fp = join(self.working_dir, "seqs_filtered.fasta")
output_fp = remove_artifacts_seqs(seqs_fp=seqs_fp,
ref_fp=(ref_fp,),
working_dir=self.working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=1)
obs_seqs = []
with open(output_fp, 'U') as output_f:
for label, seq in parse_fasta(output_f):
obs_seqs.append(label)
self.assertEqual(obs_seqs, exp_seqs)
def test_remove_chimeras_denovo_from_seqs(self):
""" Test remove_chimeras_denovo_from_seqs() method functionality.
Remove chimeric sequences from a FASTA file using the UCHIME
algorithm, implemented in VSEARCH.
"""
seqs = [("s1_104;size=2;", "GTGCCAGCCGCCGCGGTAATACCCGCAGCTCAAGTGGTG"
"GTCGCTATTATTGAGCCTAAAACGTCCGTAGTCGGCTTT"
"GTAAATCCCTGGGTAAATCGGGAAGCTTAACTTTCCGAC"
"TTCCGAGGAGACTGTCAAACTTGGGACCGGGAG"),
("s1_106;size=2;", "GTGTCAGCCGCCGCGGTAATACCAGCTCTCCGAGTGGTG"
"TGGATGTTTATTGGGCCTAAAGCGTCCGTAGCCGGCTGC"
"GCAAGTCTGTCGGGAAATCCGCACGCCTAACGTGCGGGC"
"GTCCGGCGGAAACTGCGTGGCTTGGGACCGGAA"),
("s1_1;size=9;", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAA"
"ACGTCCGTAGTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"
"CGCTTAACGATCCGATTCTGGGGAGACTGCAAAGCTTGGGA"
"CCGGGCGAGGTTAGAGGTACTCTCGGG"),
("s1_20;size=9;", "TACCTGCAGCCCAAGTGGTGGTCGATTTTATTGAGTCTAA"
"AACGTTCGTAGCCGGTTTGATAAATCCTTGGGTAAATCGG"
"GAAGCTTAACTTTCCGATTCCGAGGAGACTGTCAAACTTG"
"GGACCGGGAGAGGCTAGAGGTACTTCTGGG"),
("s1_40;size=8;", "TACCAGCTCTCCGAGTGGTGTGGATGTTTATTGGGCCTAA"
"AGCATCCGTAGCTGGCTAGGTTAGTCCCCTGTTAAATCCA"
"CCGAATTAATCGTTGGATGCGGGGGATACTGCTTGGCTAG"
"GGGACGAGAGAGGCAGACGGTATTTCCGGG"),
("s1_60;size=8;", "TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAA"
"AGCGTCCGTAGCCGGCTGCGCAAGTCTGTCGGGAAATCCG"
"CACGCCTAACGTGCGGGTCCGGCGGAAACTGCGTGGCTTG"
"GGACCGGAAGACTCGAGGGGTACGTCAGGG")]
seqs_non_chimera = ["s1_1;size=9;", "s1_20;size=9;",
"s1_40;size=8;", "s1_60;size=8;"]
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for seq in seqs:
seqs_f.write(">%s\n%s\n" % seq)
output_fp = remove_chimeras_denovo_from_seqs(
seqs_fp=seqs_fp,
working_dir=self.working_dir)
seqs_obs = []
with open(output_fp, 'U') as output_f:
for label, seq in parse_fasta(output_f):
label = label.split()[0]
seqs_obs.append(label)
self.assertEqual(seqs_non_chimera, seqs_obs)
def test_multiple_sequence_alignment(self):
"""Test multiple sequence alignment.
"""
seqs = [DNA('caccggcggcccggtggtggccattattattgggtctaaag', id='seq_1'),
DNA('caccggcggcccgagtggtggccattattattgggtcaagg', id='seq_2'),
DNA('caccggcggcccgagtgatggccattattattgggtctaaag', id='seq_3'),
DNA('aaccggcggcccaagtggtggccattattattgggtctaaag', id='seq_4'),
DNA('caccgggcccgagtggtggccattattattgggtctaaag', id='seq_5')]
seqs_col = SequenceCollection(seqs)
seqs_fp = join(self.working_dir, "seqs.fna")
with open(seqs_fp, 'w') as o:
o.write(seqs_col.to_fasta())
alignment_file = multiple_sequence_alignment(seqs_fp)
with open(alignment_file, 'U') as f:
aligned_seqs = [DNA(item[1], id=item[0])
for item in parse_fasta(f)]
align_exp = [
DNA(
'caccggcggcccg-gtggtggccattattattgggtctaaag', id='seq_1'),
DNA(
'caccggcggcccgagtggtggccattattattgggtcaagg-', id='seq_2'),
DNA(
'caccggcggcccgagtgatggccattattattgggtctaaag', id='seq_3'),
DNA(
'aaccggcggcccaagtggtggccattattattgggtctaaag', id='seq_4'),
DNA(
'caccg--ggcccgagtggtggccattattattgggtctaaag', id='seq_5')]
self.assertItemsEqual(aligned_seqs, align_exp)
def test_build_index_sortmerna(self):
"""Test functionality of build_index_sortmerna()
"""
ref1 = [("ref1", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTA"
"GTCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref2", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref3", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref4", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT"),
("ref5", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATAGGGT"),
("ref6", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT")]
ref2 = [("ref1", "GTCGTAGCTAGCTGCCCACGATCGTAGCTAGCTAGCTACGTAGCTCATCAC"
"TCGCCGACCCACGTCCCACTGATGCTGTGGG"),
("ref2", "GCGGCGCCCAAAAATGTCGTGTAAAATTTTCTCGTACCCACTTGCTACCCA"
"TGGCCGCCATGCTGCTAACGCAATATATATA"),
("ref3", "TGTGAAAGCGCGCGAGAGAGTCGTATATATGGGCGCGGCGCGATGCTGCCC"
"GTCGATGCTGATCCCCCACGTACGTAGCCCC"),
("ref4", "GTGTGCTCGCGTAGCTAGCTTATATATCGGCGCGTAGTGCTAGCCCCAAAA"
"GTGTCCCCCCCCTCCTTTTTTATATATGCAA"),
("ref5", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATAGGGT"),
("ref6", "TACCCGCAGCTCAAGTGGTGGTCGCTATTATTGAGCCTAAAACGTCCGTAG"
"TCGGCTTTGTAAATCCCTGGGTAAATCGGGT")]
ref1_fp = join(self.working_dir, "ref1.fasta")
with open(ref1_fp, 'w') as ref_f:
for seq in ref1:
ref_f.write(">%s\n%s\n" % seq)
ref2_fp = join(self.working_dir, "ref2.fasta")
with open(ref2_fp, 'w') as ref_f:
for seq in ref2:
ref_f.write(">%s\n%s\n" % seq)
ref_fps = tuple([ref1_fp, ref2_fp])
ref_db_fp = build_index_sortmerna(
ref_fp=ref_fps,
working_dir=self.working_dir)
self.assertEqual(len(ref_fps), len(ref_db_fp))
def run_workflow_try(self, simfilename, origfilename, ref_fp, ref_db_fp):
"""Test launching the complete workflow using simulated sequences
and compare to original ground truth.
Parameters
----------
simfilename : str
name of the simulated reads fasta file
origfilename : str
name of the fasta file with the ground truth sequences
"""
seqs_fp = simfilename
output_fp = self.working_dir
read_error = 0.05
mean_error = 0.005
error_dist = None
indel_prob = 0.01
indel_max = 3
trim_length = 100
min_size = 2
negate = False
threads = 1
delim = '_'
nochimera = launch_workflow(seqs_fp, output_fp, read_error, mean_error,
error_dist, indel_prob, indel_max,
trim_length, min_size, (ref_fp,),
ref_db_fp, negate, threads, delim)
# get the trimmed ground truth sequences
with open(origfilename, 'U') as f:
orig_seqs = [item[1] for item in parse_fasta(f)]
orig_seqs = [item[:trim_length].upper() for item in orig_seqs]
output_filename = 'final.biom'
output_table_fp = join(output_fp, output_filename)
create_otu_table(output_table_fp, [(nochimera, seqs_fp)])
table_obs = load_table(output_table_fp)
outseqs = table_obs.ids(axis='observation')
outseqs = list(outseqs)
outseqs.sort()
orig_seqs.sort()
# test we see all ground truth sequences and no other
self.assertItemsEqual(outseqs, orig_seqs)
def test_get_files_for_table(self):
filelist = get_files_for_table(self.test_data_dir)
file1 = join(self.test_data_dir,
'testmerge.fasta.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras')
file2 = join(self.test_data_dir,
'testmerge2.fasta.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras')
self.assertEqual(len(filelist), 2)
self.assertTrue(file1 in [filelist[0][0], filelist[1][0]])
self.assertTrue(file2 in [filelist[0][0], filelist[1][0]])
self.assertTrue('testmerge' in [filelist[0][1], filelist[1][1]])
def test_create_otu_table(self):
# merge the fasta files
m1 = join(self.test_data_dir,
'testmerge.fasta.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras')
m2 = join(self.test_data_dir,
'testmerge2.fasta.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras')
outfile = join(self.working_dir, 'testmerge.biom')
create_otu_table(outfile, [(m1, 'testmerge'), (m2, 'testmerge2')])
# test the result
table = load_table(outfile)
# test a sequence present in both
self.assertEqual(table.get_value_by_ids(
'TACGAGGggggCGAGCGTTGTTCGGAATTATTGGGCGTAAAAGGTGCGTAGGCGGTTCG'
'GTAAGTTTCGTGTGAAATCTTCGGGCTCAACTCGAAGCCTGCACGAAATACTGCCGGGC'
'TTGAGTGTGGGAGAGGTGAGTGGAATTTCCGGT', 'testmerge'), 5)
self.assertEqual(table.get_value_by_ids(
'TACGAGGggggCGAGCGTTGTTCG'
'GAATTATTGGGCGTAAAAGGTGCGTAGGCGGTTCGGTAAGTTTCGTGTGAAATCTTCGGG'
'CTCAACTCGAAGCCTGCACGAAATACTGCCGGGCTTGAGTGTGGGAGAGGTGAGTGGAAT'
'TTCCGGT', 'testmerge2'), 8)
# and an otu present only in one
self.assertEqual(table.get_value_by_ids(
'TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGAGCGTAGGCGGTTTCTT'
'AAGTCTGATGTGAAAGCCCACGGCTCAACCGTGGAGGGTCATTGGAAACTGGGGAACTTGA'
'GTGCAGAAGAGGAGAGTGGAATTCCATGT', 'testmerge'), 7)
self.assertEqual(table.get_value_by_ids(
'TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGAGCGTAGGCGGTTTCTTA'
'AGTCTGATGTGAAAGCCCACGGCTCAACCGTGGAGGGTCATTGGAAACTGGGGAACTTGAGT'
'GCAGAAGAGGAGAGTGGAATTCCATGT', 'testmerge2'), 0)
def test_launch_workflow(self):
"""Test launching complete workflow using 3 simulated sequence files.
seqs1 - 100 reads using art, original sequences are >0.5 identical.
seqs2 - 200 reads using grinder, original sequences are >0.9 identical,
0.1 chimeras, 35 phix reads
seqs3 - simple - 15 reads from seqs1 (10 reads for 1001203,
5 reads for 694276) for manual test validation
"""
# index the 70% rep. set database
ref_fp = join(self.test_data_dir, '70_otus.fasta')
ref_db_fp = build_index_sortmerna(
ref_fp=(ref_fp,),
working_dir=self.working_dir)
self.run_workflow_try(self.seqs_s1_fp,
self.orig_s1_fp, ref_fp, ref_db_fp)
self.run_workflow_try(self.seqs_s2_fp,
self.orig_s2_fp, ref_fp, ref_db_fp)
self.run_workflow_try(self.seqs_s3_fp,
self.orig_s3_fp, ref_fp, ref_db_fp)
def get_seqs_act_split_sequence_on_sample_ids(self, output_dir):
"""Parse output of split_sequence_file_on_sample_ids_to_files()
Parameters
----------
output_dir: string
output directory path storing FASTA files
Returns
-------
seqs_act: dict
dictionary with keys being sample IDs and values list of
sequences belonging to sample ID
"""
seqs_act = {}
for fn in listdir(output_dir):
input_fp = join(output_dir, fn)
sample_file = splitext(fn)[0]
with open(input_fp, 'U') as input_f:
for label, seq in parse_fasta(input_f):
sample = label.split('_')[0]
self.assertEqual(sample_file, sample)
if sample not in seqs_act:
seqs_act[sample] = [(label, seq)]
else:
seqs_act[sample].append((label, seq))
return seqs_act
def test_split_sequence_file_on_sample_ids_to_files(self):
"""Test functionality of split_sequence_file_on_sample_ids_to_files()
"""
seqs_fasta = {"s1": [
("s1_seq1",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("s1_seq2",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG")],
"s2": [
("s2_seq3",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCG"),
("s2_seq4",
"TACCGGCAGCTCAAGTGATGACCGCTATTATTGGGCCTAAAGCGTCCT")],
"s3": [
("s3_seq5",
"TACCAGCCCCTTAAGTGGTAGGGACGATTATTTGGCCTAAAGCGTCCG"),
("s3_seq6",
"CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT")],
"s4": [
("s4_seq7",
"CTGCAAGGCTAGGGGGCGGGAGAGGCGGGTGGTACTTGAGGGGAGAAT")]}
# Test FASTA file split on sample IDs to multiple FASTA files
seqs_fp = join(self.working_dir, "seqs.fasta")
with open(seqs_fp, 'w') as seqs_f:
for sample in seqs_fasta:
for seq in seqs_fasta[sample]:
seqs_f.write(">%s\n%s\n" % seq)
output_dir = mkdtemp()
with open(seqs_fp, 'U') as seqs_f:
split_sequence_file_on_sample_ids_to_files(seqs=seqs_f,
outdir=output_dir)
seqs_act = self.get_seqs_act_split_sequence_on_sample_ids(
output_dir=output_dir)
self.assertDictEqual(seqs_fasta, seqs_act)
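# A minimal standalone sketch (not the deblur implementation) of the
# split-on-sample-ids idea exercised by the test above: group (label, seq)
# FASTA records by the sample-id prefix of each label. The helper name and
# the `records` iterable are assumptions for illustration only.
def _sketch_split_on_sample_ids(records, delim='_'):
    grouped = {}
    for label, seq in records:
        sample = label.split(delim)[0]
        grouped.setdefault(sample, []).append((label, seq))
    return grouped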
if __name__ == '__main__':
main()
| null |
deblur/test/test_workflow.py
|
test_workflow.py
|
py
| 29,830 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.start_log",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "skbio.util.remove_files",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.trim_seqs",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "types.GeneratorType",
"line_number": 81,
"usage_type": "argument"
},
{
"api_name": "os.path.join",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.dereplicate_seqs",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.dereplicate_seqs",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.build_index_sortmerna",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.remove_artifacts_seqs",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.build_index_sortmerna",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.remove_artifacts_seqs",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.build_index_sortmerna",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.remove_artifacts_seqs",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.remove_chimeras_denovo_from_seqs",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "skbio.alignment.SequenceCollection",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.multiple_sequence_alignment",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "skbio.DNA",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.build_index_sortmerna",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.launch_workflow",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.create_otu_table",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "biom.load_table",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.get_files_for_table",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.create_otu_table",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "biom.load_table",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.build_index_sortmerna",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "skbio.parse.sequences.parse_fasta",
"line_number": 566,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "deblur.workflow.split_sequence_file_on_sample_ids_to_files",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 612,
"usage_type": "call"
}
] |
379273250
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: toby
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: [email protected]
@software: pycharm
@file: logger.py
@time: 2020-11-18 23:20
@desc:
'''
# MNJUFOYUIBJAKWFG  email password
import os
import time
import threading
import logging
log_dir = '../logs'
class Log(object):
_instance_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
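# double-checked locking: only one Log instance is ever created, even when
# several threads call Log() concurrently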
if not hasattr(Log, "_instance"):
with Log._instance_lock:
if not hasattr(Log, "_instance"):
Log._instance = object.__new__(cls)
return Log._instance
def __init__(self, filename=None):
folder_or_dir = log_dir
print('log_dir:' + log_dir)
if not os.path.exists(folder_or_dir):
print("folder or dir not exist !!!!!!")
os.makedirs(folder_or_dir) # 创建目录或文件夹
else:
print('folder exist!')
# file naming; os.path.join() combines several path segments and returns the joined path
self.logname = os.path.join(log_dir, '%s.log' % time.strftime('%Y-%m-%d'))
# create the logger object
self.logger = logging.getLogger()
# set the log level
self.logger.setLevel(logging.DEBUG)
if filename==None:
# log output format
fm = '[%(asctime)s] | %(filename)s |%(levelname)-6s: %(message)s'
# fm = '%(levelname):%(levelno)s:%(name)s:%(funcName)s:%(asctime):%(pathname):%(filename):%(module):%(thread):%(threadName)'
self.formatter = logging.Formatter(fm)
else:
fm ='[%(asctime)s] | %('+filename+')s |%(levelname)-6s: %(message)s'
self.formatter = logging.Formatter(fm)
def __console(self, level, message):
# create a FileHandler that writes records to the local log file
fh = logging.FileHandler(self.logname, encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(self.formatter)
self.logger.addHandler(fh)
# create a StreamHandler that writes records to the console
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(self.formatter)
self.logger.addHandler(sh)
# dispatch on the requested log level
if level == 'info':
self.logger.info(message)
elif level == 'debug':
self.logger.debug(message)
elif level == 'warning':
self.logger.warning(message)
elif level == 'error':
self.logger.error(message)
# removeHandler: detach the handlers after logging to avoid duplicated output
self.logger.removeHandler(sh)
self.logger.removeHandler(fh)
# close the opened log file
fh.close()
# severity order: debug < info < warning < error < critical
# debug level
def debug(self, message):
self.__console('debug', message)
# info level
def info(self, message):
self.__console('info', message)
# warning level
def warning(self, message):
self.__console('warning', message)
# error level
def error(self, message):
self.__console('error', message)
"""
filename: 指定日志文件名
filemode: 和file函数意义相同,指定日志文件的打开模式,’w’或’a’
format: 指定输出的格式和内容,format可以输出很多有用信息。显示的条目可以是以下内容:
%(levelname):日志级别的名字格式
%(levelno)s:日志级别的数字表示
%(name)s:日志名字
%(funcName)s:函数名字
%(asctime):日志时间,可以使用datefmt去定义时间格式,如上图。
%(pathname):脚本的绝对路径
%(filename):脚本的名字
%(module):模块的名字
%(thread):thread id
%(threadName):线程的名字
"""
if __name__ == '__main__':
mylog = Log()
mylog.info("main logging")
print(log_dir)
| null |
flask-celery-hjw/log_pkg/mylogger.py
|
mylogger.py
|
py
| 4,010 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "threading.Lock",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 72,
"usage_type": "attribute"
}
] |
590642987
|
"""Greeter.
Usage:
basic.py hello <name>
basic.py goodbye <name>
basic.py (-h | --help)
Options:
-h --help Show this screen.
"""
from docopt import docopt
def hello(name):
print('Hello, {0}'.format(name))
def goodbye(name):
print('Goodbye, {0}'.format(name))
if __name__ == '__main__':
arguments = docopt(__doc__)
# if an argument called hello was passed, execute the hello logic.
if arguments['hello']:
hello(arguments['<name>'])
elif arguments['goodbye']:
goodbye(arguments['<name>'])
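# For example, `python basic.py hello Alice` should (per docopt's documented
# behaviour) produce a dict roughly like
#   {'hello': True, 'goodbye': False, '<name>': 'Alice', '--help': False}
# which is why the branches above test arguments['hello'] / arguments['goodbye'].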
| null |
019_CLI/docopt_test.py
|
docopt_test.py
|
py
| 547 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "docopt.docopt",
"line_number": 23,
"usage_type": "call"
}
] |
44840318
|
from flask import (
Blueprint, render_template, request, current_app, url_for, g, redirect
)
from main.web import db
from main.web.models import Dags
bp = Blueprint('dags', __name__, url_prefix='/dags')
@bp.route('/', methods=['GET', 'POST'])
def dags():
page = request.args.get('page', 1, type=int)
dags = Dags.query.order_by(Dags.pub_date.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('dags.dags', page=dags.next_num) \
if dags.has_next else None
prev_url = url_for('dags.dags', page=dags.prev_num) \
if dags.has_prev else None
return render_template('dags/dags.html', dags=dags.items,
next_url=next_url, prev_url=prev_url)
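# Note: paginate(page, per_page, False) returns a Flask-SQLAlchemy Pagination
# object; its .items, .has_next/.has_prev and .next_num/.prev_num attributes
# drive the next_url/prev_url links built above.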
@bp.route('/detail/<dag_name>')
def detail(dag_name):
current_dag = Dags.query.filter_by(dag_name=dag_name).first_or_404()
return render_template('dags/detail.html', dag=current_dag)
| null |
main/web/dags/dags.py
|
dags.py
|
py
| 942 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "main.web.models.Dags.query.order_by",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "main.web.models.Dags.query",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "main.web.models.Dags",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "main.web.models.Dags.pub_date.desc",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "main.web.models.Dags.pub_date",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app.config",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.url_for",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "main.web.models.Dags.query.filter_by",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "main.web.models.Dags.query",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "main.web.models.Dags",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
}
] |
305767334
|
# coding: utf-8
import flask
import requests
import auth
import config
import forms
import model
import util
from main import app
@app.route('/admin/newsletter/')
@auth.admin_required
def newsletter_list():
newsletter_dbs, cursors = model.NewsLetter.get_dbs(
order='-created',
prev_cursor=True,
)
return flask.render_template(
'newsletter/admin/list.html',
html_class='newsletter-list',
title=u'Newsletter list',
newsletter_dbs=newsletter_dbs,
next_url=util.generate_next_url(cursors['next']),
prev_url=util.generate_next_url(cursors['prev']),
api_url=flask.url_for('api.newsletter.list'),
)
@app.route('/admin/newsletter/new/')
@auth.admin_required
def newsletter_new():
newsletter = model.NewsLetter(subject=u'Enter a subject')
newsletter.put()
return flask.redirect(
flask.url_for('newsletter_edit', newsletter_id=newsletter.key.id()))
@app.route('/admin/newsletter/clone/<int:newsletter_id>/')
@auth.admin_required
def newsletter_clone(newsletter_id):
origin = model.NewsLetter.get_by_id(newsletter_id)
if not origin:
return flask.redirect(flask.url_for('newsletter_list'))
new_newsletter = model.NewsLetter(
subject=origin.subject + u' Copy',
raw_recipients=origin.raw_recipients,
)
new_newsletter.put()
return flask.redirect(
flask.url_for('newsletter_edit', newsletter_id=new_newsletter.key.id())
)
@app.route(
'/admin/newsletter/edit/<int:newsletter_id>/', methods=['GET', 'POST'])
@auth.admin_required
def newsletter_edit(newsletter_id):
newsletter_db = model.NewsLetter.get_by_id(newsletter_id)
if not newsletter_db:
return flask.redirect(flask.url_for('newsletter_list'))
form = forms.NewsLetterForm(obj=newsletter_db)
if form.validate_on_submit():
form.populate_obj(newsletter_db)
newsletter_db.put()
if 'save_and_send' in flask.request.form:
res, mail_list = newsletter_db.send()
if res.ok:
return flask.redirect(
flask.url_for(
'show_mail_list', mail_list_id=mail_list.key.id()))
flask.flash(
u'Oops, something went wrong, please try again - %s' % res.text,
'danger'
)
if 'save_and_close' in flask.request.form:
return flask.redirect(flask.url_for('newsletter_list'))
return flask.render_template(
'newsletter/admin/edit.html',
html_class='newsletter-edit',
title=u'Edit newsletter',
form=form,
newsletter_db=newsletter_db,
get_upload_url=flask.url_for(
'api.resource.upload_parent',
parent_key=newsletter_db.key.urlsafe()
),
back_url_for='newsletter_list'
)
@app.route('/admin/newsletter/mail_list/<int:mail_list_id>/')
@auth.admin_required
def show_mail_list(mail_list_id):
mail_list = model.NewsLetterSend.get_by_id(mail_list_id)
return flask.render_template(
'newsletter/admin/mail_list.html',
html_class='mail_list',
title=u'Mailing list is ready to receive the email',
mail_list=mail_list,
back_url_for='newsletter_list'
)
@app.route('/admin/mail_list/black/', methods=['GET', 'POST'])
@auth.admin_required
def mail_list_black():
if flask.request.method == 'POST':
res = requests.get(
"https://api.mailgun.net/v3/mg.5studio.ru/bounces",
auth=('api', config.CONFIG_DB.mailgun_api_key),
params={'limit': 1000}
)
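# Mailgun's bounces endpoint returns a JSON body with an 'items' list of
# bounce records; each record carries the bounced 'address' consumed below.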
if not res.ok:
flask.flash(
u'Oops, something went wrong, please try again - %s' % res.text,
'danger'
)
else:
res = [item.get('address', '') for item in res.json().get('items', [])]
model.BlackMailList.update(res)
return flask.render_template(
'newsletter/admin/black_list.html',
html_class='mail-list-black',
title=u'Newsletter black list',
black_list=model.BlackMailList.get_black_list(),
)
| null |
main/control/newsletter.py
|
newsletter.py
|
py
| 4,081 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "model.NewsLetter.get_dbs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "model.NewsLetter",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "util.generate_next_url",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "util.generate_next_url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "main.app.route",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "model.NewsLetter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "main.app.route",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "model.NewsLetter.get_by_id",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "model.NewsLetter",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "model.NewsLetter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "main.app.route",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "model.NewsLetter.get_by_id",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "model.NewsLetter",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "forms.NewsLetterForm",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "main.app.route",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "model.NewsLetterSend.get_by_id",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "model.NewsLetterSend",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "main.app.route",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "config.CONFIG_DB",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "model.BlackMailList.update",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "model.BlackMailList",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "model.BlackMailList.get_black_list",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "model.BlackMailList",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "main.app.route",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "auth.admin_required",
"line_number": 110,
"usage_type": "attribute"
}
] |
214940190
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pandas as pd
from .base import WidgetBase
class MyViewBox(pg.ViewBox):
pass
class WaveformViewer(WidgetBase):
def __init__(self, spikesorter=None, parent=None):
WidgetBase.__init__(self, parent)
self.spikesorter = spikesorter
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.graphicsview = pg.GraphicsView()
self.layout.addWidget(self.graphicsview)
self.initialize_plot()
self.alpha = 60
self.refresh()
def initialize_plot(self):
self.viewBox1 = MyViewBox()
#~ self.viewBox.disableAutoRange()
grid = pg.GraphicsLayout(border=(100,100,100))
self.graphicsview.setCentralItem(grid)
self.plot1 = grid.addPlot(row=0, col=0, rowspan=2, viewBox=self.viewBox1)
self.plot1.hideButtons()
self.plot1.showAxis('left', True)
grid.nextRow()
grid.nextRow()
self.viewBox2 = MyViewBox()
self.plot2 = grid.addPlot(row=2, col=0, rowspan=1, viewBox=self.viewBox2)
self.plot2.hideButtons()
self.plot2.showAxis('left', True)
self.viewBox2.setXLink(self.viewBox1)
#~ self.viewBox.gain_zoom.connect(self.gain_zoom)
#~ self.viewBox.xsize_zoom.connect(self.xsize_zoom)
@property
def catalogue(self):
return self.spikesorter.clustering.catalogue
def refresh(self):
self.plot1.clear()
self.plot2.clear()
#lines
def addSpan(plot):
nb_channel = self.spikesorter.dataio.nb_channel
samples = self.spikesorter.all_waveforms.columns.levels[1]
n_left, n_right = min(samples)+2, max(samples)-1
white = pg.mkColor(255, 255, 255, 20)
width = n_right - n_left
for i in range(nb_channel):
if i%2==1:
region = pg.LinearRegionItem([width*i, width*(i+1)-1], movable = False, brush = white)
plot.addItem(region, ignoreBounds=True)
for l in region.lines:
l.setPen(white)
vline = pg.InfiniteLine(pos = -n_left + width*i, angle=90, movable=False, pen = pg.mkPen('w'))
plot.addItem(vline)
addSpan(self.plot1)
addSpan(self.plot2)
#waveforms
for i,k in enumerate(self.catalogue):
if not self.spikesorter.cluster_visible[k]:
continue
wf0 = self.catalogue[k]['center']
mad = self.catalogue[k]['mad']
color = self.spikesorter.qcolors.get(k, QtGui.QColor('white'))
curve = pg.PlotCurveItem(np.arange(wf0.size), wf0, pen=pg.mkPen(color, width=2))
self.plot1.addItem(curve)
color2 = QtGui.QColor(color)
color2.setAlpha(self.alpha)
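# draw the mean waveform's ±MAD envelope and shade the band between them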
curve1 = pg.PlotCurveItem(np.arange(wf0.size), wf0+mad, pen=color2)
curve2 = pg.PlotCurveItem(np.arange(wf0.size), wf0-mad, pen=color2)
self.plot1.addItem(curve1)
self.plot1.addItem(curve2)
fill = pg.FillBetweenItem(curve1=curve1, curve2=curve2, brush=color2)
self.plot1.addItem(fill)
curve = pg.PlotCurveItem(np.arange(wf0.size), mad, pen=color)
self.plot2.addItem(curve)
#~ ax.axvline(-n_left + width*i, alpha = .05, color = 'k')
def on_peak_selection_changed(self):
pass
# TODO: plot the selected peak if only one is selected
| null |
tridesclous/gui/waveformviewer.py
|
waveformviewer.py
|
py
| 3,821 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyqtgraph.ViewBox",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "base.WidgetBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "base.WidgetBase.__init__",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "base.WidgetBase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.Qt.QtGui.QVBoxLayout",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.Qt.QtGui",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.GraphicsView",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.GraphicsLayout",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.mkColor",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.LinearRegionItem",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.InfiniteLine",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.mkPen",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.Qt.QtGui.QColor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.Qt.QtGui",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.PlotCurveItem",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.mkPen",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.Qt.QtGui.QColor",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.Qt.QtGui",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.PlotCurveItem",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.PlotCurveItem",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.FillBetweenItem",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.PlotCurveItem",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 104,
"usage_type": "call"
}
] |
408651941
|
from __future__ import print_function
from jsonschema import validate
import jsonschema
import json
import sys
'''
https://spacetelescope.github.io/understanding-json-schema/
http://code.activestate.com/recipes/579135-validate-data-easily-with-json-schema/
'''
'''
definitions
'''
# schema = {
# "$schema": "http://json-schema.org/draft-04/schema#",
#
# "definitions": {
# "address": {
# "type": "object",
# "properties": {
# "street_address": { "type": "string" },
# "city": { "type": "string" },
# "state": { "type": "string" }
# },
# "required": ["street_address", "city", "state"]
# }
# },
#
# "type": "object",
# "properties": {
# "billing_address": { "$ref": "#/definitions/address" },
# "shipping_address": { "$ref": "#/definitions/address" }
# }
# }
# data = {
# "shipping_address": {
# "street_address": "1600 Pennsylvania Avenue NW",
# "city": "Washington",
# "state": "DC"
# },
# "billing_address": {
# "street_address": "1st Street SE",
# "city": "Washington",
# "state": "DC"
# }
# }
'''
email format
'''
# schema = {
# "$schema": "http://json-schema.org/draft-04/schema#",
# "type": "string",
# "format": "email"
# }
#
# data = "[email protected]"
'''
list of emails
'''
# schema = {
# "$schema": "http://json-schema.org/draft-04/schema#",
# "type": "array",
# "items": {
# "type": "string",
# "format": "email"
# }
# }
# data = ["[email protected]", "[email protected]"]
'''
list property of emails
'''
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"email_addresses": {
"type": "array",
"items": {
"type": "string",
"format": "email"
}
}
}
}
data = {
"email_addresses": ["aidan", "[email protected]"]
}
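# "aidan" is not a well-formed email address, so the validation below fails
# and the ValidationError is written to stderr.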
try:
validate(data, schema, format_checker=jsonschema.FormatChecker())
except jsonschema.exceptions.ValidationError as ve:
sys.stderr.write(str(ve) + "\n")
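# A passing counterpart (sketch): with only well-formed addresses, validate()
# returns None and nothing is printed.
valid_data = {"email_addresses": ["[email protected]"]}
validate(valid_data, schema, format_checker=jsonschema.FormatChecker())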
| null |
json/json_schema_verifier.py
|
json_schema_verifier.py
|
py
| 2,211 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jsonschema.validate",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "jsonschema.FormatChecker",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 104,
"usage_type": "attribute"
}
] |
518261651
|
# -*- coding: utf-8 -*-
import json
import logging
import time
import threading
import zmq
from lucena.io2.networking import create_pipe
logger = logging.getLogger(__name__)
# Default broadcast interval in seconds; the loop below references
# INTERVAL_DFLT without defining it, so a value of 1.0 is assumed here.
INTERVAL_DFLT = 1.0
class Plugin(object):
def __init__(self, zmq_context):
self.zmq_context = zmq_context
self.poller = zmq.Poller()
self.socket, self.worker_socket = create_pipe(self.zmq_context)
self.signal = threading.Event()
self.thread = None
# State used by the beacon-style handle_pipe()/run() methods below; these
# attributes are referenced there but never initialised, so neutral
# defaults are assumed here.
self.pipe = self.worker_socket
self.udp_socket = None
self.transmit = None
self.interval = 0
self.ping_at = 0
self.filter = None
self.terminated = False
def start(self):
if self.thread:
raise RuntimeError("Worker already started.")
self.thread = threading.Thread(target=self._start_thread)
self.thread.daemon = False
self.thread.start()
self.signal.wait()
def stop(self):
if self.thread is None:
logger.warning("Worker already stopped.")
return
self.socket.set(zmq.SNDTIMEO, 0)
self.socket.send_unicode("$TERM")
self.signal.wait()
def _run(self):
raise NotImplementedError("Implement me in a subclass")
def _start_thread(self):
self.signal.set()
self._run()
# At this point the worker has finished
# and we can clean up everything.
self.socket.close()
self.worker_socket.close()
self.socket = None
self.worker_socket = None
self.thread = None
self.signal.set()
def send(self, *args, **kwargs):
return self.socket.send(*args, **kwargs)
def send_unicode(self, *args, **kwargs):
return self.socket.send_unicode(*args, **kwargs)
def send_multipart(self, *args, **kwargs):
return self.socket.send_multipart(*args, **kwargs)
def send_json(self, *args, **kwargs):
return self.socket.send_json(*args, **kwargs)
def recv(self, *args, **kwargs):
return self.socket.recv(*args, **kwargs)
def recv_unicode(self, *args, **kwargs):
return self.socket.recv_unicode(*args, **kwargs)
def recv_multipart(self, *args, **kwargs):
return self.socket.recv_multipart(*args, **kwargs)
def handle_pipe(self):
# Get just the commands off the pipe
request = self.pipe.recv_multipart()
try:
json_request = json.loads(request[0].decode('utf-8'))
command = json_request.get('command')
except Exception:
command = request.pop(0).decode('UTF-8')
if not command:
return -1 # Interrupted
elif command == "CONFIGURE":
port = json_request.get('port')
self.configure(port)
elif command == "PUBLISH":
self.transmit = request.pop(0)
if self.interval == 0:
self.interval = INTERVAL_DFLT
# Start broadcasting immediately
self.ping_at = time.time()
elif command == "SILENCE":
self.transmit = None
elif command == "SUBSCRIBE":
self.filter = json_request.get('filter')
elif command == "UNSUBSCRIBE":
self.filter = None
elif command == "$TERM":
self.terminated = True
else:
logger.error("zbeacon: - invalid command: {0}".format(command))
def run(self):
# Signal actor successfully initialized
self.pipe.signal()
self.poller = zmq.Poller()
self.poller.register(self.pipe, zmq.POLLIN)
self.poller.register(self.udp_socket, zmq.POLLIN)
while not self.terminated:
timeout = 1
if self.transmit:
timeout = self.ping_at - time.time()
if timeout < 0:
timeout = 0
# Poll on API pipe and on UDP socket
items = dict(self.poller.poll(timeout * 1000))
if self.pipe in items and items[self.pipe] == zmq.POLLIN:
self.handle_pipe()
if self.udp_socket.fileno() in items \
and items[self.udp_socket.fileno()] == zmq.POLLIN:
self.handle_udp()
if self.transmit and time.time() >= self.ping_at:
self.send_beacon()
self.ping_at = time.time() + self.interval
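# Hypothetical usage sketch (names assumed; Plugin is abstract, so a subclass
# must implement _run() before the pipe protocol above does anything useful):
#   ctx = zmq.Context.instance()
#   worker = MyPlugin(ctx)
#   worker.start()
#   worker.send_json({'command': 'CONFIGURE', 'port': 5555})
#   worker.stop()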
| null |
lucena/plugins/plugin.py
|
plugin.py
|
py
| 4,173 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "zmq.Poller",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "lucena.io2.networking.create_pipe",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "zmq.SNDTIMEO",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "zmq.Poller",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "zmq.POLLIN",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "zmq.POLLIN",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "zmq.POLLIN",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "zmq.POLLIN",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 130,
"usage_type": "call"
}
] |
175352133
|
import os, zipfile, urllib.request
import re, lxml.etree
import numpy as np
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import CountVectorizer
class TedData(object):
def __init__(self, vocab_size):
if not os.path.isfile('ted_en-20160408.zip'):
urllib.request.urlretrieve("https://wit3.fbk.eu/get.php?path=XML_releases/xml/ted_en-20160408.zip&filename=ted_en-20160408.zip", filename="ted_en-20160408.zip")
with zipfile.ZipFile('ted_en-20160408.zip', 'r') as z:
self.xml = lxml.etree.parse(z.open('ted_en-20160408.xml', 'r'))
string_documents = []
for raw_document in self.xml.xpath('//content/text()'):
# Remove everything in parens
no_parens = re.sub(r'\([^)]*\)', '', raw_document)
# Remove line breaks and "foo: " prefixes
merged_lines = re.sub(r'\n([^:]{,20}:)?', ' ', no_parens)
# Lowercase, remove special chars
ascii_ = re.sub(r'[^a-z0-9\'\s]+', '', merged_lines.lower())
string_documents.append(ascii_)
self.permutation = np.random.permutation(len(string_documents))
string_documents = np.asarray(string_documents)[self.permutation]
self.representation = CountVectorizer(stop_words='english',
max_features=vocab_size-1, token_pattern='(?u)\\b\w+[\w\']*\\b')
self.representation.fit(string_documents[:1585])
self.UNKNOWN = 0
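# shift every CountVectorizer vocabulary id up by one so that id 0 stays
# reserved for out-of-vocabulary words: get() falls back to UNKNOWN - 1 and
# the + 1 below maps that back to UNKNOWN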
docs = [[self.representation.vocabulary_.get(word, self.UNKNOWN - 1) + 1
for word in doc.split()] for doc in string_documents]
self._x_train = np.copy(docs[:1585])
self.x_valid, self.x_valid_l = self._toNumpy(docs[1585:1835])
self.x_test, self.x_test_l = self._toNumpy(docs[1835:])
def _toNumpy(self, array):
length = np.array([len(a) for a in array])
mask = np.arange(length.max()) < length[:,None]
nparray = np.zeros(mask.shape)
nparray[mask] = np.concatenate(array)
return nparray, length
def vocabulary(self):
return self.representation.vocabulary_
class TedDataWithLabels(TedData):
def __init__(self, vocab_size):
TedData.__init__(self, vocab_size)
labels = [
(1 if "technology" in raw_category else 0) +
(2 if "entertainment" in raw_category else 0) +
(4 if "design" in raw_category else 0)
for raw_category in self.xml.xpath('//keywords/text()')]
del self.xml
labels = np.asarray(labels)[self.permutation]
self._y_train = np.copy(labels[:1585])
self.y_valid = labels[1585:1835]
self.y_test = labels[1835:]
def training_batches(self, batch_size):
docs, labels = shuffle(self._x_train, self._y_train)
for i in range(0, 1585, batch_size):
a, l = self._toNumpy(docs[i:i+batch_size])
yield a, l, labels[i:i+batch_size]
class TedDataSeq(TedData):
def __init__(self, vocab_size):
TedData.__init__(self, vocab_size-3)
del self.xml
self.START = vocab_size - 3
self.END = vocab_size - 2
self.BLANK = vocab_size - 1
# the parent class already sliced the validation/test splits, so re-slicing
# with [1585:1835] / [1835:] would yield empty arrays; pad the already-split
# documents instead, trimming the zero padding with the true lengths
# recorded in x_valid_l / x_test_l
self.x_valid, self.y_valid = self._pad(
    [list(map(int, doc[:int(n)])) for doc, n in zip(self.x_valid, self.x_valid_l)])
self.x_test, self.y_test = self._pad(
    [list(map(int, doc[:int(n)])) for doc, n in zip(self.x_test, self.x_test_l)])
def _pad(self, documents):
length = max(map(len, documents))
batch = np.array([[self.START]+document+[self.END] + [self.BLANK]*(length-len(document)) for document in documents])
return batch[:,:-1], batch[:,1:]
def training_batches(self, batch_size):
docs = shuffle(self._x_train)
docs = docs[np.argsort([len(d) for d in docs], kind='mergesort')]
# docs are now sorted but random within the same length
for i in range(0, 1585, batch_size):
batch = docs[i:i+batch_size]
yield self._pad(batch)  # _pad computes the max length itself
def Glove(dims):
if not os.path.isfile('glove.6B.zip'):
urllib.request.urlretrieve("http://nlp.stanford.edu/data/glove.6B.zip", filename="glove.6B.zip")
with zipfile.ZipFile('glove.6B.zip', 'r') as z:
file = z.open('glove.6B.'+str(dims)+'d.txt', 'r')
embedding = {}
for line in file:
items = line.decode("utf-8").strip().split(' ')
assert len(items) == dims + 1  # the word itself plus dims vector components
word = items[0]
vec = [float(i) for i in items[1:]]
embedding[word] = vec
return embedding
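# Minimal usage sketch (sizes assumed, for illustration only):
#   ted = TedDataWithLabels(vocab_size=10000)
#   for docs, lengths, labels in ted.training_batches(batch_size=32):
#       ...  # docs is a padded id matrix, lengths the true document lengths
#   glove = Glove(50)  # dict mapping word -> 50-dimensional vector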
| null |
data.py
|
data.py
|
py
| 4,148 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.isfile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lxml.etree.etree.parse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree.etree",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "lxml.etree",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 98,
"usage_type": "call"
}
] |
569578285
|
import datetime
from django.db import models
from django.db.models import Subquery, OuterRef, F, ExpressionWrapper, Q, Sum, Avg, Count, Value, Func, \
Case, When, Prefetch
from django.db.models.functions import Coalesce, Greatest, Round
from django.utils import timezone
from django.core.exceptions import ValidationError as DjangoValidationError
from core.models import CoreModel, CoreModelManager
from core.models import Round as CustomRound
from sows_events import models as events_models
from sows import models as sows_models
from piglets import models as piglets_models
import piglets_events
import transactions
class TourQuerySet(models.QuerySet):
def add_sow_data(self):
subquery_seminated = self.filter(semination__tour__pk=OuterRef('pk')) \
.values('semination__tour') \
.annotate(cnt_seminated=Count('semination__sow', distinct=True)) \
.values('cnt_seminated')
subquery_usound28_suporos = self.filter(
ultrasound__tour__pk=OuterRef('pk'), ultrasound__u_type__days=30,
ultrasound__result=True) \
.values('ultrasound__tour') \
.annotate(cnt_usound28_sup=Count('ultrasound__sow', distinct=True)) \
.values('cnt_usound28_sup')
subquery_usound28_proholost = self.filter(
ultrasound__tour__pk=OuterRef('pk'), ultrasound__u_type__days=30,
ultrasound__result=False) \
.values('ultrasound__tour') \
.annotate(cnt_usound28_proh=Count('ultrasound__sow', distinct=True)) \
.values('cnt_usound28_proh')
subquery_usound35_suporos = self.filter(
ultrasound__tour__pk=OuterRef('pk'), ultrasound__u_type__days=60,
ultrasound__result=True) \
.values('ultrasound__tour') \
.annotate(cnt_usound35_sup=Count('ultrasound__sow', distinct=True)) \
.values('cnt_usound35_sup')
subquery_usound35_proholost = self.filter(
ultrasound__tour__pk=OuterRef('pk'), ultrasound__u_type__days=60,
ultrasound__result=False) \
.values('ultrasound__tour') \
.annotate(cnt_usound35_proh=Count('ultrasound__sow', distinct=True)) \
.values('cnt_usound35_proh')
subquery_abort = self.filter(abortionsow__tour__pk=OuterRef('pk')) \
.values('abortionsow__tour') \
.annotate(cnt_abort=Count('abortionsow__sow', distinct=True)) \
.values('cnt_abort')
return self.annotate(
count_sow=Count('sows'),
count_seminated=Subquery(subquery_seminated),
count_usound28_suporos=Subquery(subquery_usound28_suporos),
count_usound28_proholost=Subquery(subquery_usound28_proholost),
count_usound35_suporos=Subquery(subquery_usound35_suporos),
count_usound35_proholost=Subquery(subquery_usound35_proholost),
count_abort=Subquery(subquery_abort),
)
def add_farrow_data(self):
data = dict()
for born_type in ['alive', 'dead', 'mummy']:
data[f'total_born_{born_type}'] = Subquery(
self.filter(sowfarrow__tour__pk=OuterRef('pk')) \
.values('sowfarrow__tour') \
.annotate(total_born=Sum(f'sowfarrow__{born_type}_quantity')) \
.values('total_born')
,output_field=models.IntegerField())
data['gilt_count'] = Subquery(self.filter(gilt__tour__pk=OuterRef('pk')) \
.values('gilt__tour') \
.annotate(cnt_gilt=Count('gilt')) \
.values('cnt_gilt'),
output_field=models.IntegerField())
data['count_farrows'] = Subquery(
self.filter(sowfarrow__tour__pk=OuterRef('pk'))
.values('sowfarrow__tour') \
.annotate(farrow_cnt=Count('sowfarrow__sow')) \
.values('farrow_cnt'))
return self.annotate(**data)
def add_farrow_percentage(self):
return self.annotate(farrow_percentage=ExpressionWrapper(
(F('count_farrows') * 100 ) / F('count_seminated'), output_field=models.FloatField()))
def add_count_tour_sow(self):
data = dict()
for ws_number in [1, 2, 3]:
data[f'ws{ws_number}_count_tour_sow'] = Subquery(
sows_models.Sow.objects.filter(tour__pk=OuterRef('pk')).filter(
Q(
Q(location__workshop__number=ws_number) |
Q(location__section__workshop__number=ws_number) |
Q(location__sowAndPigletsCell__workshop__number=ws_number)
)) \
.values('tour') \
.annotate(cnt_tour_sow=Count('*')) \
.values('cnt_tour_sow'),
output_field=models.IntegerField())
return self.annotate(**data)
def add_week_weight(self, places=['3/4', '4/8', '8/5', '8/6', '8/7']):
data = dict()
for place in places:
place_formatted = place.replace('/', '_')
weights_subquery = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'), place=place,) \
.values('place')
# total weights
weights_subquery_total_weight = weights_subquery.annotate(weight=Sum('total_weight')) \
.values('weight')
data[f'week_weight_{place_formatted}'] = Coalesce(Subquery(weights_subquery_total_weight,
output_field=models.FloatField()), 0.0)
# avg weights
weights_subquery_avg_weight = weights_subquery.annotate(avg_weight=Avg('average_weight')) \
.values('avg_weight')
data[f'week_weight_avg_{place_formatted}'] = Coalesce(Subquery(weights_subquery_avg_weight,
output_field=models.FloatField()), 0.0)
# qnty weights
weights_subquery_qnty_weight = weights_subquery.annotate(qnty_weight=Sum('piglets_quantity')) \
.values('qnty_weight')
data[f'week_weight_qnty_{place_formatted}'] = Coalesce(Subquery(weights_subquery_qnty_weight,
output_field=models.IntegerField()), 0)
weights_subquery_count_weight = weights_subquery.annotate(count_weight=Count('*')) \
.values('count_weight')
data[f'week_weight_count_{place_formatted}'] = Subquery(weights_subquery_count_weight,
output_field=models.IntegerField())
return self.annotate(**data)
def add_week_weight_ws8_v2(self):
# avg by week_tour
weights_subquery = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place__in=['8/5', '8/6', '8/7']) \
.values('week_tour')
weights_subquery_avg_weight = weights_subquery.annotate(avg_weight=Avg('average_weight')) \
.values('avg_weight')
return self.annotate(
week_weight_qnty_ws8=F('week_weight_qnty_8_5') + F('week_weight_qnty_8_6') + \
F('week_weight_qnty_8_7'),
week_weight_avg_ws8=Coalesce(Subquery(weights_subquery_avg_weight,
output_field=models.FloatField()), 0.0)
)
def add_weighing_first_dates(self):
data = dict()
for place in ['3/4', '4/8', '8/5', '8/6', '8/7']:
place_formatted = place.replace('/', '_')
first_date_weights_subquery = Subquery(piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place=place) \
.order_by('date') \
.values('date__date')[:1], output_field=models.DateTimeField())
data[f'first_date_{place_formatted}'] = first_date_weights_subquery
data['first_date_spec'] = Subquery(piglets_events.models.CullingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
culling_type='spec') \
.order_by('date') \
.values('date__date')[:1], output_field=models.DateTimeField())
return self.annotate(**data)
def add_culling_data_by_week_tour(self, ws_numbers=[3, 4, 5, 6, 7, 8]):
data = dict()
for ws_number in ws_numbers:
for c_type in ['padej', 'prirezka', 'vinuzhd', 'spec']:
culling_subquery = piglets_events.models.CullingPiglets.objects \
.filter(
culling_type=c_type,
week_tour__pk=OuterRef('pk')
).filter(
Q(
Q(location__workshop__number=ws_number) |
Q(location__pigletsGroupCell__workshop__number=ws_number) |
Q(location__section__workshop__number=ws_number) |
Q(location__sowAndPigletsCell__workshop__number=ws_number)
)
)
culling_subquery_qnty = culling_subquery \
.values('culling_type') \
.annotate(culling_qnty=Sum('quantity')) \
.values('culling_qnty')
data[f'ws{ws_number}_{c_type}_quantity'] = Coalesce(
Subquery(culling_subquery_qnty, output_field=models.IntegerField()), 0)
if ws_number in [5, 6, 7]:
if c_type == 'prirezka':
continue
if c_type == 'spec':
culling_subquery_avg_weight = culling_subquery \
.values('culling_type') \
.annotate(culling_avg_weight=Avg(F('total_weight') / F('quantity'), output_field=models.FloatField())) \
.values('culling_avg_weight')
data[f'ws{ws_number}_{c_type}_avg_weight'] = Coalesce(
Subquery(culling_subquery_avg_weight,
output_field=models.FloatField()), 0.0)
return self.annotate(**data)
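    # Annotation naming above (illustrative): ws_number=5 with culling type
    # 'padej' yields ws5_padej_quantity; for 'spec' in workshops 5-7 an extra
    # ws5_spec_avg_weight (mean kg per culled head) is added as well.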
@staticmethod
def gen_places_from_ws_number(ws_numbers):
places = list()
if 4 in ws_numbers:
places.append('3_4')
if 8 in ws_numbers:
places.append('4_8')
if 5 in ws_numbers:
places.append('8_5')
if 6 in ws_numbers:
places.append('8_6')
if 7 in ws_numbers:
places.append('8_7')
return places
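    # Illustrative mapping: gen_places_from_ws_number([4, 8, 5]) == ['3_4', '4_8', '8_5'];
    # each destination workshop is represented by the weighing place of the
    # transfer into it, emitted in the fixed order 4, 8, 5, 6, 7.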
# to delete
def add_culling_percentage(self, ws_numbers=[3, 4, 5, 6, 7, 8]):
data = dict()
places = self.gen_places_from_ws_number(ws_numbers=ws_numbers)
if 3 in ws_numbers:
data['ws3_padej_percentage'] = Case(
When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
When(total_born_alive__gt=0,
then=ExpressionWrapper(
F('ws3_padej_quantity') * 100.0 / F('total_born_alive'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
data['ws3_prirezka_percentage'] = Case(
When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
When(total_born_alive__gt=0,
then=ExpressionWrapper(
F('ws3_prirezka_quantity') * 100.0 / F('total_born_alive'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
            # work on a copy so the (mutable) default argument is not modified
            ws_numbers = [n for n in ws_numbers if n != 3]
        # gen_places_from_ws_number() emits places in the fixed workshop order
        # 4, 8, 5, 6, 7; pair against that order instead of zipping the caller's
        # list, which mispaired workshops 5..8 after removing 3
        ordered_ws = [n for n in [4, 8, 5, 6, 7] if n in ws_numbers]
        for ws_number, place_number in zip(ordered_ws, places):
lookup1 = {f'week_weight_qnty_{place_number}__isnull': True, }
lookup2 = {f'week_weight_qnty_{place_number}': 0, }
lookup3 = {f'week_weight_qnty_{place_number}__gt': 0, }
data[f'ws{ws_number}_padej_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_padej_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
data[f'ws{ws_number}_vinuzhd_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_vinuzhd_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
if ws_number in [4, 8]:
data[f'ws{ws_number}_prirezka_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_prirezka_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
return self.annotate(**data)
def add_culling_percentage_by_ws3(self):
data = dict()
data['ws3_padej_percentage'] = Case(
When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
When(total_born_alive__gt=0,
then=ExpressionWrapper(
F('ws3_padej_quantity') * 100.0 / F('total_born_alive'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
data['ws3_prirezka_percentage'] = Case(
When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
When(total_born_alive__gt=0,
then=ExpressionWrapper(
F('ws3_prirezka_quantity') * 100.0 / F('total_born_alive'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
return self.annotate(**data)
def add_culling_percentage_by_ws_exclude_ws3(self, ws_number, place_number):
lookup1 = {f'week_weight_qnty_{place_number}__isnull': True, }
lookup2 = {f'week_weight_qnty_{place_number}': 0, }
lookup3 = {f'week_weight_qnty_{place_number}__gt': 0, }
data = dict()
data[f'ws{ws_number}_padej_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_padej_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
data[f'ws{ws_number}_vinuzhd_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_vinuzhd_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
if ws_number in [4, 8]:
data[f'ws{ws_number}_prirezka_percentage'] = Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
F(f'ws{ws_number}_prirezka_quantity') * 100.0 / F(f'week_weight_qnty_{place_number}'),
output_field=models.FloatField())
), output_field=models.FloatField()
)
return self.annotate(**data)
def add_culling_percentage_otkorm(self):
lookup1 = {f'week_weight_qnty_ws8__isnull': True, }
lookup2 = {f'week_weight_qnty_ws8': 0, }
lookup3 = {f'week_weight_qnty_ws8__gt': 0, }
return self.annotate(
otkorm_padej_qnty=F('ws5_padej_quantity') + F('ws6_padej_quantity') + \
F('ws7_padej_quantity'),
otkorm_vinuzhd_qnty=F('ws5_vinuzhd_quantity') + F('ws6_vinuzhd_quantity') + \
F('ws7_vinuzhd_quantity'),
otkorm_padej_percentage=Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
(F('ws5_padej_quantity') + F('ws6_padej_quantity') + F('ws7_padej_quantity')) \
* 100.0 / F('week_weight_qnty_ws8'), output_field=models.FloatField()
)
)
),
otkorm_vinuzhd_percentage=Case(
When(Q(**lookup1) | Q(**lookup2), then=0.0),
When(**lookup3,
then=ExpressionWrapper(
(F('ws5_vinuzhd_quantity') + F('ws6_vinuzhd_quantity') + F('ws7_vinuzhd_quantity')) \
* 100.0 / F('week_weight_qnty_ws8'), output_field=models.FloatField()
)
)
),
)
def add_sow_events(self, sow):
return self.prefetch_related(
Prefetch(
'semination_set',
queryset=events_models.Semination.objects.filter(sow=sow),
to_attr='sow_semination'
),
Prefetch(
'ultrasound_set',
queryset=events_models.Ultrasound.objects.filter(sow=sow),
to_attr='sow_ultrasound'
),
Prefetch(
'sowfarrow_set',
queryset=events_models.SowFarrow.objects.filter(sow=sow),
to_attr='sow_farrow'
),
Prefetch(
'weaningsow_set',
queryset=events_models.WeaningSow.objects.filter(sow=sow),
to_attr='sow_weaning'
),
)
def add_remont_trs_out(self, ws_numbers=[5, 6, 7]):
data = dict()
for ws_number in ws_numbers:
data[f'ws{ws_number}_remont'] = Subquery(
self.filter(
piglets_transactions__week_tour__pk=OuterRef('pk'),
piglets_transactions__to_location__workshop__number=2,
piglets_transactions__from_location__pigletsGroupCell__workshop__number=ws_number,
) \
.values('piglets_transactions__week_tour') \
.annotate(ws_remont_total=Sum('piglets_transactions__quantity'))
.values('ws_remont_total'))
if ws_numbers == [5, 6, 7]:
data['count_remont_total'] = Subquery(
self.filter(piglets_transactions__week_tour__pk=OuterRef('pk'),
piglets_transactions__to_location__workshop__number=2) \
.values('piglets_transactions__week_tour') \
.annotate(remont_total=Sum('piglets_transactions__quantity'))
.values('remont_total'))
return self.annotate(**data)
def add_remont_to_sows(self, ws_numbers=[5, 6, 7]):
data = dict()
for ws_number in ws_numbers:
data[f'ws{ws_number}_remont'] = Subquery(events_models.PigletsToSowsEvent.objects \
.filter(metatour__week_tour__pk=OuterRef('pk'),
piglets__location__pigletsGroupCell__workshop__number=ws_number) \
.values('metatour__week_tour') \
.annotate(ws_remont_total=Sum('quantity')) \
.values('ws_remont_total'))
if ws_numbers == [5, 6, 7]:
            # wrapped in Subquery() so it is a valid annotation expression
            data['count_remont_total'] = Subquery(events_models.PigletsToSowsEvent.objects \
                .filter(metatour__week_tour__pk=OuterRef('pk'),
                    piglets__location__pigletsGroupCell__workshop__number__in=[5, 6, 7]) \
                .values('metatour__week_tour') \
                .annotate(remont_total=Sum('quantity')) \
                .values('remont_total'))
return self.annotate(**data)
def add_remont_to_sows_otkorm_all(self):
return self.annotate(count_remont_total=Coalesce(Subquery(
events_models.PigletsToSowsEvent.objects \
.filter(metatour__week_tour__pk=OuterRef('pk')) \
.values('metatour__week_tour') \
.annotate(remont_total=Sum('quantity')) \
.values('remont_total')), 0)
)
@staticmethod
def get_place_formatted(places):
if len(places) == 1:
return places[0].replace('/', '_')
else:
return 'ws8'
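    # The helper below builds a weighted average age expression:
    # sum(age_i * qty_i) / sum(qty_i). The name was presumably meant to read
    # sv_avg_age_expr; it is left unchanged in case other modules call it.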
@staticmethod
def sv_avg_ave_expr():
return ExpressionWrapper(
Sum(
ExpressionWrapper(
F('piglets_age')
* F('quantity'),
output_field=models.FloatField())
)
/ Sum('quantity'),
output_field=models.FloatField()
)
def subquery_sv_age_at_place(self, places):
place_formatted = self.get_place_formatted(places=places)
        ann_data = {f'weight_sv_avg_age_{place_formatted}': ExpressionWrapper(
Sum(
ExpressionWrapper(
F('piglets_age')
* F('piglets_quantity'),
output_field=models.FloatField())
)
/ Sum('piglets_quantity'),
output_field=models.FloatField()
) }
return piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place__in=places
).values('week_tour') \
.annotate(**ann_data) \
.values(f'weight_sv_avg_age_{place_formatted}')
def subquery_total2_place(self, places):
place_formatted = self.get_place_formatted(places=places)
ann_data = {f'total2_{place_formatted}': Sum('total_weight')}
return piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place__in=places
).values('week_tour') \
.annotate(**ann_data) \
.values(f'total2_{place_formatted}')
def subquery_avg_place(self, places):
place_formatted = self.get_place_formatted(places=places)
ann_data = {f'avg_{place_formatted}': Avg('average_weight')}
return piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place__in=places
).values('week_tour') \
.annotate(**ann_data) \
.values(f'avg_{place_formatted}')
def add_prives_prepare(self, places=['3/4', '4/8', '8/5', '8/6', '8/7']):
data = dict()
for place in places:
place_formatted = place.replace('/', '_')
data[f'sv_age_{place_formatted}'] = self.subquery_sv_age_at_place([place])
data[f'total2_{place_formatted}'] = self.subquery_total2_place([place])
if '8/5' in places or '8/6' in places or '8/7' in places:
data['sv_age_ws8'] = self.subquery_sv_age_at_place(['8/5', '8/6', '8/7'])
data['total2_ws8'] = self.subquery_total2_place(['8/5', '8/6', '8/7'])
return self.annotate(**data)
def add_prives_prepare_otkorm_weight_data_without_remont(self, ws_numbers=[5, 6, 7]):
data = dict()
for ws_number in ws_numbers:
data[f'total3_8_{ws_number}'] = ExpressionWrapper(
F(f'week_weight_8_{ws_number}') - \
(F(f'ws{ws_number}_remont') * F(f'week_weight_avg_8_{ws_number}')),
output_field=models.FloatField()
)
return self.annotate(**data)
def add_prives_prepare_otkorm_without_remont_all_ws(self, weight_total_label,
weight_avg_label, remont_label):
data = dict()
data['total3'] = ExpressionWrapper(
F(weight_total_label) - \
(F(remont_label) * F(weight_avg_label)),
output_field=models.FloatField()
)
return self.annotate(**data)
def add_prives_prepare_spec(self, ws_numbers=[5, 6, 7]):
data = dict()
for ws_number in ws_numbers:
subquery_age = Subquery(piglets_events.models.CullingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
culling_type='spec',
location__pigletsGroupCell__workshop__number=ws_number,
) \
.values('week_tour') \
.annotate(spec_sv_avg_age=ExpressionWrapper(
Sum(
ExpressionWrapper(
F('piglets_age')
* F('quantity'),
output_field=models.FloatField())
)
/ Sum('quantity'),
output_field=models.FloatField()
)) \
.values('spec_sv_avg_age'))
subquery_weight_total = Subquery(piglets_events.models.CullingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
culling_type='spec',
location__pigletsGroupCell__workshop__number=ws_number,
) \
.values('week_tour') \
.annotate(spec_prives_total_weight=Sum('total_weight')) \
.values('spec_prives_total_weight'))
data[f'spec_sv_avg_age_ws{ws_number}'] = subquery_age
data[f'spec_weight_total_ws{ws_number}'] = subquery_weight_total
return self.annotate(**data)
def add_prives_prepare_spec_all_otkorm(self):
data = dict()
subquery_age = Subquery(piglets_events.models.CullingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
culling_type='spec',
) \
.values('week_tour') \
.annotate(spec_sv_avg_age=ExpressionWrapper(
Sum(
ExpressionWrapper(
F('piglets_age')
* F('quantity'),
output_field=models.FloatField())
)
/ Sum('quantity'),
output_field=models.FloatField()
)) \
.values('spec_sv_avg_age'))
data[f'spec_sv_avg_age_otkorm'] = subquery_age
return self.annotate(**data)
@staticmethod
def gen_prives_otkorm(ws_number):
return \
(F(f'spec_weight_total_ws{ws_number}') - F(f'total2_8_{ws_number}')) / \
(F(f'spec_sv_avg_age_ws{ws_number}') - F(f'sv_age_8_{ws_number}')), \
(F(f'spec_weight_total_ws{ws_number}') - F(f'total3_8_{ws_number}')) / \
(F(f'spec_sv_avg_age_ws{ws_number}') - F(f'sv_age_8_{ws_number}'))
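    # "Prives" (gain) above is (weight_out - weight_in) / (avg_age_out - avg_age_in):
    # once with the raw input weight (total2_*) and once with the remont-adjusted
    # input weight (total3_*), i.e. net of piglets taken out as replacement gilts.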
@staticmethod
def gen_prives_otkorm_all(ws_number):
return \
(F(f'spec_weight_total_ws{ws_number}') - F(f'total3_8_{ws_number}')) / \
(F(f'spec_sv_avg_age_ws{ws_number}') - F(f'sv_age_8_{ws_number}'))
def add_prives(self, ws_numbers=[3, 4, 8, 5, 6, 7]):
data = dict()
prives_prepare_places = []
prives_prepare_spec_places = []
if 3 in ws_numbers:
data['prives_3'] = (F('total2_3_4') / F('sv_age_3_4'))
prives_prepare_places.append('3/4')
if 4 in ws_numbers:
data['prives_4'] = (F('total2_4_8') - F('total2_3_4')) / (F('sv_age_4_8') - F('sv_age_3_4'))
prives_prepare_places.append('4/8')
prives_prepare_places.append('3/4')
if 8 in ws_numbers:
data['prives_8'] = (F('total2_ws8') - F('total2_4_8')) / (F('sv_age_ws8') - F('sv_age_4_8'))
prives_prepare_places.append('4/8')
prives_prepare_places.append('8/5')
if 5 in ws_numbers:
data['prives_5'], data['prives_without_remont_5'] = \
self.gen_prives_otkorm(ws_number=5)
prives_prepare_places.append('8/5')
prives_prepare_spec_places.append(5)
if 6 in ws_numbers:
data['prives_6'], data['prives_without_remont_6'] = \
self.gen_prives_otkorm(ws_number=6)
prives_prepare_places.append('8/6')
prives_prepare_spec_places.append(6)
if 7 in ws_numbers:
data['prives_7'], data['prives_without_remont_7'] = \
self.gen_prives_otkorm(ws_number=7)
prives_prepare_places.append('8/7')
prives_prepare_spec_places.append(7)
return self.add_prives_prepare(places=prives_prepare_places) \
.add_prives_prepare_spec(ws_numbers=prives_prepare_spec_places) \
.add_prives_prepare_otkorm_weight_data_without_remont(ws_numbers=prives_prepare_spec_places) \
.annotate(**data)
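    # Hedged usage sketch: add_prives() chains its own prepare-methods, so a
    # fattening report can read roughly as
    #   Tour.objects.all() \
    #       .add_week_weight() \
    #       .add_remont_trs_out() \
    #       .add_prives(ws_numbers=[5, 6, 7])
    # (the exact prerequisite annotations depend on the ws_numbers requested).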
def add_prives_all_otkorm_for_dashboard(self):
return self.add_remont_to_sows_otkorm_all() \
.annotate(
sv_age_ws8=self.subquery_sv_age_at_place(places=['8/5', '8/6','8/7']),
total2_ws8=self.subquery_total2_place(places=['8/5', '8/6', '8/7']),
avg_ws8=self.subquery_avg_place(places=['8/5', '8/6', '8/7'])
) \
.add_prives_prepare_spec_all_otkorm() \
.add_prives_prepare_otkorm_without_remont_all_ws(
weight_total_label='total2_ws8',
weight_avg_label='avg_ws8',
remont_label='count_remont_total'
) \
.annotate(prives=Round(
(F('weaning_total_weight') - F('total3')) /
(F('spec_sv_avg_age_otkorm') - F('sv_age_ws8'))
)
) \
.annotate(prives_na_1g=Round(ExpressionWrapper(F('prives') * 1000 /
F('weaning_quantity'), output_field=models.FloatField())))
@staticmethod
def gen_prives_otkorm_na_1g(ws_number):
return \
ExpressionWrapper(
F(f'prives_{ws_number}') * 1000 / F(f'ws{ws_number}_spec_quantity'),
output_field=models.FloatField()), \
ExpressionWrapper(
F(f'prives_without_remont_{ws_number}') * 1000 / F(f'ws{ws_number}_spec_quantity'),
output_field=models.FloatField())
def add_prives_na_1g(self, ws_numbers=[3, 4, 8, 5, 6, 7]):
data = dict()
if 3 in ws_numbers:
data['prives_1g_3'] = ExpressionWrapper(
F('prives_3') * 1000 / F('week_weight_qnty_3_4'),
output_field=models.FloatField())
if 4 in ws_numbers:
data['prives_1g_4'] = ExpressionWrapper(
F('prives_4') * 1000 / F('week_weight_qnty_4_8'),
output_field=models.FloatField())
if 8 in ws_numbers:
data['prives_1g_8'] = ExpressionWrapper(
F('prives_8') * 1000 / F('week_weight_qnty_ws8'),
output_field=models.FloatField())
if 5 in ws_numbers:
data['prives_1g_5'], data['prives_without_remont_1g_5'] = \
self.gen_prives_otkorm_na_1g(ws_number=5)
if 6 in ws_numbers:
data['prives_1g_6'], data['prives_without_remont_1g_6'] = \
self.gen_prives_otkorm_na_1g(ws_number=6)
if 7 in ws_numbers:
data['prives_1g_7'], data['prives_without_remont_1g_7'] = \
self.gen_prives_otkorm_na_1g(ws_number=7)
return self.annotate(**data)
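    # Units above (illustrative): prives_* is kg gained per day of age
    # difference, so prives_1g_* = prives_* * 1000 / heads reads as average
    # daily gain per head in grams.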
def add_prives_na_1g_by_ws348(self, ws_number, week_weight_label=None):
if not week_weight_label:
week_weight_label = 'week_weight_qnty_3_4'
data = dict()
data[f'prives_1g_{ws_number}'] = ExpressionWrapper(
F(f'prives_{ws_number}') * 1000 / F(week_weight_label),
output_field=models.FloatField())
return self.annotate(**data)
def add_dates_ws1_2(self):
first_semination_date = self.filter(semination__tour__pk=OuterRef('pk')) \
.values('semination__date__date').order_by('semination__date')[:1]
last_farrow_date = self.filter(sowfarrow__tour__pk=OuterRef('pk')) \
.values('sowfarrow__date__date').order_by('-sowfarrow__date')[:1]
return self.annotate(first_semination_date=Subquery(first_semination_date),
last_farrow_date=Subquery(last_farrow_date))
def add_dates_ws3(self):
first_farrow_date = self.filter(sowfarrow__tour__pk=OuterRef('pk')) \
.values('sowfarrow__date__date').order_by('sowfarrow__date')[:1]
last_weight_out_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place='3/4').order_by('-date') \
.values('date__date')[:1]
return self.annotate(first_farrow_date=Subquery(first_farrow_date),
ws3_last_weight_out_date=Subquery(last_weight_out_date))
def add_dates_ws4(self):
first_weight_in_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place='3/4').order_by('date') \
.values('date__date')[:1]
last_weight_out_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place='4/8').order_by('-date') \
.values('date__date')[:1]
return self.annotate(ws4_first_weight_in_date=Subquery(first_weight_in_date),
ws4_last_weight_out_date=Subquery(last_weight_out_date))
def add_dates_ws8(self):
first_weight_in_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place='4/8').order_by('date') \
.values('date__date')[:1]
last_weight_out_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place__in=['8/5','8/6','8/7']).order_by('-date') \
.values('date__date')[:1]
return self.annotate(ws8_first_weight_in_date=Subquery(first_weight_in_date),
ws8_last_weight_out_date=Subquery(last_weight_out_date))
def add_dates_ws_otkorm(self, ws_number):
place = f'8/{ws_number}'
first_weight_in_date = piglets_events.models.WeighingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
place=place).order_by('date') \
.values('date__date')[:1]
last_spec_date = piglets_events.models.CullingPiglets.objects.filter(
week_tour__pk=OuterRef('pk'),
culling_type='spec',
location__pigletsGroupCell__workshop__number=ws_number,
).order_by('-date') \
.values('date__date')[:1]
data = dict()
data[f'ws{ws_number}_first_weight_in_date'] = Subquery(first_weight_in_date)
data[f'ws{ws_number}_last_spec_date'] = Subquery(last_spec_date)
return self.annotate(**data)
def add_weights_for_dashboard_chart_ws348(self, places):
weights_subquery = piglets_events.models.WeighingPiglets.objects \
.filter(place__in=places, week_tour__pk=OuterRef('pk')) \
.values('week_tour')
return self.annotate(
weaning_quantity=Subquery(
weights_subquery.annotate(qnty=Sum('piglets_quantity')).values('qnty')),
weaning_total_weight=Round(Subquery(
weights_subquery.annotate(total=Sum('total_weight')).values('total')), precision=2),
weaning_avg_weight=Round(Subquery(
weights_subquery.annotate(avg_w=Avg('average_weight')).values('avg_w')), precision=2),
)
def add_spec_for_dashboard_chart_ws_otkorm(self):
weights_subquery = piglets_events.models.CullingPiglets.objects \
.filter(culling_type='spec', week_tour__pk=OuterRef('pk')) \
.values('week_tour')
return self.annotate(
weaning_quantity=Subquery(
weights_subquery.annotate(qnty=Sum('quantity')).values('qnty')),
weaning_total_weight=Round(Subquery(
weights_subquery.annotate(total=Sum('total_weight')).values('total')), precision=2),
weaning_avg_weight=Round(Subquery(
weights_subquery.annotate(avg_w=Avg('avg_weight')).values('avg_w'))),
)
def add_ws_piglets_income_for_dashboard_chart(self, subdivision):
if subdivision == 3:
return self.annotate(
alive_piglets_for_safety=Sum('sowfarrow__alive_quantity')
)
places = []
if subdivision == 4:
places = ['3/4']
if subdivision == 8:
places = ['4/8']
if subdivision == '_otkorm':
places = ['8/5', '8/6', '8/7']
weights_subquery = piglets_events.models.WeighingPiglets.objects \
.filter(place__in=places, week_tour__pk=OuterRef('pk')) \
.values('week_tour') \
.annotate(qnty=Sum('piglets_quantity')) \
.values('qnty')
return self.annotate(
alive_piglets_for_safety=Subquery(weights_subquery),
)
def add_ws_piglets_culling_for_dashboard_chart(self, cull_locs):
subquery = Subquery(piglets_events.models.CullingPiglets.objects \
.filter(location__in=cull_locs, week_tour__pk=OuterRef('pk')) \
.exclude(culling_type='spec') \
.values('week_tour') \
.annotate(piglets_culling_quantity=Sum('quantity')) \
.values('piglets_culling_quantity'))
return self.annotate(
piglets_culling_quantity=Coalesce(subquery, 0),
)
def add_ws_piglets_culling_percentage_for_dashboard_chart(self):
return self.annotate(culling_percentage=CustomRound(Coalesce(ExpressionWrapper(
F('piglets_culling_quantity') * 100.0 /
F('alive_piglets_for_safety'), output_field=models.FloatField()), 0.0))) \
.annotate(safety_percentage=ExpressionWrapper(100.0 - F('culling_percentage'),
output_field=models.FloatField()))
def add_ws_piglets_plan_data_for_dashboard_chart(self, subdivision):
return self.annotate(
plan_safety=F(f'plan__ws{subdivision}_safety'),
plan_weaning_qnty=ExpressionWrapper(F('alive_piglets_for_safety') \
* F(f'plan__ws{subdivision}_safety') / 100, output_field=models.FloatField()),
plan_culling_percentage=ExpressionWrapper(
100.0 - F(f'plan__ws{subdivision}_safety'), output_field=models.FloatField()),
plan_avg_weight=F(f'plan__ws{subdivision}_av_weight'),
plan_total_weight=ExpressionWrapper(
F(f'plan__ws{subdivision}_av_weight') * (F('alive_piglets_for_safety') \
* F(f'plan__ws{subdivision}_safety') / 100), output_field=models.FloatField())
)
def add_seminated_count_for_dashboard_chart(self):
        # sem_subquery = events_models.Semination.objects.filter(tour__pk=OuterRef('pk'))\
# .values('tour')
return self.annotate(
count_seminated=Coalesce(Count('semination__sow', distinct=True), 0),
)
def add_farrow_data_for_dashboard_chart(self):
farrow_subquery = events_models.SowFarrow.objects.filter(tour__pk=OuterRef('pk'))\
.values('tour')
return self.annotate(
alive_count=Subquery(
farrow_subquery.annotate(alive_count=Sum('alive_quantity')).values('alive_count')),
count_farrows=Subquery(
farrow_subquery.annotate(count_farrows=Count('sow')).values('count_farrows')),
).annotate(av_priplod=F('alive_count') / F('count_farrows'))
def add_sows_plan_data_for_dashboard_chart(self):
return self.annotate(
plan_seminations=F('plan__ws12_seminations'),
plan_farrow_percent=F('plan__ws12_farrow_percent'),
plan_av_priplod=F('plan__ws12_av_priplod'),
).annotate(
plan_born_alive=ExpressionWrapper((
F('plan_seminations') * F('plan_farrow_percent') / 100) * \
F('plan_av_priplod'), output_field=models.FloatField())
)
class TourManager(CoreModelManager):
def get_queryset(self):
return TourQuerySet(self.model, using=self._db)
def get_monday_date_by_week_number(self, week_number, year):
start_week_number_pre = str(year) + '-W' + str(week_number)
return datetime.datetime.strptime(start_week_number_pre + '-1', "%Y-W%W-%w")
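    # Illustrative: '%Y-W%W-%w' with '-1' appended resolves to the Monday of
    # that %W-week, e.g. get_monday_date_by_week_number(1, 2020) returns
    # datetime.datetime(2020, 1, 6, 0, 0).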
def get_or_create_by_week(self, week_number, year, start_date=None):
if not start_date:
# start_date=timezone.now()
start_date = self.get_monday_date_by_week_number(week_number=week_number, year=year)
tour = self.get_queryset().filter(week_number=week_number, year=year).first()
if not tour:
tour = self.create(start_date=start_date, week_number=week_number, year=year)
return tour
def get_or_create_by_week_with_plan(self, week_number, year, start_date=None):
tour = self.get_or_create_by_week(week_number=week_number, year=year, start_date=start_date)
TourPlan.objects.assign_plan_to_tour(tour=tour)
return tour
def get_or_create_by_week_in_current_year(self, week_number):
return self.get_or_create_by_week(week_number, timezone.now().year)
def get_tour_by_week_in_current_year(self, week_number):
return self.get_queryset().filter(week_number=week_number, year=timezone.now().year).first()
# for Import_From_Farm mechanism
def create_or_return_by_raw(self, raw_tour, start_date=None):
week_number = int(raw_tour[2:])
year = int('20' + raw_tour[:2])
if not start_date:
start_date = self.get_monday_date_by_week_number(week_number, year)
return self.get_or_create_by_week_with_plan(week_number, year, start_date)
def create_tour_from_farrow_date_string(self, farrow_date, days=135):
semination_date = datetime.datetime.strptime(farrow_date, '%Y-%m-%d') \
- datetime.timedelta(days)
week_number = int(semination_date.strftime("%V"))
return self.get_or_create_by_week(week_number, semination_date.year, semination_date)
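    # Illustrative: create_tour_from_farrow_date_string('2020-05-20') backdates
    # 135 days to 2020-01-06 and resolves that to week 2 of 2020. Note it uses
    # the ISO week ('%V') here, while get_monday_date_by_week_number() parses
    # with '%W'; the two week numberings can differ around New Year.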
def get_tours_by_piglets(self, piglets):
return self.get_queryset().filter(metatourrecords__metatour__piglets__in=piglets).distinct()
def get_week_tours_by_piglets(self, piglets):
return self.get_queryset().filter(id__in=piglets.values_list('metatour__week_tour', flat=True))
class Tour(CoreModel):
start_date = models.DateTimeField()
week_number = models.IntegerField()
year = models.IntegerField()
objects = TourManager()
def __str__(self):
return "Тур {} {}г".format(self.week_number, self.year)
@property
def days_left_from_farrow_approx(self):
return timezone.now() - (self.start_date + datetime.timedelta(days=135))
@property
def days_left_from_farrow(self):
if self.sowfarrow_set.all().first():
return timezone.now() - self.sowfarrow_set.all().first().date
return None
class MetaTourManager(CoreModelManager):
pass
class MetaTour(CoreModel):
piglets = models.OneToOneField('piglets.Piglets', on_delete=models.CASCADE)
week_tour = models.ForeignKey(Tour, on_delete=models.SET_NULL, null=True, related_name='week_tours')
objects = MetaTourManager()
def __str__(self):
return 'Piglets {} MetaTour {}'.format(self.piglets, self.pk)
def records_repr(self):
return [{
'tour': record.tour.week_number,
'percentage': round(record.percentage, 2),
'quantity': round(record.quantity, 2),
'days_left_from_farrow_approx': str((record.tour.days_left_from_farrow_approx).days),
'days_left_from_farrow': str((record.tour.days_left_from_farrow).days)
if record.tour.days_left_from_farrow else None
}
for record in self.records.all()]
def set_week_tour(self):
record = self.records.all().order_by('-percentage', 'tour__year', 'tour__week_number').first()
if record:
self.week_tour = record.tour
self.save()
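    # Tie-breaking above: the week tour is the record with the highest
    # percentage; on equal percentages the earliest tour (by year, then week
    # number) wins.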
class MetaTourRecordQuerySet(models.QuerySet):
def sum_quantity_by_tour(self, tour):
return self.filter(tour=tour).aggregate(models.Sum('quantity'))['quantity__sum']
def get_set_of_tours(self):
return Tour.objects.filter(metatourrecords__in=self).distinct()
class MetaTourRecordManager(CoreModelManager):
def get_queryset(self):
return MetaTourRecordQuerySet(self.model, using=self._db)
def create_record(self, metatour, tour, quantity, total_quantity, percentage=None):
        if not percentage:
            percentage = (quantity * 100) / total_quantity
        note = ''
        if percentage > 100:
            # keep the uncapped value for the note; building the note after
            # clamping would always log 100 instead of the bad percentage
            bad_percentage = percentage
            percentage = 100
            note = f'New version. Percentage was miscalculated as {bad_percentage} \
                for a group of quantity {metatour.piglets.quantity}. \
                Data: tour={tour.week_number}, quantity={quantity}, total_quantity={total_quantity}, \
                percentage={bad_percentage}. Percentage set to 100. \
                piglets ID {metatour.piglets.pk}, piglets.quantity={metatour.piglets.quantity}, \
                piglets.start_quantity={metatour.piglets.start_quantity},'
        return self.create(metatour=metatour, tour=tour, quantity=quantity,
            percentage=percentage, note=note)
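    # Hedged usage sketch (mt and tour are hypothetical objects): with
    # quantity=50 and total_quantity=200 the default percentage works out to 25.0:
    #   MetaTourRecord.objects.create_record(metatour=mt, tour=tour,
    #       quantity=50, total_quantity=200)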
def recount_records_by_total_quantity(self, new_total_quantity):
self.get_queryset().update(quantity=(models.F('percentage') * new_total_quantity / 100))
class MetaTourRecord(CoreModel):
metatour = models.ForeignKey(MetaTour, on_delete=models.CASCADE, related_name='records')
tour = models.ForeignKey(Tour, on_delete=models.CASCADE, related_name='metatourrecords')
quantity = models.FloatField()
percentage = models.FloatField()
note = models.TextField(default='')
objects = MetaTourRecordManager()
class Meta:
ordering = ['tour', ]
def __str__(self):
return 'MetaTourRecord {} {}'.format(self.pk, self.tour)
def increase_quantity(self, amount):
self.quantity += amount
self.save()
class TourPlanQuerySet(models.QuerySet):
@staticmethod
def get_places(ws):
places = list()
if ws == 4:
places = ['3/4']
if ws == 8:
places = ['4/8']
if ws == '_otkorm':
places = ['8/5', '8/6', '8/7']
return places
def get_or_create_plan(self, week_number, year):
plan = self.filter(week_number=week_number, year=year).first()
if not plan:
plan = self.filter(default=True).first()
plan.pk = None
plan.default = False
plan.week_number = week_number
plan.year = year
plan.save()
return plan
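    # The clone trick above: resetting pk to None on the default plan and
    # saving inserts a new row carrying the default's field values, re-bound to
    # the requested week/year. It assumes a TourPlan with default=True exists.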
def assign_plan_to_tour(self, tour):
plan = self.get_or_create_plan(week_number=tour.week_number, year=tour.year)
plan.tour = tour
plan.save()
return plan
def add_income_quantity_and_total_from_farrows(self):
subquery = events_models.SowFarrow.objects.filter(tour=OuterRef('tour')) \
.values('tour') \
.annotate(alive_count=Sum('alive_quantity')) \
.values('alive_count')
return self.annotate(count_income=ExpressionWrapper(
Coalesce(
Round(
Subquery(subquery) * F('ws3_safety') / 100.0, precision=0
),
0),
output_field=models.IntegerField()),
).annotate(income_total=ExpressionWrapper(F('count_income') * F('ws3_av_weight'),
output_field=models.FloatField()))
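    # Illustrative: with 1000 piglets born alive in the tour and ws3_safety=92,
    # count_income rounds to 920 and income_total = 920 * ws3_av_weight.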
def add_income_quantity_and_total_from_weights(self, ws):
places = self.get_places(ws=ws)
subquery = piglets_events.models.WeighingPiglets.objects \
.filter(week_tour=OuterRef('tour'), place__in=places) \
.values('week_tour') \
.annotate(sum_quantity=Sum('piglets_quantity')) \
.values('sum_quantity')
return self.annotate(count_income=ExpressionWrapper(
Coalesce(
Round(
Subquery(subquery) * F(f'ws{ws}_safety') / 100.0, precision=0),
0),
output_field=models.IntegerField()),
).annotate(income_total=ExpressionWrapper(
Coalesce(
Round(
F('count_income') * F(f'ws{ws}_av_weight'), precision=0),
0),
output_field=models.IntegerField()))
def agg_for_month_by_ws(self, ws_number):
agg_dict = {
'_count_income': Sum('count_income'),
'_income_total': Sum('income_total'),
f'ws{ws_number}_safety': Avg(f'ws{ws_number}_safety'),
f'ws{ws_number}_av_weight': Avg(f'ws{ws_number}_av_weight'),
f'ws{ws_number}_korm_conversion': Avg(f'ws{ws_number}_korm_conversion'),
f'ws{ws_number}_trud': Avg(f'ws{ws_number}_trud'),
f'ws{ws_number}_vet': Avg(f'ws{ws_number}_vet'),
}
return self.aggregate(**agg_dict)
def add_born_alive_plan(self):
return self.annotate(plan_farrow=ExpressionWrapper(Round(
F('ws12_seminations') * F('ws12_farrow_percent') / 100.0, precision=0),
output_field=models.IntegerField())
) \
.annotate(count_piglets_alive=ExpressionWrapper(Round(
F('plan_farrow') * F('ws12_av_priplod')),
output_field=models.IntegerField()
))
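    # Illustrative: ws12_seminations=100, ws12_farrow_percent=85 and
    # ws12_av_priplod=12.5 give plan_farrow=85 and
    # count_piglets_alive=Round(85 * 12.5).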
def agg_sows_month_data(self):
return self.aggregate(
ws12_seminations=Sum('ws12_seminations'),
count_piglets_alive_total=Sum('count_piglets_alive'),
plan_farrows=Sum('plan_farrow'),
ws12_farrow_percent=Avg('ws12_farrow_percent'),
ws12_av_priplod=Avg('ws12_av_priplod'),
ws12_korm_conversion=Avg('ws12_korm_conversion'),
ws12_vet=Avg('ws12_vet'),
ws12_trud=Avg('ws12_trud'),
)
def coming_piglets_plans(self, ws_number):
if ws_number == '_otkorm':
return self.filter(tour__in=Tour.objects.exclude(piglets_culling__culling_type='spec'))
else:
places = list()
if ws_number == 3:
places = ['3/4']
if ws_number == 4:
places = ['4/8']
if ws_number == 8:
places = ['8/5', '8/6', '8/7']
return self.filter(tour__in=Tour.objects.exclude(piglets_weights__place__in=places))
def coming_sow_plan(self):
return self.filter(tour__sowfarrow=None, default=False)
class TourPlan(CoreModel):
tour = models.OneToOneField(Tour, on_delete=models.SET_NULL, null=True, blank=True,
related_name='plan')
tour_name = models.CharField(max_length=20, null=True)
week_number = models.IntegerField()
year = models.IntegerField()
default = models.BooleanField(default=False)
    # reproduction (workshops 1-2)
ws12_seminations = models.IntegerField(default=0) # + set
ws12_born_alive = models.IntegerField(default=0) # + calc
ws12_farrow_percent = models.IntegerField(default=0) # + set
ws12_av_priplod = models.FloatField(default=0) # + set
ws12_korm_conversion = models.FloatField(default=0) # month set
ws12_vet = models.FloatField(default=0) # month set
    ws12_trud = models.FloatField(default=0)  # + man-hours, set monthly
    # farrowing barn (ws3, "matochnik")
ws3_weaning = models.FloatField(default=0) # +
ws3_av_weight = models.FloatField(default=0) # +
ws3_total_weight = models.FloatField(default=0) # +
ws3_safety = models.IntegerField(default=0) # +
ws3_korm_conversion = models.FloatField(default=0) # month
ws3_trud = models.FloatField(default=0) # month
ws3_vet = models.FloatField(default=0) # month
# ws3_prives
    # rearing 1 (ws4)
ws4_weaning = models.FloatField(default=0) # +
ws4_av_weight = models.FloatField(default=0) # +
ws4_total_weight = models.FloatField(default=0) # +
ws4_safety = models.IntegerField(default=0) # +
ws4_korm_conversion = models.FloatField(default=0) # +
ws4_trud = models.FloatField(default=0) # +
ws4_vet = models.FloatField(default=0) # +
    # rearing 2 (ws8)
ws8_weaning = models.FloatField(default=0) # +
ws8_av_weight = models.FloatField(default=0) # +
ws8_total_weight = models.FloatField(default=0) # +
ws8_safety = models.IntegerField(default=0) # +
ws8_korm_conversion = models.FloatField(default=0) # +
ws8_trud = models.FloatField(default=0) # +
ws8_vet = models.FloatField(default=0) # +
    # fattening (ws 5-7, "otkorm")
ws_otkorm_weaning = models.FloatField(default=0) # +
ws_otkorm_av_weight = models.FloatField(default=0) # +
ws_otkorm_total_weight = models.FloatField(default=0) # +
ws_otkorm_safety = models.IntegerField(default=0) # +
ws_otkorm_korm_conversion = models.FloatField(default=0) # +
ws_otkorm_trud = models.FloatField(default=0) # +
ws_otkorm_vet = models.FloatField(default=0) # +
objects = TourPlanQuerySet.as_manager()
def __str__(self):
return f'Plan {self.week_number} {self.year}'
# @property
# def plan_ws3_weaning(self):
# return self.ws3_weaning * self.tour.weaning_quantity
| null |
svinbin/tours/models.py
|
models.py
|
py
| 56,938 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.models.QuerySet",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "sows.models.Sow.objects.filter",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sows.models.Sow",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "sows.models",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "django.db.models.Avg",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "django.db.models.Count",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "django.db.models.Avg",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "django.db.models.F",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "django.db.models.Case",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "django.db.models.Case",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "django.db.models.When",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "django.db.models.Prefetch",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "sows_events.models.Semination.objects.filter",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "sows_events.models.Semination",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "django.db.models.Prefetch",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "sows_events.models.Ultrasound.objects.filter",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "sows_events.models.Ultrasound",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "django.db.models.Prefetch",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "sows_events.models.SowFarrow.objects.filter",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "sows_events.models.SowFarrow",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "django.db.models.Prefetch",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "sows_events.models.WeaningSow.objects.filter",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "sows_events.models.WeaningSow",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent.objects.filter",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent.objects.filter",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent",
"line_number": 454,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent.objects.filter",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "sows_events.models.PigletsToSowsEvent",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 554,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 585,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 585,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 588,
"usage_type": "name"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 592,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 608,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 608,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 608,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 609,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 618,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 618,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 621,
"usage_type": "name"
},
{
"api_name": "django.db.models.F",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 700,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 705,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 707,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 707,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 710,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 717,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 718,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 718,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 720,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 745,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 746,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 747,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 747,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 761,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 763,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 764,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 769,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 772,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 772,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 773,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 776,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 785,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 785,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 790,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 790,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 791,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 796,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 800,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 800,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 801,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 805,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 805,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 806,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 813,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 814,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 819,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 820,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 825,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 826,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 826,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 827,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 828,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 828,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 829,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 833,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 833,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 834,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 839,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 841,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 843,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 862,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 862,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 863,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 865,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 869,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 873,
"usage_type": "call"
},
{
"api_name": "piglets_events.models.CullingPiglets.objects.filter",
"line_number": 873,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 873,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 874,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 877,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 881,
"usage_type": "call"
},
{
"api_name": "core.models.Round",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 886,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 887,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 887,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 887,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 889,
"usage_type": "name"
},
{
"api_name": "django.db.models.F",
"line_number": 893,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 894,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 894,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 895,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 895,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 895,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 896,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 897,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 897,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 897,
"usage_type": "name"
},
{
"api_name": "django.db.models.F",
"line_number": 898,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 899,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 900,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 901,
"usage_type": "name"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 908,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 908,
"usage_type": "call"
},
{
"api_name": "sows_events.models.SowFarrow.objects.filter",
"line_number": 912,
"usage_type": "call"
},
{
"api_name": "sows_events.models.SowFarrow",
"line_number": 912,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 912,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 912,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 917,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 918,
"usage_type": "call"
},
{
"api_name": "django.db.models.Count",
"line_number": 919,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 920,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 924,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 925,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 926,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 928,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 929,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 930,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 930,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 930,
"usage_type": "name"
},
{
"api_name": "core.models.CoreModelManager",
"line_number": 934,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 940,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 940,
"usage_type": "attribute"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 958,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 958,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 961,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 961,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 972,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 972,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 973,
"usage_type": "call"
},
{
"api_name": "piglets.values_list",
"line_number": 981,
"usage_type": "call"
},
{
"api_name": "core.models.CoreModel",
"line_number": 984,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 985,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 985,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 986,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 986,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 987,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 987,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 996,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 996,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 996,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 1001,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 1001,
"usage_type": "name"
},
{
"api_name": "core.models.CoreModelManager",
"line_number": 1005,
"usage_type": "name"
},
{
"api_name": "core.models.CoreModel",
"line_number": 1009,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 1010,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1010,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 1010,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 1011,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1011,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 1011,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 1036,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 1036,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1038,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1038,
"usage_type": "name"
},
{
"api_name": "core.models.CoreModelManager",
"line_number": 1044,
"usage_type": "name"
},
{
"api_name": "django.db.models.F",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1066,
"usage_type": "name"
},
{
"api_name": "core.models.CoreModel",
"line_number": 1069,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 1070,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1070,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 1070,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 1071,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1071,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 1071,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1072,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1072,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1073,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1073,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 1074,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1074,
"usage_type": "name"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 1089,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 1089,
"usage_type": "name"
},
{
"api_name": "sows_events.models.SowFarrow.objects.filter",
"line_number": 1123,
"usage_type": "call"
},
{
"api_name": "sows_events.models.SowFarrow",
"line_number": 1123,
"usage_type": "attribute"
},
{
"api_name": "sows_events.models",
"line_number": 1123,
"usage_type": "name"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 1123,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1125,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1128,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 1129,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 1130,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 1131,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1131,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1134,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1134,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1135,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1135,
"usage_type": "call"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1136,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1136,
"usage_type": "name"
},
{
"api_name": "piglets_events.models.WeighingPiglets.objects.filter",
"line_number": 1141,
"usage_type": "call"
},
{
"api_name": "piglets_events.models",
"line_number": 1141,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.OuterRef",
"line_number": 1142,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1144,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1147,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 1148,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 1149,
"usage_type": "call"
},
{
"api_name": "django.db.models.Subquery",
"line_number": 1150,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1150,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1152,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1152,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1153,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Coalesce",
"line_number": 1154,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 1155,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1156,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1158,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1158,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1162,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1163,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1164,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1166,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1167,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1168,
"usage_type": "call"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1173,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 1173,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1174,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1175,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1175,
"usage_type": "name"
},
{
"api_name": "django.db.models.ExpressionWrapper",
"line_number": 1177,
"usage_type": "call"
},
{
"api_name": "django.db.models.functions.Round",
"line_number": 1177,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 1178,
"usage_type": "call"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1179,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1179,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1184,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1185,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 1186,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1187,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1188,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1189,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1190,
"usage_type": "call"
},
{
"api_name": "django.db.models.Avg",
"line_number": 1191,
"usage_type": "call"
},
{
"api_name": "core.models.CoreModel",
"line_number": 1214,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 1215,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1215,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 1215,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.CharField",
"line_number": 1217,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1217,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1218,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1218,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1219,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1219,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 1221,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1221,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1224,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1224,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1225,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1225,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1226,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1226,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1227,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1227,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1228,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1228,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1229,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1229,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1230,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1230,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1233,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1233,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1234,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1234,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1235,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1235,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1236,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1236,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1237,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1237,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1238,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1238,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1239,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1239,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1243,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1243,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1244,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1244,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1245,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1245,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1246,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1246,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1247,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1247,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1248,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1248,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1249,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1249,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1252,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1252,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1253,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1253,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1254,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1254,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1255,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1255,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1256,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1256,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1257,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1257,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1258,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1258,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1261,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1261,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1262,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1262,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1263,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1263,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 1264,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1264,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1265,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1265,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1266,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1266,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 1267,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 1267,
"usage_type": "name"
}
] |
344846077
|
import stk
import os
from collections import namedtuple
from os.path import join
import numpy as np
from ..._test_utilities import _test_dump_and_load, _compare_with_valid
test_dir = 'cof_topology_tests_output'
if not os.path.exists(test_dir):
os.mkdir(test_dir)
def _alignment(vertex, building_block, vertices, edges):
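    # Builds a key function: for an edge id, return the cosine between the
    # vertex -> first-functional-group direction and the vertex -> edge
    # direction, so max() over edge ids picks the best-aligned edge.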
fg_position = building_block.get_centroid(
atom_ids=building_block.func_groups[0].get_bonder_ids()
)
v1 = stk.normalize_vector(fg_position - vertex.get_position())
def inner(edge_id):
edge_position = edges[edge_id].get_position(vertex, vertices)
v2 = edge_position - vertex.get_position()
return v1 @ stk.normalize_vector(v2)
return inner
def _test_placement(vertex, bb, vertices, edges):
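    # Place the building block on the vertex, then check that its bonder
    # centroid sits on the vertex position and that the best-aligned edge
    # is the vertex's designated aligner edge.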
vertex.place_building_block(bb, vertices, edges)
assert np.allclose(
a=bb.get_centroid(bb.get_bonder_ids()),
b=vertex.get_position(),
atol=1e-8
)
aligned = max(
vertex.get_edge_ids(),
key=_alignment(vertex, bb, vertices, edges)
)
vertex_edges = list(vertex.get_edge_ids())
assert aligned == vertex_edges[vertex.get_aligner_edge()]
def _angle(bb, edge, vertex, vertices):
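    # Key function: angle between a functional group's direction (from the
    # bonder centroid) and the direction towards the given edge; min() over
    # fg ids finds the functional group pointing most directly at that edge.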
edge_vector = (
edge.get_position(vertex, vertices) -
bb.get_centroid(bb.get_bonder_ids())
)
def inner(fg_id):
fg = bb.func_groups[fg_id]
fg_vector = (
bb.get_centroid(fg.get_bonder_ids()) -
bb.get_centroid(bb.get_bonder_ids())
)
return stk.vector_angle(fg_vector, edge_vector)
return inner
def _test_assignment(vertex, bb, vertices, edges):
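    # Functional group 0 must be assigned to the aligner edge, and every
    # edge must receive the functional group pointing most directly at it.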
assignments = (
vertex.assign_func_groups_to_edges(bb, vertices, edges)
)
vertex_edges = list(vertex.get_edge_ids())
assert assignments[0] == vertex_edges[vertex.get_aligner_edge()]
for edge_id in vertex.get_edge_ids():
closest = min(
range(len(bb.func_groups)),
key=_angle(bb, edges[edge_id], vertex, vertices)
)
assert assignments[closest] == edge_id
if len(bb.func_groups) == 2:
not_aligner = next(
e for e in vertex.get_edge_ids()
if e != vertex_edges[vertex.get_aligner_edge()]
)
assert assignments[1] == not_aligner
def test_vertex(
tmp_amine2,
tmp_aldehyde3,
tmp_aldehyde4_alt2,
tmp_aldehyde6
):
topology_graphs = (
stk.cof.Honeycomb((2, 2, 1)),
stk.cof.Hexagonal((2, 2, 1)),
stk.cof.Square((2, 2, 1)),
stk.cof.Kagome((2, 2, 1)),
stk.cof.LinkerlessHoneycomb((2, 2, 1))
)
building_blocks = {
2: tmp_amine2,
3: tmp_aldehyde3,
4: tmp_aldehyde4_alt2,
6: tmp_aldehyde6
}
for topology_graph in topology_graphs:
vertices = topology_graph.vertices
edges = topology_graph.edges
for vertex in topology_graph.vertices:
bb = building_blocks[vertex.get_num_edges()]
_test_placement(vertex, bb, vertices, edges)
_test_assignment(vertex, bb, vertices, edges)
def _test_construction(
cof,
num_expected_bbs,
num_unreacted_fgs,
periodic
):
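    # Sanity-check the constructed COF: building block counts, the number of
    # construction bonds (accounting for unreacted fgs on island lattices),
    # and total atom / bond counts after deleter atoms are removed.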
is_periodic = '_periodic' if periodic else ''
path = join(
test_dir,
f'{cof.topology_graph.__class__.__name__}{is_periodic}.mol'
)
cof.write(path)
for bb in cof.get_building_blocks():
assert cof.building_block_counter[bb] == num_expected_bbs[bb]
# This test only holds true when each building block is
# involved in every construction bond and the cof is
# periodic.
is_periodic = all(
num == 0 for num in num_unreacted_fgs.values()
)
if len(num_expected_bbs) < 3 and is_periodic:
assert (
len(cof.construction_bonds) ==
cof.building_block_counter[bb] * len(bb.func_groups)
)
# Check that the correct number of bonds got made.
assert (
len(cof.construction_bonds) ==
# For every 2 unreacted functional groups there should be one
# less construction bond.
len(cof.topology_graph.edges)-sum(num_unreacted_fgs.values())/2
)
# Check correct total number of atoms.
num_deleters = sum(
len(fg.deleters)*cof.building_block_counter[bb]
for bb in cof.get_building_blocks()
for fg in bb.func_groups
)
# Remove the deleters from all the unreacted functional groups.
# This assumes that all functional groups in a building block are
# the same.
num_deleters -= sum(
len(bb.func_groups[0].deleters)
for bb in num_unreacted_fgs
for i in range(num_unreacted_fgs[bb])
)
num_bb_atoms = sum(
len(bb.atoms)*cof.building_block_counter[bb]
for bb in cof.get_building_blocks()
)
assert len(cof.atoms) == num_bb_atoms - num_deleters
# Check correct total number of bonds.
num_bb_bonds = sum(
len(bb.bonds)*cof.building_block_counter[bb]
for bb in cof.get_building_blocks()
)
assert (
len(cof.bonds) ==
num_bb_bonds + len(cof.construction_bonds) - num_deleters
)
def test_alignments(amine2_alt3, aldehyde4_alt1, valid_cof_dir):
num_expected_bbs = {
amine2_alt3: 6*9,
aldehyde4_alt1: 3*9
}
periodic_unreacted = {
amine2_alt3: 0,
aldehyde4_alt1: 0
}
island_unreacted = {
amine2_alt3: 11,
aldehyde4_alt1: 11
}
for i in range(4):
for periodic in (True, False):
cof = stk.ConstructedMolecule(
building_blocks=[amine2_alt3, aldehyde4_alt1],
topology_graph=stk.cof.Kagome(
lattice_size=(3, 3, 1),
vertex_alignments={
0: i,
len(stk.cof.Kagome.vertex_data)-1: i % 2
},
periodic=periodic
)
)
kind = '_periodic' if periodic else ''
cof.write(
join(test_dir, f'aligning_{i}_{i%2}{kind}.mol')
)
num_unreacted_fgs = (
periodic_unreacted if periodic else island_unreacted
)
_test_construction(
cof=cof,
num_expected_bbs=num_expected_bbs,
num_unreacted_fgs=num_unreacted_fgs,
periodic=periodic
)
_test_dump_and_load(
test_dir=test_dir,
mol=cof,
name=f'aligning_{i}_{i%2}{kind}'
)
_compare_with_valid(
valid_dir=valid_cof_dir,
mol=cof,
name=f'aligning_{i}_{i%2}{kind}'
)
def test_multi_bb(
amine2,
amine2_alt1,
amine2_alt2,
amine2_alt3,
aldehyde4,
aldehyde4_alt1,
valid_cof_dir
):
building_blocks = [
amine2,
amine2_alt1,
amine2_alt2,
amine2_alt3,
aldehyde4,
aldehyde4_alt1
]
periodic_unreacted = {bb: 0 for bb in building_blocks}
island_unreacted = {
amine2: 0,
amine2_alt1: 0,
amine2_alt2: 3,
amine2_alt3: 8,
aldehyde4: 3,
aldehyde4_alt1: 8
}
for periodic in (True, False):
kagome = stk.cof.Kagome((3, 3, 1), periodic)
di_verts = [
v for v in kagome.vertices if v.get_num_edges() == 2
]
tetra_verts = [
v for v in kagome.vertices if v.get_num_edges() == 4
]
cof = stk.ConstructedMolecule(
building_blocks=building_blocks,
topology_graph=kagome,
building_block_vertices={
amine2: di_verts[:4],
amine2_alt1: di_verts[4:5],
amine2_alt2: di_verts[5:7],
amine2_alt3: di_verts[7:],
aldehyde4: tetra_verts[:2],
aldehyde4_alt1: tetra_verts[2:]
}
)
num_expected_bbs = {
amine2: len(di_verts[:4]),
amine2_alt1: len(di_verts[4:5]),
amine2_alt2: len(di_verts[5:7]),
amine2_alt3: len(di_verts[7:]),
aldehyde4: len(tetra_verts[:2]),
aldehyde4_alt1: len(tetra_verts[2:])
}
kind = '_periodic' if periodic else ''
cof.write(join(test_dir, f'multi_bb{kind}.mol'))
num_unreacted_fgs = (
periodic_unreacted if periodic else island_unreacted
)
_test_construction(
cof=cof,
num_expected_bbs=num_expected_bbs,
num_unreacted_fgs=num_unreacted_fgs,
periodic=periodic
)
_test_dump_and_load(test_dir, cof, f'multi{kind}')
_compare_with_valid(valid_cof_dir, cof, f'multi{kind}')
def test_topologies(
tmp_honeycomb,
tmp_periodic_honeycomb,
tmp_kagome,
tmp_periodic_kagome,
tmp_hexagonal,
tmp_periodic_hexagonal,
tmp_square,
tmp_periodic_square,
tmp_linkerless_honeycomb,
tmp_periodic_linkerless_honeycomb,
valid_cof_dir
):
COFData = namedtuple(
'COFData',
[
'cof',
'num_linkers',
'num_building_blocks',
'num_unreacted_linker_fgs',
'num_unreacted_building_block_fgs',
'periodic'
]
)
cofs = (
COFData(tmp_honeycomb, 3*9, 2*9, 6, 6, False),
COFData(tmp_periodic_honeycomb, 3*9, 2*9, 0, 0, True),
COFData(tmp_kagome, 6*9, 3*9, 11, 11, False),
COFData(tmp_periodic_kagome, 6*9, 3*9, 0, 0, True),
COFData(tmp_hexagonal, 12*9, 4*9, 23, 23, False),
COFData(tmp_periodic_hexagonal, 12*9, 4*9, 0, 0, True),
COFData(tmp_square, 2*9, 1*9, 6, 6, False),
COFData(tmp_periodic_square, 2*9, 1*9, 0, 0, True),
COFData(tmp_linkerless_honeycomb, 1*9, 1*9, 6, 6, False),
COFData(tmp_periodic_linkerless_honeycomb, 9, 9, 0, 0, True)
)
for cof in cofs:
linker, building_block = sorted(
cof.cof.get_building_blocks(),
key=lambda bb: len(bb.func_groups)
)
num_expected_bbs = {
linker: cof.num_linkers,
building_block: cof.num_building_blocks
}
num_unreacted_fgs = {
linker: cof.num_unreacted_linker_fgs,
building_block: cof.num_unreacted_building_block_fgs
}
_test_construction(
cof=cof.cof,
num_expected_bbs=num_expected_bbs,
num_unreacted_fgs=num_unreacted_fgs,
periodic=cof.periodic
)
periodic = '_periodic' if cof.periodic else ''
name = f'{cof.cof.topology_graph.__class__.__name__}{periodic}'
_test_dump_and_load(test_dir, cof.cof, name)
_compare_with_valid(valid_cof_dir, cof.cof, name)
| null |
tests/molecular/topology_graphs/test_cof_topologies.py
|
test_cof_topologies.py
|
py
| 10,920 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.exists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "stk.normalize_vector",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "stk.normalize_vector",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "stk.vector_angle",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "stk.cof.Honeycomb",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "stk.cof.Hexagonal",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "stk.cof.Square",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "stk.cof.Kagome",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "stk.cof.LinkerlessHoneycomb",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "stk.ConstructedMolecule",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "stk.cof.Kagome",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "stk.cof",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "_test_utilities._test_dump_and_load",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "_test_utilities._compare_with_valid",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "stk.cof.Kagome",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "stk.cof",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "stk.ConstructedMolecule",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "_test_utilities._test_dump_and_load",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "_test_utilities._compare_with_valid",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "_test_utilities._test_dump_and_load",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "_test_utilities._compare_with_valid",
"line_number": 355,
"usage_type": "call"
}
] |
446480223
|
from PIL import ImageGrab, ImageOps
import pyautogui
import time
from numpy import array  # only array() is used below
class Coordinates:
replayBtn = (480, 238)
dino = (246, 405)
boxx = (255, 270)
duckBox = (270, 231)
# 1232 x to check
# 431 lowest point to check
x = 3  # shared action duration (seconds); read and decremented by pressSpace()/duck()
def restartGame():
pyautogui.click(Coordinates.replayBtn)
# pyautogui.keyDown('down')
def duck():
    global x  # without this, the assignment below makes x local and time.sleep(x) raises UnboundLocalError
    pyautogui.keyDown('down')
    time.sleep(x)
    print("duck")
    pyautogui.keyUp('down')
    x = x - .3
    print(x)
def pressSpace():
    global x  # same shared-duration fix as in duck()
    pyautogui.keyDown('space')
    time.sleep(x)
    print("Jump")
    time.sleep(0.1)
    pyautogui.keyUp('space')
    x = x - .3
    print(x)
def imageGrab():
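    # Sample a thin pixel strip just ahead of the dino and return the summed
    # grayscale histogram; main() treats any deviation from the fixed
    # empty-track value (330) as an obstacle entering the strip.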
# box = (Coordinates.dino[0] + 74, Coordinates.dino[1], Coordinates.dino[0] + 100, Coordinates.dino[1]+30)
box = (Coordinates.boxx[0], Coordinates.boxx[1], Coordinates.boxx[0]+75, Coordinates.boxx[1]+1)
image = ImageGrab.grab(box)
grayImage = ImageOps.grayscale(image)
a = array(grayImage.getcolors())
return a.sum()
def grab2():
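    # Same histogram-sum sampling as imageGrab(), but over the higher duckBox
    # strip, presumably to detect flying obstacles that require ducking.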
dBox = (Coordinates.duckBox[0], Coordinates.duckBox[1], Coordinates.duckBox[0]+75, Coordinates.duckBox[1]+1)
image2 = ImageGrab.grab(dBox)
tGrey = ImageOps.grayscale(image2)
b = array(tGrey.getcolors())
return b.sum()
def main():
    # button1()
    global x  # reuse the module-level duration shared with pressSpace()/duck()
    restartGame()
    x = 3
while True:
if imageGrab() != 330:
time.sleep(0)
pressSpace()
time.sleep(0)
if grab2() != 330:
time.sleep(0)
duck()
time.sleep(0)
x = 6-x-.3
print(x)
main()
| null |
dinobot.py
|
dinobot.py
|
py
| 1,627 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyautogui.click",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyautogui.keyDown",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyautogui.keyUp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyautogui.keyDown",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyautogui.keyUp",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab.grab",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PIL.ImageOps.grayscale",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PIL.ImageOps",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "PIL.ImageGrab.grab",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "PIL.ImageOps.grayscale",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "PIL.ImageOps",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 72,
"usage_type": "call"
}
] |
211880163
|
import importlib
import os
import sys
import typing as t
from decimal import Decimal
from functools import wraps
from dpath.util import get
from yaml import dump as dump_yaml
SHRED_DATA_FIELD_NAMES = (
'password',
'secret',
'pass',
'pwd',
'key',
'token',
'auth',
'cred',
)
def shred(key_name: str, value):
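    # Mask the value with '*' when the key name contains a credential-like
    # token from SHRED_DATA_FIELD_NAMES (case-insensitive substring match).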
key_name = key_name.lower()
need_shred = False
for data_field_name in SHRED_DATA_FIELD_NAMES:
if data_field_name in key_name:
need_shred = True
break
if not need_shred:
return value
return '*' * len(str(value))
def import_from(module: str, name: str):
return getattr(
importlib.import_module(module, [name]),
name
)
def dot_path(obj, path: str, default=None):
"""
Access elements of mixed dict/object by path.
:param obj: object or dict
:param path: path to value
:param default: default value if chain resolve failed
:return: value
"""
path_items = path.split('.')
val = obj
sentinel = object()
for item in path_items:
if isinstance(val, dict):
val = val.get(item, sentinel)
if val is sentinel:
return default
else:
val = getattr(val, item, sentinel)
if val is sentinel:
return default
return val
def dotkey(obj: dict, path: str, default=None, separator='.'):
"""
:param obj: dict like {'some': {'value': 3}}
:param path: 'some.value'
:param separator: '.' | '/'
:param default: default for KeyError
:return: value or default value
"""
try:
return get(obj, path, separator=separator)
except KeyError:
return default
def _materialize_dict(d: t.Dict, separator: str = '.'):
"""
Traverses and transforms a given dict into a tuples of key paths and values.
:param d: a dict to traverse
:param separator: build paths with given separator
:return: yields tuple(materialized_path, value)
>>> list(_materialize_dict({'test': {'path': 1}, 'key': 'val'}, '.'))
>>> [('key', 'val'), ('test.path', 1)]
"""
if not hasattr(d, 'items'):
raise ValueError('Cannot materialize an object with no `items()`: %s' % repr(d))
for path_prefix, v in d.items():
if not isinstance(v, dict):
yield str(path_prefix), v
continue
for nested_path, nested_val in _materialize_dict(v, separator=separator):
yield '{0}{1}{2}'.format(path_prefix, separator, nested_path), nested_val
def materialize_dict(d: dict, separator: str = '.') -> t.List[t.Tuple[str, t.Any]]:
"""
Transforms a given dict into a sorted list of tuples of key paths and values.
:param d: a dict to materialize
:param separator: build paths with given separator
:return: a depth descending and alphabetically ascending sorted list (-deep, asc), longest first
>>> sample = {
>>> 'a': 1,
>>> 'aa': 1,
>>> 'b': {
>>> 'c': 1,
>>> 'b': 1,
>>> 'a': 1,
>>> 'aa': 1,
>>> 'aaa': {
>>> 'a': 1
>>> }
>>> }
>>> }
>>> materialize_dict(sample, '/')
>>> [
>>> ('b/aaa/a', 1),
>>> ('b/a', 1),
>>> ('b/aa', 1),
>>> ('b/b', 1),
>>> ('b/c', 1),
>>> ('a', 1),
>>> ('aa', 1)
>>> ]
"""
def _matkeysort(tup: t.Tuple[str, t.Any]):
return len(tup[0].split(separator))
s1 = sorted(_materialize_dict(d, separator=separator), key=lambda x: x[0])
return sorted(s1, key=_matkeysort, reverse=True)
def mp_serialize_dict(
d: dict,
separator: str = '.',
serialize: t.Optional[t.Callable] = dump_yaml,
value_prefix: str = '::YAML::\n') -> t.List[t.Tuple[str, bytes]]:
"""
:param d: dict to materialize
:param separator: build paths with given separator
:param serialize: method to serialize non-basic types, default is yaml.dump
:param value_prefix: prefix for non-basic serialized types
:return: list of tuples (mat_path, b'value')
"""
md = materialize_dict(d, separator=separator)
res = []
for path, value in md:
# have to serialize values (value should be None or a string / binary data)
if value is None:
pass
elif isinstance(value, str) and value != '':
# the value != '' check forces serialization of empty strings,
# since a bare empty string may not be recognized by a storage backend
pass
elif isinstance(value, bytes):
pass
elif isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, (int, float, Decimal)):
value = str(value)
else:
value = (value_prefix + serialize(value))
if isinstance(value, str):
value = value.encode()
res.append((path, value))
return res
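# Illustrative usage (default separator and serializer):
# mp_serialize_dict({'db': {'port': 5432, 'debug': True}})
# -> [('db.debug', b'true'), ('db.port', b'5432')]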
def wf(raw_str, flush=True, prevent_completion_polluting=True):
"""
:param raw_str: Raw string to print.
:param flush: execute sys.stdout.flush
:param prevent_completion_polluting: don't print anything when the script is invoked without arguments (e.g. during shell completion)
:return:
"""
if prevent_completion_polluting and len(sys.argv) <= 1:
return
sys.stdout.write(raw_str)
flush and sys.stdout.flush()
def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
"""
:param val: ['', 0, 1, true, false, True, False]
:param strict: raise Exception if got anything except ['', 0, 1, true, false, True, False]
:return: True | False
"""
if isinstance(val, bool):
return val
# flag is set
if val == '':
return True
val = str(val).lower()
if val in ['0', '1']:
return bool(int(val))
if val == 'true':
return True
if val == 'false':
return False
if strict:
raise ValueError('Unsupported value for boolean flag: `%s`' % val)
return bool(val)
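# Illustrative behaviour: coerce_str_to_bool('') -> True (a bare flag counts as set),
# coerce_str_to_bool('0') -> False, coerce_str_to_bool('yes', strict=True) raises ValueError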
def env_bool_flag(flag_name: str, strict: bool = False, env: t.Dict = os.environ) -> bool:
"""
Environment boolean checker. Empty string (presence in env) is treated as True.
:param flag_name: 'dockerized'
:param strict: raise Exception if got anything except ['', 0, 1, true, false]
:param env: dict-alike object, ``os.environ`` by default
:return: True | False
"""
sentinel = object()
val = env.get(flag_name, sentinel)
if val is sentinel:
return False
return coerce_str_to_bool(val, strict=strict)
def run_env_once(f):
"""
ENV variables used to prevent running init code twice for manage.py command
(https://stackoverflow.com/questions/16546652/why-does-django-run-everything-twice)
"""
@wraps(f)
def wrapper(*args, **kwargs):
has_run = os.environ.get(wrapper.__name__)
if has_run == '1':
return
result = f(*args, **kwargs)
os.environ[wrapper.__name__] = '1'
return result
return wrapper
def is_dockerized(flag_name: str = 'DOCKERIZED', strict: bool = False):
return env_bool_flag(flag_name, strict=strict)
def is_production(flag_name: str = 'PRODUCTION', strict: bool = False):
return env_bool_flag(flag_name, strict=strict)
| null |
django_docker_helpers/utils.py
|
utils.py
|
py
| 7,366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "importlib.import_module",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "dpath.util.get",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "typing.Callable",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "yaml.dump",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "functools.wraps",
"line_number": 250,
"usage_type": "call"
}
] |
161834532
|
# Use OpenCL To Add Two Random Arrays
import pyopencl as cl # Import the OpenCL GPU computing API
import numpy # Import Numpy number tools
context = cl.create_some_context() # Initialize the Context
queue = cl.CommandQueue(context) # Instantiate a Queue
a = numpy.random.rand(50000).astype(numpy.float32) # Create a random numpy array
b = numpy.random.rand(50000).astype(numpy.float32) # Create a random numpy array
c = numpy.empty_like(a) # Create an empty destination array
a_buffer = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=a)
b_buffer = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=b)
c_buffer = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, b.nbytes)
# Create three buffers (plans for areas of memory on the device)
kernel = """__kernel void sum(__global float* a, __global float* b, __global float* c)
{
int i = get_global_id(0);
c[i] = a[i] + b[i];
}""" # Create a kernel (a string containing C-like OpenCL device code)
program = cl.Program(context, kernel).build()
# Compile the kernel code into an executable OpenCL program
program.sum(queue, a.shape, None, a_buffer, b_buffer, c_buffer)
# Enqueue the program for execution, causing data to be copied to the device
# - queue: the command queue the program will be sent to
# - a.shape: a tuple of the arrays' dimensions (the global work size)
# - None: let the runtime choose the local work size
# - a_buffer, b_buffer, c_buffer: the memory spaces this program deals with
cl.enqueue_read_buffer(queue, c_buffer, c).wait()
# Copy the data for array c back to the host
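# Note: enqueue_read_buffer is deprecated in recent PyOpenCL releases;
# cl.enqueue_copy(queue, c, c_buffer) is the modern equivalent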
print("a: {}".format(a))
print("b: {}".format(b))
print("c: {}".format(c))
# Print all three host arrays, to show sum() worked
| null |
020_array_sum.py
|
020_array_sum.py
|
py
| 1,672 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyopencl.create_some_context",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyopencl.CommandQueue",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty_like",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyopencl.Buffer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyopencl.mem_flags",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.Buffer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyopencl.mem_flags",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.Buffer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyopencl.mem_flags",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.Program",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyopencl.enqueue_read_buffer",
"line_number": 33,
"usage_type": "call"
}
] |
111753528
|
#duckHacks 2019 Submission by Matthew De La Paz, Eugene Kozlakov, and David Carpenter
#Team Name: "Threat Level Onyx"
## Imports
from config import TOKEN
from googletrans import Translator
import googletrans
import ast
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
##
bot = commands.Bot(command_prefix='&')
translator = Translator()
## Taken from the bot page
debug = True
def save_settings(settings_dict):
'''
Write all settings to a text file 'settings.txt' in the same format as load_settings
'''
s = open("settings.txt", "r")
str_settings = str(settings_dict)
dict_list = []
for line in s:
try:
dictionary = ast.literal_eval(line)
if dictionary["server_id"] != settings_dict["server_id"]:
dict_list.append(str(dictionary))
except:
pass
s.close()
lines = '\n'.join(dict_list)
settings = open("settings.txt", "w")
for line in lines:
settings.write(line)
settings.write("\n" + str_settings)
settings.close()
def load_settings(server_id):
'''
Read all settings from 'settings.txt'.
settings are in the following format:
{
server_id : num, ID of the server
realtime : boolean, If the bot should translate in realtime
language : string, Language code of the default language of the server
confidence : num Number between 0 and 1, defaults to .5
}
'''
settings = open("settings.txt", "r")
dict_list = [] #will contain dictionaries for each server's settings
for line in settings:
try:
dict_list.append(ast.literal_eval(line))
except:
print("Error in appending dictionary/converting string to dict")
settings.close()
#contains the default settings
default_dict = {"server_id":server_id,"realtime":True,"language":"en","confidence":.5}
for dictionary in dict_list:
if server_id in dictionary.values():
return dictionary
f = open("settings.txt","a")
f.write("\n"+str(default_dict))
f.close()
return default_dict
@bot.event
async def on_ready():
print("I'm in")
print(bot.user)
await bot.change_presence(game=discord.Game(name="Type '&help' for commands"))
print(list(bot.servers)[0].id)
@bot.event
async def on_message(message):
if not message.content.startswith('&'):  # startswith also avoids an IndexError on empty messages
settings = load_settings(message.server.id)
if not message.author.bot and is_not_target_language(settings["language"], settings["confidence"], message.content) and settings["realtime"]:
#if the user has a nickname, use it; if not, use their username minus the last 5 characters (the '#1234' discriminator)
if message.author.nick != None :
await bot.send_message(message.channel, str(message.author.nick) + " said:")
else:
user_name = str(message.author)
user_length = len(user_name)
await bot.send_message(message.channel, user_name[:user_length-5] + " said:")
# Translated message could be over 2000 characters. This code allows the message to be sent in that case
# If it is over 2000 characters, sends two messages, with the first 2000 characters and then the rest
print(settings["language"])
transed_msg = translator.translate(message.content.lower(), dest=settings["language"]).text
if (len(transed_msg) > 2000):
await bot.send_message(message.channel, transed_msg[:2000])
await bot.send_message(message.channel, transed_msg[2000:])
else:
await bot.send_message(message.channel, transed_msg)
# Because we want commands AND on_message, this line is necessary
await bot.process_commands(message)
#&sl <language> command
#determine user permissions
#determine if the language is valid
#write it to the text file
#set variable for language
@bot.command(name= "sl",pass_context=True)
async def _sl(ctx,in_lang):
"""
Sets server language
This command sets the server-wide language. This is the language that anything written on the server will be translated into.
"""
settings = load_settings(ctx.message.server.id)
is_admin = ctx.message.author.server_permissions.administrator
in_lang = in_lang.lower()
if is_admin:
if in_lang in list(googletrans.LANGUAGES.keys()):
in_lang = googletrans.LANGUAGES[in_lang]
await bot.say("Language has been set to " + in_lang + "!")
settings["language"] = googletrans.LANGCODES[in_lang]
save_settings(settings)
elif in_lang in list(googletrans.LANGCODES.keys()):
await bot.say("Language has been set to " + in_lang + "!")
settings["language"] = googletrans.LANGCODES[in_lang]
save_settings(settings)
else:
await bot.say("Invalid language, please use \"&list\" command for a list of valid languages and codes.")
else:
await bot.say("Sorry, you don't have the permissions!")
#&sc <confidence> command
#determine user permissions
#determine if the confidence threshold is valid
#write it to the text file
#set variable for the confidence threshold
@bot.command(name= "list", pass_context=True)
async def _list(ctx):
"""
Lists valid languages and codes.
"""
pretty_languages = ""
for key in googletrans.LANGCODES.keys():
pretty_languages = pretty_languages + googletrans.LANGCODES[key]+ "\t"+ key + "\n"
print(pretty_languages)
await bot.say("List of valid languages:\n" + pretty_languages)
@bot.command(name = "language",pass_context=True)
async def _language(cxt):
"""
Displays current language of server/channel.
"""
server_lang = load_settings(cxt.message.server.id)["language"]
await bot.say("The current language is set to " + googletrans.LANGUAGES[server_lang] + ".")
@bot.command(name = "at") #autotranslate
async def _at(out_lang,*args):
"""
Detects <src> language and translates to <src> language.
format: &at <dest_language_code> <text>
"""
dest = out_lang
to_trans = ""
for i in range(0,len(args)):
to_trans += args[i] + " "
await bot.say(translator.translate(to_trans,dest).text)
@bot.command(name = "mt") #manual translate
async def _mt(in_lang,out_lang,*args):
"""
Translates given text from <src> to <dest> language.
NOTE: Does not detect languages whatsoever! Be careful with what you input as your <src>!
format: &mt <source language> <destination language> <text>
"""
src = in_lang
dest = out_lang
to_trans = ""
for i in range(0,len(args)):
to_trans += args[i] + " "
await bot.say(translator.translate(to_trans,dest,src).text)
@bot.command(name = "realtime",pass_context=True)
async def _realtime(ctx, arg):
'''
Turns realtime translate on/off given on or off.
'''
is_admin = ctx.message.author.server_permissions.administrator
if is_admin:
settings = load_settings(ctx.message.server.id)
if arg.lower() in ["true","on"]:
realtime = True
settings["realtime"] = True
elif arg.lower() in ["false","off"]:
realtime = False
settings["realtime"] = False
else:
await bot.say("Please use on/true or off/false.")
return  # `realtime` is never assigned on invalid input, so stop here
status = "on" if realtime else "off"
await bot.say("Real-time translation currently " + status + ".")
save_settings(settings)
else:
await bot.say("Sorry, you don't have permissions for that.")
@bot.command(name = "sc",pass_context=True)
async def _sc(ctx, percent):
'''
Sets the acceptable confidence level.
Sets the accepted confidence level between 0 and 1 that Langwave must be before it will present the translated message. Default is 0.5.
'''
is_admin = ctx.message.author.server_permissions.administrator
if is_admin:
settings = load_settings(ctx.message.server.id)
percent = float(percent)
if percent > 1 or percent < 0:
await bot.say("Please enter a number between 0 and 1. (e.g. 0.5)")
else:
confidence_threshold = str(percent)
await bot.say("The accepted confidence level is now " + confidence_threshold)
settings["confidence"] = percent
save_settings(settings)
else:
await bot.say("Sorry, you don't have permissions for that.")
'''
TODO:
//done &help command - display commands // DONE
//done &put !help in the "playing" field //DONE
//done &list - list languages //DONE
//done &sl <language> - set server language //DONE
//done &language - display what language the server is set to //DONE
&realtime <true/false> - enable realtime translating to the server-set language
//done &translate / !t <in-language> <out-language> <text> - translates text from one language to another //DONE
//done &autotranslate / !at <out-language> <text> - detects language of text and translates it to out-language. with no out-language it defaults to server language //DONE
&st <1 >= number >= 0> - Sets the confidence threshold, default is .5
'''
def is_not_target_language(target, confidence_threshold, message):
'''
Given the server-wide chosen language and a user message, returns
whether or not googletrans *thinks* the language is in the target language.
'''
detection = translator.detect(message) #detection object from googletrans
message_lang = str(detection.lang) #language member, two-letter string denoting language
confidence = detection.confidence #confidence level denoting the percent chance that the user's message was detected correctly
print(message_lang, confidence)
return (confidence > confidence_threshold) and (message_lang != target)
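# e.g. with target='en' and confidence_threshold=0.5, a message detected as 'fr'
# at confidence 0.9 returns True and will therefore be translated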
bot.run(TOKEN)
| null |
main.py
|
main.py
|
py
| 10,053 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Bot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "googletrans.Translator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "discord.Game",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "googletrans.LANGUAGES.keys",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "googletrans.LANGUAGES",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGUAGES",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGCODES",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGCODES.keys",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "googletrans.LANGCODES",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGCODES",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGCODES.keys",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "googletrans.LANGCODES",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGCODES",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "googletrans.LANGUAGES",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "config.TOKEN",
"line_number": 266,
"usage_type": "argument"
}
] |
608017954
|
'''
Created on Oct 30, 2017
@author: matti
'''
import bclab
import logging
import time
import numpy as np
import base64
import threading
import sys
class StreamA(bclab.Producer, threading.Thread):
def __init__(self, logger, broker_address, broker_port, client_id, n_channels=50, rows_per_message=200, qos=1):
bclab.Producer.__init__(self,logger, broker_address, broker_port, client_id, qos=qos)
threading.Thread.__init__(self)
self.total_rows_sent = 0
self.n_channels=n_channels
self.fq_adj= np.random.randint(50,300,n_channels)
self.rows_per_message=rows_per_message
self.daemon=True
self.start()
def on_arm(self, attr):
chs =[]
for i in range(self.n_channels):
ch = bclab.DataChannel("channel %i" % i, i, -1, 1, "stream1")
chs.append(ch)
self.initialize_data(2000, len(chs), chs)
def run(self):
nxt= time.perf_counter()+1
iv = 0.1
while(True):
status = self.get_status()
if status in [bclab.STATUS_ARMED, bclab.STATUS_TRIGGERED]:
bbuf = np.zeros((self.rows_per_message, self.n_channels), np.float32)
for i in range(0,self.n_channels):
c=0.8*np.sin(np.arange(self.total_rows_sent,self.total_rows_sent+self.rows_per_message)/self.fq_adj[i])+np.random.rand(self.rows_per_message)*0.2
bbuf[:,i]=c
s = base64.b64encode(bbuf).decode("utf-8")
self.stream_data(s, self.total_rows_sent, self.rows_per_message)
self.total_rows_sent += self.rows_per_message
now = time.perf_counter()
d = nxt-now
nxt += iv
if (d>0):
time.sleep(d)
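# Each streamed message carries rows_per_message float32 rows encoded as base64,
# so a consumer must decode with the matching dtype and channel count to recover the block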
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
logging.info('running')
qos = int(sys.argv[1]) if len(sys.argv) > 1 else 1
rows_to_send = int(sys.argv[2]) if len(sys.argv) > 2 else 20000
with StreamA(logging, 'localhost', 1883, "streama", 50, 200, qos ) as producer:
while(producer.total_rows_sent < rows_to_send):
time.sleep(0.5)
logging.info('done')
| null |
py/streama.py
|
streama.py
|
py
| 2,245 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bclab.Producer",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "bclab.Producer.__init__",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bclab.Producer",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "bclab.DataChannel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bclab.STATUS_ARMED",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "bclab.STATUS_TRIGGERED",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 64,
"usage_type": "call"
}
] |
415897232
|
import boto3
from json import loads, dumps
from boto3.dynamodb.conditions import Attr
def insert_policy(policy_name, policy_type, data_json):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table("octopus_policy")
item = {"PolicyName": policy_name,
"Type": policy_type,
"Data": dumps(data_json, ensure_ascii=False)}
table.put_item( Item=item )
def delete_policy(policy_name, policy_type):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table("octopus_policy")
table.delete_item(Key={"PolicyName":policy_name, "Type":policy_type})
def get_policy_by_name(policy_name):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table("octopus_policy")
return table.scan(FilterExpression=Attr("PolicyName").eq(policy_name))['Items']
def get_policies_by_type(policy_type):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table("octopus_policy")
content = table.scan(FilterExpression=Attr("Type").eq(policy_type))['Items']
names = []  # avoid shadowing the built-in `list`
if content:
names = [p['PolicyName'] for p in content]
return names
def get_all_policies_by_type(policy_type):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table("octopus_policy")
return table.scan(FilterExpression=Attr("Type").eq(policy_type))['Items']
def delete_if_not_found(policy_type, object):
policy_type = policy_type.upper()
# search for all policies of type
policies_db = get_all_policies_by_type(policy_type)
# we do a comparison to find the item; if not found, delete it
for policy_db in policies_db:
found = False
for policy in object:
if policy_db['Type'] == policy_type:
if "Name" in policy and policy_db['PolicyName'] == policy['Name']:
found = True
break
if "role_name" in policy and policy_db['PolicyName'] == policy['role_name']:
found = True
break
if not found:
delete_policy(policy_db['PolicyName'],policy_type)
def lambda_handler(event,context):
print("Debug:",event)
if event['httpMethod'] == "GET":
if event['resource'] == "/policy/content":
policies_db = get_all_policies_by_type("POLICY")
trusts_db = get_all_policies_by_type("TRUST")
roles_db = get_all_policies_by_type("ROLE")
return {"statusCode":200, "body":dumps({"error":False,
"policies": dumps(policies_db), "trusts": dumps(trusts_db) , "roles": dumps(roles_db) }),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
elif event['resource'] == "/policy/available/iam":
content = get_policies_by_type("IAM")
content.sort()
return {"statusCode":200, "body":dumps({"error":False, "policies":content}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
elif event['resource'] == "/policy/available/trust":
content = get_policies_by_type("TRUST")
content.sort()
return {"statusCode":200, "body":dumps({"error":False, "policies":content}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
elif event['resource'] == "/policy/available/role":
content = get_policies_by_type("ROLE")
content.sort()
return {"statusCode":200, "body":dumps({"error":False, "policies":content}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
elif event['httpMethod'] == "POST" and event['resource'] == "/policy/update":
try:
body = loads(event['body'])
policies = body['policies']
trusts = body['trusts_relationship']
roles = body['roles']
except KeyError:
return {"statusCode":400, "body":dumps({"error":True, "message":"Params invalid"}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
content = "ok"
for policy in policies:
policy_name = policy['Name']
policy_type = "POLICY"
insert_policy(policy_name, policy_type, policy)
for trust in trusts:
policy_name = trust['Name']
policy_type = "TRUST"
insert_policy(policy_name, policy_type, trust)
for role in roles:
policy_name = role['role_name']
policy_type = "ROLE"
insert_policy(policy_name, policy_type, role)
# search for all policies of type POLICY
# we do a comparison to find the item; if not found, delete it
delete_if_not_found("POLICY", policies)
delete_if_not_found("TRUST" , trusts)
delete_if_not_found("ROLE", roles)
return {"statusCode":200, "body":dumps({"error":False, "policy":content}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
return {"statusCode":200, "body":dumps({"error":False, "message":"ok"}),
"headers":{ "Content-Type":"application/json", "Access-Control-Allow-Origin":"*"}}
| null |
src/lambda_manage_policy.py
|
lambda_manage_policy.py
|
py
| 5,374 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "boto3.dynamodb.conditions.Attr",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "boto3.dynamodb.conditions.Attr",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "boto3.dynamodb.conditions.Attr",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 133,
"usage_type": "call"
}
] |
447032432
|
import os
from discord_slash import SlashCommand
import discord
import keyFinder
from discord_slash.utils.manage_commands import create_option
import json
from urllib.request import Request, urlopen
import random
import InfoDataValues as IDV
try:
from dotenv import load_dotenv
load_dotenv(dotenv_path = os.path.join(os.path.dirname(__file__), '.env'))
except Exception:
print("Unable to load dotenv, reverting to system environment variable") # dotenv is a bitch
TOKEN = os.getenv("TOKEN")
client = discord.Client(intents=discord.Intents.all())
slash = SlashCommand(client, sync_commands=True) # Declares slash commands through the client.
@client.event
async def on_ready():
print('running bot in')
print(os.getcwd())
print('------')
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
await client.change_presence(activity=discord.Game(name="spansh.co.uk"))
@slash.slash(
name="findCarrier",
description="Finds a station or fleet carrier",
options=[
create_option(
name="station",
description="The carrier to find",
option_type=3,
required=True
)
]
)
async def findCarrier(ctx, station):
try:
system_name = keyFinder.findKey("station", station, "system_name", "name") # use ID to get system name
update_time = keyFinder.findKey("station", station, "updated_at", "name")
await ctx.send("Carrier/station " + station + " was last seen in system " + system_name + " at " + update_time ) # send message showing whereabouts of station
except Exception as E:
if "@" in station:
await ctx.send("I'm not falling for that one, " + ctx.message.author.mention)
else:
await ctx.send("Carrier/station " + station + " could not be found. Bear in mind that carriers must be identified by their code, in the format 'XXX-XXX'. \n \n Reason: " + str(E)) # if there are any errors, show this message along with said error
@slash.slash(
name="station",
description="Shows information about a station or fleet carrier",
options=[
create_option(
name="station",
description="The station to display info about",
option_type=3,
required=True
)
]
)
async def station(ctx, station):
try:
foundName = keyFinder.findKey("station", station.lower(), "name", "name")
pruned_station_data = keyFinder.findKeys("station", station.lower(), IDV.station, "name")
formatted_pruned_station_data = []
for key, value in pruned_station_data.items():
formatted_pruned_station_data.append(str(key).replace('id', 'name').replace('_', ' ').capitalize() + ': ' + str(value).capitalize()) # make dictionary look kinda nice and change system id to system name
await ctx.send("**" + foundName.upper() + "** \n \n" + '\n'.join(formatted_pruned_station_data))
except Exception as E:
if str(E) == 'list index out of range':
if "@" in station:
await ctx.send("I'm not falling for that one, " + ctx.message.author.mention)
else:
await ctx.send("Station '" + station + "' does not exist.")
else:
await ctx.send(str(E) + "\n \n Please contact JoshTheB#1572 if this error persists")
@slash.slash(
name="system",
description="Shows information about a system",
options=[
create_option(
name="system",
description="The system to display info about",
option_type=3,
required=True
)
]
)
async def system(ctx, system):
try:
foundName = keyFinder.findKey("system", system.lower(), "name", "name")
pruned_system_data = keyFinder.findKeys("system", system.lower(), IDV.system, "name")
formatted_pruned_system_data = []
for key, value in pruned_system_data.items():
formatted_pruned_system_data.append(str(key).replace('id', 'name').replace('_', ' ').capitalize() + ': ' + str(value).capitalize()) # make dictionary look kinda nice and change system id to system name
await ctx.send("**" + foundName.upper() + "** \n \n" + '\n'.join(formatted_pruned_system_data))
except Exception as E:
if str(E) == 'list index out of range':
if "@" in system:
await ctx.send("I'm not falling for that one, " + ctx.message.author.mention)
else:
await ctx.send("System '" + system + "' does not exist.")
else:
await ctx.send(str(E) + "\n \n Please contact JoshTheB#1572 if this error persists")
@slash.slash(
name="body",
description="Shows information about a body",
options=[
create_option(
name="body",
description="The body to display info about",
option_type=3,
required=True
)
]
)
async def body(ctx, body):
try:
foundName = keyFinder.findKey("body", body.lower(), "name", "name")
pruned_body_data = keyFinder.findKeys("body", body.lower(), IDV.body, "name")
formatted_pruned_body_data = []
for key, value in pruned_body_data.items():
formatted_pruned_body_data.append(str(key).replace('id', 'name').replace('_', ' ').capitalize() + ': ' + str(value).capitalize()) # make dictionary look kinda nice and change body id to body name
if not body.upper() == foundName.upper():
await ctx.send("Could not find body " + body + ", instead found")
await ctx.send("**" + foundName.upper() + "** \n \n" + '\n'.join(formatted_pruned_body_data))
except Exception as E:
if str(E) == 'list index out of range':
if "@" in body:
await ctx.send("I'm not falling for that one, " + ctx.message.author.mention)
else:
await ctx.send("Body '" + body + "' does not exist.")
else:
await ctx.send(str(E) + "\n \n Please contact JoshTheB#1572 if this error persists")
client.run(TOKEN)
| null |
src/main.py
|
main.py
|
py
| 6,268 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "discord.Client",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.Intents.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "discord_slash.SlashCommand",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "discord.Game",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKey",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKey",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord_slash.utils.manage_commands.create_option",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKey",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKeys",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "InfoDataValues.station",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "discord_slash.utils.manage_commands.create_option",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKey",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKeys",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "InfoDataValues.system",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "discord_slash.utils.manage_commands.create_option",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKey",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "keyFinder.findKeys",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "InfoDataValues.body",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "discord_slash.utils.manage_commands.create_option",
"line_number": 128,
"usage_type": "call"
}
] |
239913287
|
from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^profiles/$', index_view),
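# NOTE: this pattern matches the same path as the named 'profiles' route below,
# so that later entry is never reached; one of the two is likely redundant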
url(r'^login/$',login_view,name='login'),
url(r'^logout/$',logout_view,name='logout'),
url(r'^profiles/$',profiles_view,name='profiles'),
url(r'^my_profile/$',my_profile_view,name='my_profile'),
url(r'^register/$',register_view,name='register'),
url(r'^see/(?P<id_user>.*)/$',see_profile_view,name='see'),
]
| null |
users/urls.py
|
urls.py
|
py
| 422 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
}
] |
44090576
|
from ursina import *
from ursina.prefabs.platformer_controller_2d import PlatformerController2d
app = Ursina()
window.title = 'Platformer'
window.color = color.gold
window.fullscreen = True
player = PlatformerController2d(
position = (-8, 0),
collider = 'box',
collision = True,
color = color.white,
texture='brick'
)
camera.add_script(SmoothFollow(target=player, offset=[0.5, -30], speed=40))
def update():
player.x += held_keys['d'] * time.dt * 2
player.x -= held_keys['a'] * time.dt * 2
def input(key):
if key == 'enter':
player.position = (0, -1)
"""
def draw_world(x = 0, y = 0, columns = 0):
def draw_column(height = 5):
for i in range(height + 1):
tile = Entity(
position = (x, y),
model = "quad",
collider = 'box',
scale = (1, 1, 1),
collision = True,
)
draw_world()
"""
counter_2 = -10
for i in range(10):
counter_1 = -11
for i in range(5):
tile = Entity(
model = "quad",
collider = "box",
collision = True,
x = counter_2,
y = counter_1,
)
counter_1 += 1
counter_2 += 1
app.run()
| null |
urs/main.py
|
main.py
|
py
| 1,318 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ursina.prefabs.platformer_controller_2d.PlatformerController2d",
"line_number": 10,
"usage_type": "call"
}
] |
129547043
|
from tkinter import *
from PIL import Image, ImageTk, ImageEnhance
def setupCanvas(imageName):
"Create canvas and load provided image"
try:
global im
im = Image.open(imageName)
canvas.image = ImageTk.PhotoImage(im)
canvas.create_image(0, 0, image=canvas.image, anchor="nw")
except Exception as e:
print("Image could not be loaded:", e)
def setupButtons():
"Create image processing scales and add to window"
brightnessScale = Scale(root, label="Brightness", orient=HORIZONTAL, from_=0.0, to= 3.0, length = 200, resolution=0.01, command = brightenImage)
brightnessScale.set(1.0)
brightnessScale.pack()
def promptUser():
"Asks user for name of image located in same directory as program"
imageName = input("Please input the name of your image: ")
return imageName
#image processing functions
def brightenImage(val):
enhancer = ImageEnhance.Brightness(im)
canvas.image.paste(enhancer.enhance(float(val)))
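# PhotoImage.paste updates the pixel data in place, so the canvas item created
# in setupCanvas reflects the new brightness without being re-created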
#imageName = promptUser()
root = Tk()
root.title("Image Editor")
#create canvas
canvas = Canvas(root, width=1920, height=1080)
canvas.pack()
#load image, create pixel map and add GUI elements
setupCanvas("wallpaper.jpg")
setupButtons()
#start application
root.mainloop()
| null |
ImageEditor.py
|
ImageEditor.py
|
py
| 1,202 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PIL.ImageEnhance.Brightness",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.ImageEnhance",
"line_number": 31,
"usage_type": "name"
}
] |
383830323
|
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import tensorflow as tf
from sklearn.base import ClassifierMixin
from sklearn.linear_model import LogisticRegression
from zenml.core.repo import Repository
from zenml.pipelines import pipeline
from zenml.steps import step
from zenml.steps.base_step_config import BaseStepConfig
from zenml.steps.step_output import Output
class TrainerConfig(BaseStepConfig):
"""Trainer params"""
epochs: int = 1
gamma: float = 0.7
lr: float = 0.001
@step
def importer_mnist() -> Output(
X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray
):
"""Download the MNIST data store it as an artifact"""
(X_train, y_train), (
X_test,
y_test,
) = tf.keras.datasets.mnist.load_data()
return X_train, y_train, X_test, y_test
@step
def normalize_mnist(
X_train: np.ndarray, X_test: np.ndarray
) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):
"""Normalize the values for all the images so they are between 0 and 1"""
X_train_normed = X_train / 255.0
X_test_normed = X_test / 255.0
return X_train_normed, X_test_normed
@step
def tf_trainer(
config: TrainerConfig,
X_train: np.ndarray,
y_train: np.ndarray,
) -> tf.keras.Model:
"""Train a neural net from scratch to recognise MNIST digits return our
model or the learner"""
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(10),
]
)
model.compile(
optimizer=tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model.fit(
X_train,
y_train,
epochs=config.epochs,
)
# write model
return model
@step
def tf_evaluator(
X_test: np.ndarray,
y_test: np.ndarray,
model: tf.keras.Model,
) -> float:
"""Calculate the loss for the model for each epoch in a graph"""
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
return test_acc
@step
def sklearn_trainer(
config: TrainerConfig,
X_train: np.ndarray,
y_train: np.ndarray,
) -> ClassifierMixin:
"""Train SVC from sklearn."""
clf = LogisticRegression(penalty="l1", solver="saga", tol=0.1)
clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)
return clf
@step
def sklearn_evaluator(
X_test: np.ndarray,
y_test: np.ndarray,
model: ClassifierMixin,
) -> float:
"""Calculate accuracy score with classifier."""
test_acc = model.score(X_test.reshape((X_test.shape[0], -1)), y_test)
return test_acc
@pipeline
def mnist_pipeline(
importer,
normalizer,
trainer,
evaluator,
):
# Link all the steps artifacts together
X_train, y_train, X_test, y_test = importer()
X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)
model = trainer(X_train=X_trained_normed, y_train=y_train)
evaluator(X_test=X_test_normed, y_test=y_test, model=model)
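# The pipeline wires step outputs to inputs by name, so the same topology is
# reused below with either the TensorFlow or the scikit-learn trainer/evaluator pair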
# Run the pipeline
# Initialise a pipeline run
tf_p = mnist_pipeline(
importer=importer_mnist(),
normalizer=normalize_mnist(),
trainer=tf_trainer(config=TrainerConfig(epochs=1)),
evaluator=tf_evaluator(),
)
# Run the pipeline
tf_p.run()
# Initialise a new pipeline run
scikit_p = mnist_pipeline(
importer=importer_mnist(),
normalizer=normalize_mnist(),
trainer=sklearn_trainer(config=TrainerConfig()),
evaluator=sklearn_evaluator(),
)
# Run the new pipeline
scikit_p.run()
# Post execution flow
repo = Repository()
p = repo.get_pipeline(pipeline_name="mnist_pipeline")
print(f"Pipeline `mnist_pipeline` has {len(p.runs)} run(s)")
for r in p.runs[0:2]:
eval_step = r.steps[3]
print(
f"For {eval_step.name}, the accuracy is: "
f"{eval_step.output.read():.2f}"
)
| null |
examples/low_level_guide/chapter_4.py
|
chapter_4.py
|
py
| 4,547 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "zenml.steps.base_step_config.BaseStepConfig",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.datasets.mnist.load_data",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "zenml.steps.step",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "zenml.steps.step_output.Output",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "zenml.steps.step",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "zenml.steps.step_output.Output",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "zenml.steps.step",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "zenml.steps.step",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "zenml.steps.step",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "sklearn.base.ClassifierMixin",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "sklearn.base.ClassifierMixin",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "zenml.steps.step",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "zenml.pipelines.pipeline",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "zenml.core.repo.Repository",
"line_number": 164,
"usage_type": "call"
}
] |
117864208
|
from nose.tools import set_trace, eq_
from . import DatabaseTest
from scripts import RedoOCLCForThreeMScript
from core.model import (
Identifier,
DataSource,
CoverageRecord,
)
class DummyCoverageProvider(object):
hit_count = 0
def ensure_coverage(self, identifier):
self.hit_count += 1
class TestRedoOCLCForThreeM(DatabaseTest):
def setup(self):
super(TestRedoOCLCForThreeM, self).setup()
self.script = RedoOCLCForThreeMScript(self._db)
self.edition1, lp = self._edition(
data_source_name = DataSource.THREEM,
identifier_type = Identifier.THREEM_ID,
with_license_pool = True,
title = "Metropolis"
)
self.edition2, lp = self._edition(
data_source_name = DataSource.THREEM,
identifier_type = Identifier.THREEM_ID,
with_license_pool = True,
title = "The ArchAndroid"
)
# Give edition2 a coverage record.
self._coverage_record(self.edition2, self.script.input_data_source)
# Create a control case.
self.edition3, lp = self._edition(
data_source_name = DataSource.THREEM,
identifier_type = Identifier.THREEM_ID,
with_license_pool = True,
title = "The Electric Lady"
)
self._db.commit()
# Remove contributors for the first two editions.
contributions = list(self.edition1.contributions) + list(self.edition2.contributions)
contributors = list(self.edition1.contributors) + list(self.edition2.contributors)
for c in contributions + contributors:
self._db.delete(c)
self._db.commit()
def test_fetch_authorless_threem_identifiers(self):
identifiers = self.script.fetch_authorless_threem_identifiers()
# Both the editions with and without coverage records are selected...
eq_(2, len(identifiers))
# ...while the edition with contributors is not.
assert self.edition3.primary_identifier not in identifiers
def test_delete_coverage_records(self):
oclc = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)
q = self._db.query(CoverageRecord).filter(
CoverageRecord.data_source==oclc
)
coverage_records_before = q.all()
eq_(1, len(coverage_records_before))
eq_(self.edition2.primary_identifier, coverage_records_before[0].identifier)
identifiers = [self.edition1.primary_identifier, self.edition2.primary_identifier]
self.script.delete_coverage_records(identifiers)
coverage_records_after = q.all()
eq_(0, len(coverage_records_after))
def test_ensure_isbn_identifier(self):
self.script.oclc_classify = DummyCoverageProvider()
eq_(0, self.script.oclc_classify.hit_count)
# When there are no equivalent identifiers, both identifiers go to the
# OCLCClassify coverage provider.
identifiers = [self.edition1.primary_identifier, self.edition2.primary_identifier]
self.script.ensure_isbn_identifier(identifiers)
eq_(2, self.script.oclc_classify.hit_count)
# If an edition already has an ISBN identifier it doesn't go to the
# coverage provider.
self.script.oclc_classify.hit_count = 0
self.edition1.primary_identifier.equivalent_to(
DataSource.lookup(self._db, DataSource.GUTENBERG),
self._identifier(identifier_type = Identifier.ISBN), 1
)
self._db.commit()
self.script.ensure_isbn_identifier(identifiers)
eq_(1, self.script.oclc_classify.hit_count)
def test_merge_contributors(self):
oclc_work = self._identifier(identifier_type=Identifier.OCLC_WORK)
oclc_number = self._identifier(identifier_type=Identifier.OCLC_NUMBER)
denzel, ignore = self._contributor("Denzel Washington")
for oclc_id in [oclc_work, oclc_number]:
# Create editions for each OCLC Identifier, give them a contributor,
# and set them equivalent.
edition = self._edition(
data_source_name = self.script.input_data_source.name,
identifier_type = oclc_id.type,
identifier_id = oclc_id.identifier,
title = "King Kong Ain't Got Nothin On Me"
)
[contribution] = edition.contributions
contribution.contributor = denzel
self.edition1.primary_identifier.equivalent_to(
self.script.input_data_source, oclc_id, 1
)
self._db.commit()
eq_(0, len(self.edition1.contributors))
self.script.merge_contributors(self.edition1.primary_identifier)
eq_(1, len(self.edition1.contributors))
eq_(
["Washington, Denzel"],
[x.sort_name for x in self.edition1.contributors]
)
| null |
tests/test_scripts.py
|
test_scripts.py
|
py
| 4,922 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scripts.RedoOCLCForThreeMScript",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "core.model.DataSource.THREEM",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "core.model.DataSource",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "core.model.Identifier.THREEM_ID",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "core.model.DataSource.THREEM",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "core.model.DataSource",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "core.model.Identifier.THREEM_ID",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "core.model.DataSource.THREEM",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "core.model.DataSource",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "core.model.Identifier.THREEM_ID",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "core.model.DataSource.lookup",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "core.model.DataSource",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "core.model.DataSource.OCLC_LINKED_DATA",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "core.model.CoverageRecord",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "core.model.CoverageRecord.data_source",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "core.model.CoverageRecord",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "core.model.DataSource.lookup",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "core.model.DataSource",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "core.model.DataSource.GUTENBERG",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier.ISBN",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "core.model.Identifier.OCLC_WORK",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "core.model.Identifier.OCLC_NUMBER",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "core.model.Identifier",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 119,
"usage_type": "call"
}
] |
10281447
|
# flake8: noqa
from os import environ
from django.core.exceptions import ImproperlyConfigured
from bananas.url import database_conf_from_url
from .base import *
DATABASES = {
'default': database_conf_from_url(environ.get('DATABASE_URI'))
}
WWW_ROOT = environ.get('WWW_ROOT')
if not WWW_ROOT:
raise ImproperlyConfigured('WWW_ROOT is falsy, '
'can\'t continue in a safe manner')
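# `os` is not imported explicitly here; it is expected to arrive via the
# wildcard import from .base above, otherwise os.path.join below raises a NameError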
STATIC_ROOT = os.path.join(
WWW_ROOT,
'static'
)
MEDIA_ROOT = os.path.join(
WWW_ROOT,
'media'
)
PUBLISH = {
'redis': {
'host': environ.get('REDIS_HOST', 'localhost')
}
}
ELASTICSEARCH = {
'hosts': [
{'host': environ.get('ELASTICSEARCH_HOST', 'localhost')}
]
}
| null |
src/trak/settings/live.py
|
live.py
|
py
| 734 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bananas.url.database_conf_from_url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ImproperlyConfigured",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "name"
}
] |
62033484
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import yeast_dataset_folded as yemd
from solver_folded import Solver
import tensorflow as tf
tf.random.set_seed(1234)
import numpy as np
np.random.seed(1234)
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--neighbors", dest="num_neighbors", default=10, type=int, help="The number of neighbors to retrieve from the Neareset Neighbors model")
parser.add_argument("-a", "--augmentations", dest="num_augmentations", default=4, type=int, help="The number test-time augmentations to apply on every test sample")
parser.add_argument("-f", "--folds", dest="n_folds", default=10, type=int, help="The number of folds in the K-Fold cross-validation")
args = parser.parse_args()
def get_execute_time(start_time, end_time):
hours, rem = divmod(end_time - start_time, 3600)
minutes, seconds = divmod(rem, 60)
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds))
HOME_PATH = '/home/nivgold'
EPOCHS = 500
YEAST_DIM = 8
start_time = time.time()
yeast_X, yeast_y, yeast_pairs = yemd.get_dataset(HOME_PATH, 32, from_disk=True)
end_train_test = time.time()
print("--- Yeast dataset ready after: ", end='')
get_execute_time(start_time, end_train_test)
dataset_name = 'yeast'
weights_path = '/home/nivgold/models/oversampling_models'
solver_obj = Solver(X=yeast_X, y=yeast_y, n_folds=args.n_folds, dataset_name=dataset_name, epochs=EPOCHS, features_dim=YEAST_DIM, siamese_data=yeast_pairs)
# TRAINING
start_time = time.time()
print("Start training...")
solver_obj.train_folded()
end_training = time.time()
print("---training finished after: ", end='')
get_execute_time(start_time, end_training)
# # saving the trained weights
# solver_obj.save_weights_folded(path=weights_path)
# solver_obj.load_weights_folded(weights_path)
# TEST WITHOUT TTA
start_time = time.time()
print("--- Start baseline testing...")
solver_obj.test_folded()
end_testing = time.time()
print("--- Baseline testing finished after: ", end='')
get_execute_time(start_time, end_testing)
# TEST WITH TTA
num_neighbors = args.num_neighbors
num_augmentations = args.num_augmentations
start_time = time.time()
print(f"--- Start TTA testing with: \t {num_neighbors} neighbors, {num_augmentations} TTA augmentations")
solver_obj.test_tta_folded(num_neighbors=num_neighbors, num_augmentations=num_augmentations)
end_tta_testing = time.time()
print("--- TTA testing finished after: ", end='')
get_execute_time(start_time, end_tta_testing)
solver_obj.print_test_results()
| null |
oversampling/run_yeast_folded.py
|
run_yeast_folded.py
|
py
| 2,548 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.random.set_seed",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "yeast_dataset_folded.get_dataset",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "solver_folded.Solver",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 65,
"usage_type": "call"
}
] |
437687673
|
# Copyright (c) 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.tests.unit.nsx_v3 import test_constants as test_constants_v3
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
from vmware_nsx.tests.unit.nsxlib.v3 import test_client
LOG = log.getLogger(__name__)
class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
def _body(self, qos_marking=None, dscp=None,
description=test_constants_v3.FAKE_NAME):
body = {
"resource_type": "QosSwitchingProfile",
"tags": []
}
if qos_marking:
body = nsxlib._update_dscp_in_args(body, qos_marking, dscp)
body["display_name"] = test_constants_v3.FAKE_NAME
body["description"] = description
return body
def _body_with_shaping(self, shaping_enabled=False,
burst_size=None,
peak_bandwidth=None,
average_bandwidth=None,
description=test_constants_v3.FAKE_NAME,
qos_marking=None,
dscp=0):
body = test_constants_v3.FAKE_QOS_PROFILE
body["display_name"] = test_constants_v3.FAKE_NAME
body["description"] = description
for shaper in body["shaper_configuration"]:
# Neutron currently support only shaping of Egress traffic
if shaper["resource_type"] == "EgressRateShaper":
shaper["enabled"] = shaping_enabled
if burst_size:
shaper["burst_size_bytes"] = burst_size
if peak_bandwidth:
shaper["peak_bandwidth_mbps"] = peak_bandwidth
if average_bandwidth:
shaper["average_bandwidth_mbps"] = average_bandwidth
break
if qos_marking:
body = nsxlib._update_dscp_in_args(body, qos_marking, dscp)
return body
def test_create_qos_switching_profile(self):
"""
Test creating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.create_qos_switching_profile(
tags=[],
name=test_constants_v3.FAKE_NAME,
description=test_constants_v3.FAKE_NAME)
test_client.assert_json_call(
'post', api,
'https://1.2.3.4/api/v1/switching-profiles',
data=jsonutils.dumps(self._body(),
sort_keys=True))
def test_update_qos_switching_profile(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
original_profile = self._body()
new_description = "Test"
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the description of the profile
nsxlib.update_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'],
tags=[],
description=new_description)
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(self._body(description=new_description),
sort_keys=True))
def test_enable_qos_switching_profile_shaping(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
original_profile = self._body_with_shaping()
burst_size = 100
peak_bandwidth = 200
average_bandwidth = 300
qos_marking = "untrusted"
dscp = 10
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the bw shaping of the profile
nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking=qos_marking,
dscp=dscp)
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(
self._body_with_shaping(
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking="untrusted", dscp=10),
sort_keys=True))
def test_disable_qos_switching_profile_shaping(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
burst_size = 100
peak_bandwidth = 200
average_bandwidth = 300
original_profile = self._body_with_shaping(
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking="untrusted",
dscp=10)
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the bw shaping of the profile
nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=False, qos_marking="trusted")
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(
self._body_with_shaping(qos_marking="trusted"),
sort_keys=True))
def test_delete_qos_switching_profile(self):
"""
Test deleting qos-switching-profile
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.delete_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'])
test_client.assert_json_call(
'delete', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'])
| null |
vmware_nsx/tests/unit/nsxlib/v3/test_qos_switching_profile.py
|
test_qos_switching_profile.py
|
py
| 7,353 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "oslo_log.log.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "oslo_log.log",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.nsxlib_testcase.NsxClientTestCase",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.nsxlib_testcase",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3._update_dscp_in_args",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3._update_dscp_in_args",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "vmware_nsx.nsxlib.v3.create_qos_switching_profile",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_NAME",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client.assert_json_call",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "oslo_serialization.jsonutils.dumps",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "oslo_serialization.jsonutils",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "mock.patch.object",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3.client",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3.update_qos_switching_profile",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client.assert_json_call",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "oslo_serialization.jsonutils.dumps",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "oslo_serialization.jsonutils",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "mock.patch.object",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3.client",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3.update_qos_switching_profile_shaping",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client.assert_json_call",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "oslo_serialization.jsonutils.dumps",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "oslo_serialization.jsonutils",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 154,
"usage_type": "argument"
},
{
"api_name": "mock.patch.object",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3.client",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3.update_qos_switching_profile_shaping",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client.assert_json_call",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "oslo_serialization.jsonutils.dumps",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "oslo_serialization.jsonutils",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 185,
"usage_type": "argument"
},
{
"api_name": "vmware_nsx.nsxlib.v3.delete_qos_switching_profile",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.nsxlib.v3",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client.assert_json_call",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "vmware_nsx.tests.unit.nsxlib.v3.test_client",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants.FAKE_QOS_PROFILE",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "vmware_nsx.tests.unit.nsx_v3.test_constants",
"line_number": 193,
"usage_type": "name"
}
] |
146552310
|
#
# Python class file to manage archives
#
# Author: Kyle Manna < kmanna [at] fan [dot] tv >
import re
import os
import shutil
import logging
import tarfile
import subprocess
class ManifestArchive:
def __init__(self, filename):
self.filename = filename
base = os.path.basename(self.filename)
(root, ext) = os.path.splitext(base)
self.ext = ext.lstrip('.')
self.root = re.sub(r'\.tar.*$', '', root)
def extract(self, dst='.', strip=True):
# Leverage pxz if available
if shutil.which('pxz') and self.ext == 'xz':
logging.debug("Using pxz for decompression")
if strip: xform = '--strip 1'
else: xform = ''
# Hand it over to cmd line tools since multiple pipes in python is painful
# and leverage pxz for best performance
cmd_tar = 'pxz -d < {} | tar -xf - -C {} {}'.format(self.filename, dst, xform)
logging.debug("cmd: {}".format(cmd_tar))
subprocess.call(cmd_tar, shell=True)
else:
# Failsafe extraction without pxz (slow!); note this branch does not honor strip
with tarfile.open(self.filename, 'r:*') as tar:
tar.extractall(path=dst)
def create(self, source, strip=True):
# Leverage pxz if available
if shutil.which('pxz') and self.ext == 'xz':
logging.debug("Using pxz for compression")
if strip:
xform = '--xform \'s:^{}/:{}/:\''.format(source.lstrip('/'), self.root)
else:
xform = ''
# Hand it over to cmd line tools since multiple pipes in python is painful
# and leverage pxz for best performance
cmd_tar = 'tar -cf - {} {} | pxz > {}'.format(source, xform, self.filename)
logging.debug("cmd: {}".format(cmd_tar))
subprocess.call(cmd_tar, shell=True)
else:
# Failsafe tarball + compression (slow!)
with tarfile.open(self.filename, 'w:{}'.format(self.ext)) as tar:
xform = None
if strip:
xform = self.root
tar.add(source, arcname=xform)
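# Illustrative usage sketch (added; paths are assumptions, not from the original):
#   archive = ManifestArchive('/tmp/rootfs.tar.xz')
#   archive.create('/srv/rootfs')          # pack /srv/rootfs into the archive
#   archive.extract(dst='/tmp/unpacked')   # unpack it again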
| null |
archives.py
|
archives.py
|
py
| 1,987 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.basename",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "shutil.which",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "shutil.which",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 54,
"usage_type": "call"
}
] |
646375541
|
from django.db.models import Q
from django.test import Client, TestCase
from django.urls import reverse
from unittest.mock import Mock, patch
from app.models import Book
from app.tests.tests_utils import GoogleBookResponse
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.add_url = reverse("db")
self.list_url = reverse("book-list")
self.id = 1
self.detail_url = reverse("book-detail", args=[str(self.id)])
Book.objects.create(
pk=self.id, title="book", authors=["author"], published_date="1999-01-01"
)
def test_book_list_GET_positive(self):
response = self.client.get(self.list_url)
expected_response = {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": self.id,
"title": "book",
"authors": ["author"],
"published_date": "1999-01-01",
"categories": None,
"average_rating": None,
"rating_count": None,
"thumbnail": None,
}
],
}
self.assertEquals(response.status_code, 200)
self.assertJSONEqual(str(response.content, encoding="utf8"), expected_response)
def test_book_list_GET_negative(self):
response = self.client.get(self.list_url)
expected_response = {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": self.id,
"title": "book",
"authors": ["author"],
"published_date": "1999-01-02",
"categories": None,
"average_rating": None,
"rating_count": None,
"thumbnail": None,
}
],
}
self.assertEquals(response.status_code, 200)
self.assertJSONNotEqual(str(response.content, encoding="utf8"), expected_response)
def test_book_detail_GET(self):
response = self.client.get(self.detail_url)
expected_response = {
"id": 1,
"title": "book",
"authors": ["author"],
"published_date": "1999-01-01",
"categories": None,
"average_rating": None,
"rating_count": None,
"thumbnail": None,
}
self.assertEquals(response.status_code, 200)
self.assertJSONEqual(str(response.content, encoding="utf8"), expected_response)
def test_book_add_POST_adds_new_book(self):
posting_object = {"q": "x"}
expected_response = {
"items": [
{
"volumeInfo": {
"title": "book0",
"authors": ["John", "F"],
"publishedDate": "1999-01-01",
}
},
{
"volumeInfo": {
"title": "book1",
"authors": ["Kennedy"],
"publishedDate": "2000-01-01",
}
},
]
}
with patch(
"app.views_utils.requests.get", Mock(return_value=GoogleBookResponse(expected_response))
):
self.client.post(self.add_url, posting_object)
books = Book.objects.filter(Q(title="book0") | Q(title="book1"))
# Assert the count first so the loop below cannot pass vacuously
self.assertEquals(books.count(), 2)
for book in books:
assert book.title in ["book0", "book1"]
def test_book_add_POST_adds_new_book_empty_list(self):
posting_object = {"q": "x"}
expected_response = {"items": None}
with patch(
"app.views_utils.requests.get", Mock(return_value=GoogleBookResponse(expected_response))
):
response = self.client.post(self.add_url, posting_object)
self.assertEquals(response["status"], "books not added")
| null |
app/tests/test_views.py
|
test_views.py
|
py
| 4,065 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "app.models.Book.objects.create",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.models.Book.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "app.models.Book",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "app.tests.tests_utils.GoogleBookResponse",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "app.models.Book.objects.filter",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "app.models.Book.objects",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "app.models.Book",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "app.tests.tests_utils.GoogleBookResponse",
"line_number": 119,
"usage_type": "call"
}
] |
251873108
|
import json
import utils.response as res
from config import dynamodb
def handler(event, context):
try:
# Get input from user.
path_parameters = event.get('pathParameters')
# Activity
activity = path_parameters.get('activity')
# Check if Null.
if (activity is None): raise Exception("Activity not found.")
# Get response from database.
response = dynamodb.get_item(
TableName="Activities",
Key={ 'Identifier': { 'S': activity } }
).get("Item")
# Check item exists
if (response is None): raise Exception("Activity not found.")
# Build Response Object
response = {
'Identifier': activity,
'Cost': response.get('Cost', {}).get('N'),
'Description': response.get('Description', {}).get('S'),
'Host': response.get('Host', {}).get('S'),
'Image': response.get('Image', {}).get('S'),
'Location': response.get('Location', {}).get('S'),
'Name': response.get('Name', {}).get('S'),
'Virtual': response.get('Virtual', {}).get('BOOL'),
'Timestamp': response.get('Timestamp', {}).get('S'),
'Penalty': response.get('Penalty', {}).get('N'),
'Postcode': response.get('Postcode', {}).get('S'),
'Latitude': response.get('Latitude', {}).get('N'),
'Longitude': response.get('Longitude', {}).get('N')
}
return res.build(200, response)
except Exception as e:
print(str(e))
return res.build(400, None)
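# Illustrative invocation (added, hedged; the identifier is an assumption):
#   handler({'pathParameters': {'activity': 'yoga-101'}}, None)
# looks up Identifier='yoga-101' in the Activities table and returns the
# flattened item via res.build(200, ...), or res.build(400, None) on any failure.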
| null |
functions/activity.py
|
activity.py
|
py
| 1,669 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "config.dynamodb.get_item",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.dynamodb",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "utils.response.build",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "utils.response",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "utils.response.build",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "utils.response",
"line_number": 51,
"usage_type": "name"
}
] |
227745594
|
import argparse
import sys
import os
import pathlib
import tempfile
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
from pacman import init_env
from functools import reduce
from tqdm import tqdm
# Parse argument
parser = argparse.ArgumentParser(description='Training LSM with Q-Learning for playing Pacman', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--seed', default=1993, type=int, help='random seed')
parser.add_argument('--checkpoint_dir', default=None, help='name of checkpoint directory')
parser.add_argument('--overwrite', dest='overwrite', action='store_true', help='overwrite the existing log file')
# Additional arguments for q-learning
parser.add_argument('--lr', default=2e-4, type=float, help='learning rate')
parser.add_argument('--train_epoches', default=100, type=int, help='number of training epoches')
parser.add_argument('--train_steps', default=5e4, type=float, help='number of training steps per each training epoch')
parser.add_argument('--start_steps', default=1e2, type=float, help='number of training steps to start training')
parser.add_argument('--train_freq', default=1, type=int, help='frequency of training readout layer')
parser.add_argument('--final_eps', default=1e-1, type=float, help='final exploration rate')
parser.add_argument('--gamma', default=0.95, type=float, help='discount factor')
parser.add_argument('--log_freq', default=100, type=int, help='number of game episodes to log training progress')
parser.add_argument('--buffer_size', default=1e6, type=float, help='replay buffer size')
parser.add_argument('--batch_size', default=32, type=int, help='batch of experience replay')
parser.add_argument('--target_network_update_freq', default=1e4, type=float, help='frequency of updating target network')
parser.add_argument('--test', dest='test', action='store_true', help='test model without reward clipping')
parser.add_argument('--test_steps', default=1e3, type=float, help='number of testing steps per each training epoch')
# Additional arguments for liquid
parser.add_argument('--rate_scale', default=0.1, type=float, help='scaling factor of the input maximum firing rate')
parser.add_argument('--t_sim', default=0.100, type=float, help='single example simulation time')
parser.add_argument('--t_prb_start', default=0.000, type=float, help='time to start collecting spike activity')
parser.add_argument('--t_prb_stop', default=0.100, type=float, help='time to stop collecting spike activity')
parser.add_argument('--n_neurons', default=150, type=int, help='total number of LSM neurons')
parser.add_argument('--hidden_size', default=32, type=int, help='number of hidden neurons in readout for dimensional reduction')
parser.add_argument('--warmup_steps', default=100, type=int, help='number of the warmup samples for LSM before actual training')
# Additional argument for Pacman
parser.add_argument('--maze_path', default='./pacman/mazes/smallF3G1C0.maze', help='path to pacman maze')
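# Illustrative invocation (added, hedged; the checkpoint directory is an assumption):
#   python run_pacman.py --seed 1993 --checkpoint_dir runs/pacman_lsm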
def main():
# Parse argument
global args
args = parser.parse_args()
# Add extra arguments
args.env_name = 'Pacman'
args.total_steps = int(args.train_epoches*args.train_steps)
args.test_steps = int(args.test_steps)
args.checkpoint_freq = int(args.train_steps)
args.buffer_size = int(args.buffer_size) if args.buffer_size < args.total_steps else args.total_steps
args.exploration_steps = int(0.2*args.total_steps)
args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Initialize random seed
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device == 'cuda': torch.cuda.manual_seed_all(args.seed)
# Create environment
env = init_env(maze_path=args.maze_path, save_frame=False, display=False)
if args.test:
test(env)
else:
learn(env)
# Linear scheduler for epsilon-greedy policy
# This class is replicated from https://github.com/openai/baselines/blob/master/baselines/common/schedules.py
class LinearSchedule(object):
def __init__(self, schedule_steps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_steps. After this many steps pass final_p is
returned.
Parameters
----------
schedule_steps: int
Number of steps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_steps = float(schedule_steps)
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_steps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
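# Illustrative values (added, not in the original):
#   sched = LinearSchedule(schedule_steps=100, final_p=0.1, initial_p=1.0)
#   sched.value(0)    # -> 1.0
#   sched.value(50)   # -> 0.55
#   sched.value(200)  # -> 0.1 (clamped once t exceeds schedule_steps)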
# Replay buffer for experience replay
# This class is modified from https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1):
data = (obs_t, action, reward, obs_tp1)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1 = [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1 = data
obses_t.append(obs_t)
actions.append(action)
rewards.append(reward)
obses_tp1.append(obs_tp1)
return obses_t, actions, rewards, obses_tp1
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
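# Illustrative usage (added, hedged):
#   buf = ReplayBuffer(size=1000)
#   buf.add(obs_t, action, reward, obs_tp1)  # obs_tp1 is None at episode end
#   obses_t, actions, rewards, obses_tp1 = buf.sample(batch_size=32)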
# Simple non-spiking model for readout layer
class MLP(nn.Module):
def __init__(self, inp_size, hidden_size, outp_size):
super(MLP, self).__init__()
list_m = [
nn.Linear(inp_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, outp_size),
]
self.classifier = nn.Sequential(*list_m)
def forward(self, inp):
outp = inp.view(inp.size(0), -1)
outp = self.classifier(outp)
return outp
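# Illustrative shape check (added; sizes mirror the script defaults):
#   net = MLP(inp_size=150, hidden_size=32, outp_size=4)
#   net(torch.zeros(1, 150)).shape  # -> torch.Size([1, 4])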
# Wrapper for readout layer which is trained with Q-learning
# This class allows easy load and store for trained readout weights for evaluation
class ReadoutWrapper(object):
def __init__(self, network, optimizer, f, criterion=nn.SmoothL1Loss, gamma=1.0, grad_clipping=1, obs_size=0, hidden_size=0, act_size=0):
self.policy_net = network(inp_size=obs_size, hidden_size=hidden_size, outp_size=act_size).to(args.device)
f.write('{}\n'.format(self.policy_net))
self.target_net = network(inp_size=obs_size, hidden_size=hidden_size, outp_size=act_size).to(args.device)
self.target_net.eval()
self.optimizer = optimizer(self.policy_net.parameters(), lr=args.lr, eps=1e-6)
self.criterion = criterion().to(args.device)
self.gamma = gamma
self.grad_clipping = grad_clipping
def load(self, path='./checkpoint.pt'):
if os.path.isfile(path):
checkpoint = torch.load(path,map_location='cpu')
self.policy_net.load_state_dict(checkpoint['policy_net'])
self.target_net.load_state_dict(checkpoint['target_net'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
print('=> No checkpoint found at {}'.format(path))
sys.exit(0)
def save(self, f=sys.stdout, path='./checkpoint.pt'):
state = {
'policy_net' : self.policy_net.state_dict(),
'target_net' : self.target_net.state_dict(),
'optimizer' : self.optimizer.state_dict()
}
torch.save(state, path)
# Function to select an action based on epsilon-greedy policy
def act(obs, readout, act_size, update_eps=0):
if random.random() > update_eps:
with torch.no_grad():
return readout.policy_net(obs).max(1)[1].item()
else:
return random.randrange(act_size)
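# Illustrative behaviour (added, hedged): with update_eps=0.1 the greedy branch
# runs ~90% of the time; with update_eps=1.0 act() always returns a uniform
# random action from range(act_size).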
# Train function
def train(obses_t, actions, rewards, obses_tp1, readout, batch_size):
# Compute mask for non-final states and create batch of inptus
done_obs_mask = torch.tensor(tuple(map(lambda s: s is not None, obses_tp1))).to(args.device)
done_next_obs_mask = torch.cat([s for s in obses_tp1 if s is not None]).to(args.device)
obses_t = torch.cat(obses_t).to(args.device)
actions = torch.cat(actions).to(args.device)
rewards = torch.cat(rewards).to(args.device)
# Compute Q(s_t, a)
state_action_values = readout.policy_net(obses_t).gather(1, actions)
# Compute V(s_{t+1}) for all next states.
next_state_values = torch.zeros(batch_size).to(args.device)
next_state_values[done_obs_mask] = readout.target_net(done_next_obs_mask).max(1)[0].detach()
# Compute expected Q values based on Bellman equation
expected_state_action_values = torch.add(torch.mul(next_state_values,readout.gamma),rewards)
# Compute loss
loss = readout.criterion(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the readout
readout.optimizer.zero_grad()
loss.backward()
for param in readout.policy_net.parameters():
param.grad.data.clamp_(-readout.grad_clipping, readout.grad_clipping)
readout.optimizer.step()
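# Worked example of the Bellman target above (added; numbers are illustrative):
# with gamma=0.95, reward r=1.0 and max_a' Q_target(s',a')=2.0, the regression
# target is r + gamma * max_a' Q_target = 1.0 + 0.95*2.0 = 2.9, and
# SmoothL1Loss penalises the gap between Q_policy(s,a) and 2.9.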
# Update target network function
def update_target(readout):
readout.target_net.load_state_dict(readout.policy_net.state_dict())
# Pre-processing function
def pre_process(obs):
return obs.view(-1)
def learn(env):
# Open log file to write
if args.checkpoint_dir is None:
# Use standard output if checkpoint directory is not specified
f = sys.stdout
else:
# Use checkpoint directory name as training log file
log_name = '{}_train.log'.format(args.checkpoint_dir)
# Do not proceed if training log file exists
if not args.overwrite and os.path.isfile(log_name):
print('==> File {} exists!'.format(log_name))
sys.exit(0)
f = open(log_name, 'w', buffering=1)
# Print all input arguments to training log file
f.write(str(args)+'\n')
# Create liquid model
import model_lsm
model_spike = model_lsm.LSM(f=f,
t_sim=args.t_sim, t_prb=(args.t_prb_start,args.t_prb_stop),
inp_size=int(reduce(lambda x, y: x*y, env.observation_space.shape)), rate_scale=args.rate_scale,
n_neurons=args.n_neurons,
k={'pe':3,'pi':0,'ei':4},
w={'pe':0.6,'pi':0.0,
'ee':0.05,'ei':0.25,
'ie':-0.3,'ii':-0.01})
model_spike.to(args.device)
model_spike.eval()
f.write('{}\n'.format(model_spike))
# Create readout
readout = ReadoutWrapper(f = f,
network = MLP,
optimizer = torch.optim.RMSprop,
criterion = nn.SmoothL1Loss,
gamma = args.gamma,
grad_clipping = 1.0,
obs_size = model_spike.n_e_neurons,
hidden_size = args.hidden_size,
act_size = env.action_space.n
)
# Create the replay buffer
replay_buffer = ReplayBuffer(args.buffer_size)
# Create the schedule for exploration starting from 1.0
exploration = LinearSchedule(schedule_steps=args.exploration_steps, initial_p=1.0, final_p=args.final_eps)
# Initialize the network parameters and copy them to the target network
update_target(readout)
# Start training
with tempfile.TemporaryDirectory() as temp_dir:
# Create directory for saving result
if args.checkpoint_dir is None:
# Use temp directory if checkpoint directory is not specified
args.checkpoint_dir = temp_dir
else:
pathlib.Path(os.path.abspath(args.checkpoint_dir)).mkdir(parents=True, exist_ok=True)
f.write('==> Checkpoint path : {}\n'.format(args.checkpoint_dir))
# Reset the environment
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Warm-up LSM
for t in range(args.warmup_steps):
new_obs, rew, done, _ = env.step(random.randrange(env.action_space.n))
new_obs = pre_process(new_obs)
with torch.no_grad():
sumspike_e = model_spike(new_obs.to(args.device))
new_obs = sumspike_e
new_inp = None if done else new_obs.unsqueeze(0)
obs = new_obs
inp = new_inp
# Reset environment
if done:
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Declare list for storing cumulative rewards for each game episode and episode counter
# Note that game episode is different from training epoch
# Game episode is a duration when agent start playing the game until the game concludes
# Training epoch is a duration when agent is trained and agent state is saved for evaluation
episode_rewards = [0.0]
num_episodes = 1
# Reset the environment again before starting actual training
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
for t in tqdm(range(args.total_steps)):
# Update exploration to the newest value
update_eps = exploration.value(t)
# Take action based on epsilon-policy
action = act(inp, readout, env.action_space.n, update_eps)
new_obs, rew, done, _ = env.step(action)
new_obs = pre_process(new_obs)
with torch.no_grad():
sumspike_e = model_spike(new_obs.to(args.device))
new_obs = sumspike_e
new_inp = None if done else new_obs.unsqueeze(0)
# Track episode reward
episode_rewards[-1] += rew
# Store state transition into replay buffer and move on to next state
replay_buffer.add(inp, torch.tensor([[action]]), torch.tensor([rew],dtype=torch.float), new_inp)
obs = new_obs
inp = new_inp
# Minimize the error in Bellman's equation on a batch sampled from replay buffer
if t > args.start_steps and t % args.train_freq == 0:
obses_t, actions, rewards, obses_tp1 = replay_buffer.sample(args.batch_size)
train(obses_t, actions, rewards, obses_tp1, readout, args.batch_size)
# Update target network periodically
if t > args.start_steps and t % args.target_network_update_freq == 0:
update_target(readout)
# Reset environment
if done:
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Print progress to log file for tracking performance during training
if num_episodes % args.log_freq == 0:
f.write('step {} episode {:.3f} avg_reward {:.3f} max_reward {:.3f} percent_explore {}%\n'.format(t, num_episodes, np.mean(episode_rewards[-args.log_freq:]), np.max(episode_rewards[-args.log_freq:]), int(100*exploration.value(t))))
# Update cumulative reward list and episode counter
episode_rewards.append(0.0)
num_episodes += 1
# Save model for evaluation
if t % args.checkpoint_freq == 0:
readout.save(f, os.path.join(args.checkpoint_dir, '{}_checkpoint_{}.pt'.format(args.env_name, t)))
f.write('=> Save checkpoint at timestep {}\n'.format(t))
# Close training log file
if args.checkpoint_dir is not None:
f.close()
def test(env):
# Read train log file to get saved model path
checkpoint_steps = []
with open('{}_train.log'.format(args.checkpoint_dir), 'r') as f:
for line in f.readlines():
chunks = line.strip().split()
if chunks[0] != '=>': continue
checkpoint_steps.append(chunks[-1])
# Open test log file to write
log_name='{}_test.log'.format(args.checkpoint_dir)
if not args.overwrite and os.path.isfile(log_name):
print('==> File {} exists!'.format(log_name))
sys.exit(0)
f = open(log_name, 'w', buffering=1)
# Create liquid model
import model_lsm
model_spike = model_lsm.LSM(f=f,
t_sim=args.t_sim, t_prb=(args.t_prb_start,args.t_prb_stop),
inp_size=int(reduce(lambda x, y: x*y, env.observation_space.shape)), rate_scale=args.rate_scale,
n_neurons=args.n_neurons,
k={'pe':3,'pi':0,'ei':4},
w={'pe':0.6,'pi':0.0,
'ee':0.05,'ei':0.25,
'ie':-0.3,'ii':-0.01})
model_spike.to(args.device)
model_spike.eval()
f.write('{}\n'.format(model_spike))
# Create readout
readout = ReadoutWrapper(f = f,
network = MLP,
optimizer = torch.optim.RMSprop,
criterion = nn.SmoothL1Loss,
gamma = 0,
grad_clipping = 0,
obs_size = model_spike.n_e_neurons,
hidden_size = args.hidden_size,
act_size = env.action_space.n
)
# Reset the environment
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Warm-up LSM
for t in range(args.warmup_steps):
new_obs, rew, done, _ = env.step(random.randrange(env.action_space.n))
new_obs = pre_process(new_obs)
with torch.no_grad():
sumspike_e = model_spike(new_obs.to(args.device))
new_obs = sumspike_e
new_inp = None if done else new_obs.unsqueeze(0)
obs = new_obs
inp = new_inp
# Reset environment
if done:
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Run actual testing for every checkpoint
for checkpoint_step in tqdm(checkpoint_steps):
# Load readout layer for this checkpoint
readout.load(os.path.join(args.checkpoint_dir, '{}_checkpoint_{}.pt'.format(args.env_name, checkpoint_step)))
# Declare list for storing cumulative rewards for each game episode and episode counter
episode_rewards = [0.0]
num_episodes = 1
# Reset environment
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
for t in range(args.test_steps):
# Take action greedily (update_eps=0.00: no random exploration during testing)
action = act(inp, readout, env.action_space.n, 0.00)
new_obs, rew, done, _ = env.step(action)
new_obs = pre_process(new_obs)
with torch.no_grad():
sumspike_e = model_spike(new_obs.to(args.device))
new_obs = sumspike_e
new_inp = None if done else new_obs.unsqueeze(0)
# Track episode reward
episode_rewards[-1] += rew
# Move on to the next state
obs = new_obs
inp = new_inp
# Reset environment
if done:
obs = env.reset()
obs = pre_process(obs)
with torch.no_grad():
sumspike_e = model_spike(obs.to(args.device))
obs = sumspike_e
inp = obs.unsqueeze(0)
# Update cumulative reward list and episode counter
episode_rewards.append(0.0)
num_episodes += 1
# Print average cumulative reward per episodes to log file for tracking performance
# Drop last value in list of episode_rewards as the episode may not yet complete
f.write('step {} num_episode {} avg_reward {:.3f} max_reward {:.3f}\n'.format(checkpoint_step, num_episodes-1, np.mean(episode_rewards[:-1]), np.max(episode_rewards[:-1])))
# Close testing log file
if args.checkpoint_dir is not None:
f.close()
if __name__ == '__main__':
main()
| null |
run_pacman.py
|
run_pacman.py
|
py
| 22,417 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pacman.init_env",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "torch.nn.SmoothL1Loss",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.add",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.mul",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "model_lsm.LSM",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.SmoothL1Loss",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "model_lsm.LSM",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.SmoothL1Loss",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 478,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 523,
"usage_type": "call"
}
] |
596695232
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
About: SF program for SFC gap time measurements
Email: [email protected]
"""
import binascii
import logging
import multiprocessing, threading
import socket
import struct
import sys
import time
import kodo
import json
from config import SRC_MAC, DST_MAC, BUFFER_SIZE
#from config import CTL_IP, CTL_PORT, NEXT_IP
from config import ingress_iface, egress_iface
from config import SYMBOL_SIZE, GEN_SIZE, coding_mode, chain_position
from config import monitoring_mode, JSONL_FILE_PATH, probing_enabled
from config import DECODER_IP_REWRITE
############
# Config #
############
SRC_MAC_B = binascii.unhexlify(SRC_MAC.replace(':', ''))
DST_MAC_B = binascii.unhexlify(DST_MAC.replace(':', ''))
MAC_LEN = len(DST_MAC_B)
# Header lengths in bytes
ETH_HDL = 14
UDP_HDL = 8
COD_HDL_MAX = 22
#############
# Logging #
#############
fmt_str = '%(asctime)s %(levelname)-8s %(processName)s %(message)s'
level = {
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'ERROR': logging.ERROR
}
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(fmt_str)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level['ERROR'])
#logger.setLevel(level['DEBUG'])
#####################
# Forward Program #
#####################
def bind_raw_sock_pair(in_iface, out_iface):
"""Create and bind raw socket pairs"""
try:
recv_sock = socket.socket(
socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3)
)
send_sock = socket.socket(
socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3)
)
except socket.error as error:
logger.error(error)
sys.exit(1)
recv_sock.bind((in_iface, 0))
send_sock.bind((out_iface, 0))
logger.debug('Bind in interface: %s, out interface: %s',
in_iface, out_iface)
return (recv_sock, send_sock)
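# Illustrative usage (added, hedged; interface names are assumptions):
#   recv_sock, send_sock = bind_raw_sock_pair('eth1', 'eth2')
#   buf = bytearray(BUFFER_SIZE)
#   pack_len = recv_sock.recv_into(buf, BUFFER_SIZE)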
def calc_ih_cksum(hd_b_arr):
"""Calculate IP header checksum
MARK: To generate a new checksum, the checksum field itself is set to zero
:para hd_b_arr: Bytes array of IP header
:retype: int
"""
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
s = 0
# set checksum field to zero
hd_b_arr[10:12] = struct.pack('>H', 0)
for i in range(0, len(hd_b_arr), 2):
a, b = struct.unpack('>2B', hd_b_arr[i:i + 2])
w = a + (b << 8)
s = carry_around_add(s, w)
return ~s & 0xffff
def forwards_forward(recv_sock, send_sock, factory=None):
"""forwards_forward"""
# Bytes array for a ethernet frame
pack_arr = bytearray(BUFFER_SIZE)
current_generation = 0
encoder = None
decoder = None
if coding_mode == "decode":
decoded_symbols = list()
if monitoring_mode:
time_log = {'encoder': [], 'recoder': [], 'decoder': []}
while True:
pack_len = recv_sock.recv_into(pack_arr, BUFFER_SIZE)
# MARK: Maybe too slow here
recv_time = time.perf_counter()
eth_typ = struct.unpack('>H', pack_arr[12:14])[0]
# IPv4 packet
if eth_typ != 2048:
continue
# Check IP version and calc header length
ver_ihl = struct.unpack('>B', pack_arr[ETH_HDL:ETH_HDL + 1])[0]
ihl = 4 * (ver_ihl & 0x0f)  # low nibble is IHL in 32-bit words; hex-digit parsing breaks for IHL >= 10
# IP total length
ip_tlen = struct.unpack(
'>H', pack_arr[ETH_HDL + 2:ETH_HDL + 4])[0]
#logger.debug(
# 'Recv a IP packet, header len: %d, total len: %d', ihl,
# ip_tlen)
proto = struct.unpack(
'>B', pack_arr[ETH_HDL + 9:ETH_HDL + 10])[0]
# Check if is UDP packet
if proto != 17:
continue
udp_hd_offset = ETH_HDL+ihl # move to UDP header
source_port = struct.unpack('>H', pack_arr[udp_hd_offset:udp_hd_offset+2])[0]
dest_port = struct.unpack('>H', pack_arr[udp_hd_offset+2:udp_hd_offset+4])[0]
# filter out ctl packets
#if dest_port == CTL_PORT or source_port == CTL_PORT:
# logger.debug("Recv CTL packet. Ignoring.")
# continue
udp_pl_offset = udp_hd_offset + UDP_HDL
# Set checksum to zero
# MARK: If the checksum is cleared to zero, then checksuming is disabled.
pack_arr[udp_hd_offset + 6:udp_hd_offset + 8] = struct.pack('>H', 0)
# UDP payload length
udp_pl_len = struct.unpack(
'>H', pack_arr[udp_hd_offset + 4:udp_hd_offset + 6]
)[0] - UDP_HDL
logger.debug("UDP Payload: %s Bytes", udp_pl_len)
# extract payload
udp_payload = pack_arr[udp_pl_offset:udp_pl_offset+udp_pl_len]
if coding_mode == "encode":
assert chain_position == 0
if len(udp_payload) > SYMBOL_SIZE:
logger.error("Packet too big. Dropping")
continue
logger.debug("Encoding...")
if not encoder:
encoder = factory.build()
encoder.set_const_symbol(encoder.rank(), bytes(udp_payload))
packets_to_send = 1
if encoder.rank() == encoder.symbols():
red_pkts = max(2, int(encoder_info['gen_size']*redundancy/100))
logger.debug("Sending last packet + %d redundancy packets", red_pkts)
packets_to_send += red_pkts
for i in range(packets_to_send):
coded_payload = encoder.write_payload()
logger.debug("Building header...")
coding_header = build_header(**encoder_info,
redundancy=redundancy,
gen_seq=current_generation,
probing=probing_enabled)
coding_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug("Coding time: %d", coding_time)
udp_pl_len = len(coding_header) + len(coded_payload)
if probing_enabled:
udp_pl_len += 2
update_ip_header(pack_arr, ihl, udp_pl_len)
proc_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug('Process time: %d us.', proc_time)
if probing_enabled:
update_header(coding_header, proc_time=proc_time)
logger.debug("Header: %s", coding_header)
udp_payload = coding_header + coded_payload
pack_len = udp_pl_offset+udp_pl_len
pack_arr[udp_pl_offset : pack_len] = udp_payload
if pack_len-ETH_HDL >= 1450:
logger.error("Packet too big: %s. Not sending.", pack_len-ETH_HDL)
continue
pack_arr[0:MAC_LEN] = DST_MAC_B
send_sock.send(pack_arr[0:pack_len])
recv_time = time.perf_counter() # for multiple packets
if encoder.rank() == encoder.symbols():
logger.info("Generation full. Resetting encoder.")
encoder = None
current_generation = (current_generation+1)%4
decoded_symbols = []
logger.debug("Generation sequence number: %s", current_generation)
elif coding_mode == "forward":
coding_header = udp_payload[0:COD_HDL_MAX]
header_info = parse_header(coding_header, get_times=monitoring_mode)
if header_info['hop_log']['invalid']:
logger.debug("Hop log invalid. Dropping packet.")
continue
cod_hdl = header_info['header_size']
logger.debug("Coding header length: %s", cod_hdl)
coding_header = udp_payload[0:cod_hdl]
logger.debug("Forwarding...")
coded_payload = udp_payload[cod_hdl:]
update_header(coding_header, chain_position=chain_position)
udp_pl_len = len(coding_header) + len(coded_payload)
if header_info['probing']:
udp_pl_len += 2
update_ip_header(pack_arr, ihl, udp_pl_len)
proc_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug('Process time: %d us.', proc_time)
if header_info['probing']:
update_header(coding_header, proc_time=proc_time)
udp_payload = coding_header + coded_payload
pack_len = udp_pl_offset+udp_pl_len
pack_arr[udp_pl_offset : pack_len] = udp_payload
if pack_len-ETH_HDL >= 1450:
logger.error("Packet too big: %s. Not sending.", pack_len-ETH_HDL)
continue
pack_arr[0:MAC_LEN] = DST_MAC_B
send_sock.send(pack_arr[0:pack_len])
else:
coding_header = udp_payload[0:COD_HDL_MAX]
header_info = parse_header(coding_header, get_times=monitoring_mode)
if not all(header_info[i] == encoder_info[i] for i in encoder_info):
logger.debug("Header mismatch. Dropping packet.")
continue
if header_info['hop_log']['invalid']:
logger.debug("Hop log invalid. Dropping packet.")
continue
cod_hdl = header_info['header_size']
logger.debug("Coding header length: %s", cod_hdl)
coding_header = udp_payload[0:cod_hdl]
if header_info['gen_seq'] != current_generation:
logger.info("Packet from new generation arrived. Resetting decoder.")
decoder = None
current_generation = header_info['gen_seq']
decoded_symbols = []
logger.debug("Generation sequence number: %s", current_generation)
if not decoder:
decoder = factory.build()
if coding_mode == "recode":
logger.debug("Recoding...")
decoder.read_payload(bytes(udp_payload[cod_hdl:]))
logger.debug("Rank %s", decoder.rank())
packets_to_send = 1
if decoder.rank() == decoder.symbols():
red_pkts = max(2, int(header_info['gen_size']*header_info['redundancy']/100))
logger.debug("Sending last packet + %d redundancy packets", red_pkts)
packets_to_send += red_pkts
for i in range(packets_to_send):
coded_payload = decoder.write_payload()
coding_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug("Coding time: %d", coding_time)
update_header(coding_header, chain_position=chain_position)
udp_pl_len = len(coding_header) + len(coded_payload)
if header_info['probing']:
udp_pl_len += 2
update_ip_header(pack_arr, ihl, udp_pl_len)
proc_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug('Process time: %d us.', proc_time)
if header_info['probing']:
update_header(coding_header, proc_time=proc_time)
udp_payload = coding_header + coded_payload
pack_len = udp_pl_offset+udp_pl_len
pack_arr[udp_pl_offset : pack_len] = udp_payload
if pack_len-ETH_HDL >= 1450:
logger.error("Packet too big: %s. Not sending.", pack_len-ETH_HDL)
continue
pack_arr[0:MAC_LEN] = DST_MAC_B
send_sock.send(pack_arr[0:pack_len])
recv_time = time.perf_counter() # for multiple packets
if decoder.rank() == decoder.symbols():
logger.info("Generation full.")
elif coding_mode == "decode":
logger.debug("Decoding...")
decoder.read_payload(bytes(udp_payload[cod_hdl:]))
if decoder.rank() <= len(decoded_symbols):
if len(decoded_symbols) == decoder.symbols():
logger.debug("Generation already decoded.")
else:
logger.debug("Rank didn't increase. Waiting for more packets")
continue
logger.debug("Rank %s", decoder.rank())
if monitoring_mode and header_info['probing']:
time_log['encoder'].append(header_info['times'][0])
time_log['recoder'].append(header_info['times'][1])
for i in range(GEN_SIZE):
if i not in decoded_symbols and decoder.is_symbol_uncoded(i):
logger.debug("Decoding symbol %s", i)
udp_payload = decoder.copy_from_symbol(i)
udp_pl_len = len(udp_payload)
logger.debug("Payload (%s Bytes): %s", udp_pl_len, udp_payload)
if DECODER_IP_REWRITE:
pack_arr[ETH_HDL+16:ETH_HDL+20] = socket.inet_aton(DECODER_IP_REWRITE)
update_ip_header(pack_arr, ihl, udp_pl_len)
proc_time = int((time.perf_counter()-recv_time)*10**6)
logger.debug('Process time: %d us.', proc_time)
if monitoring_mode:
time_log['decoder'].append(proc_time)
pack_len = udp_pl_offset+udp_pl_len
pack_arr[udp_pl_offset : pack_len] = udp_payload
if pack_len-ETH_HDL >= 1450:
logger.error("Packet too big: %s. Not sending.", pack_len-ETH_HDL)
continue
pack_arr[0:MAC_LEN] = DST_MAC_B
send_sock.send(pack_arr[0:pack_len])
decoded_symbols.append(i)
recv_time = time.perf_counter()
logger.debug("Decoded symbols: %s", decoded_symbols)
if len(decoded_symbols) == decoder.symbols():
logger.info("All packets decoded.")
if monitoring_mode:
logger.debug("Writing time log to %s", JSONL_FILE_PATH)
with open(JSONL_FILE_PATH, 'a+') as jsonl_file:
jsonl_file.write(json.dumps(time_log))
jsonl_file.write('\n')
time_log = {'encoder': [], 'recoder': [], 'decoder': []}
def update_ip_header(pack_arr, ihl, udp_pl_len):
udp_hd_offset = ETH_HDL+ihl
new_udp_tlen = struct.pack(
'>H', (UDP_HDL + udp_pl_len)
)
pack_arr[udp_hd_offset+4 : udp_hd_offset+6] = new_udp_tlen
new_ip_tlen = struct.pack('>H', ihl + UDP_HDL + udp_pl_len)
pack_arr[ETH_HDL+2:ETH_HDL+4] = new_ip_tlen
logger.debug(
'Old IP header checksum: %s',
binascii.hexlify(
pack_arr[ETH_HDL+10 : ETH_HDL+12]
).decode()
)
cksm_start_time = time.perf_counter()
new_iph_cksum = calc_ih_cksum(pack_arr[ETH_HDL : ETH_HDL+ihl])
cksm_time = int((time.perf_counter()-cksm_start_time)*10**6)
logger.debug('New IP header checksum %s, time: %d', hex(new_iph_cksum), cksm_time)
pack_arr[ETH_HDL+10 : ETH_HDL+12] = struct.pack('<H', new_iph_cksum)
def echo_listen(sock):
    """Echo 'PING ...' control packets back with an 'ACK ' prefix."""
    while True:
        payload, (ip, port) = sock.recvfrom(64)
        logger.debug('Pong %s', payload)
        if payload.startswith(b'PING'):
            payload = b"ACK " + payload
            sock.sendto(payload, (ip, port))
def test_error_rate(receiver, packet_num, timeout=0.5, wait_time=0.05):
def wait_for_ack():
global received_acks
received_acks = 0
while True:
sock.settimeout(timeout)
try:
reply, sender = sock.recvfrom(64)
if sender == receiver and reply.startswith(b"ACK PING"):
received_acks += 1
except socket.timeout:
pass
            if all_sent:
return
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
all_sent = False
ack_thread = threading.Thread(target=wait_for_ack)
ack_thread.start()
for i in range(packet_num):
sock.sendto("PING {}".format(i).encode('ascii'), receiver)
time.sleep(wait_time)
all_sent = True
ack_thread.join()
sock.close()
return 1-received_acks/packet_num
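# Hedged usage sketch for test_error_rate (the peer address is an assumption,
# not a value from this deployment; the peer must run echo_listen() or an
# equivalent "ACK PING" echo for the counter to advance):
#
#   loss = test_error_rate(('10.0.0.2', CTL_PORT), packet_num=50)
#   logger.info('Measured loss rate: %.2f', loss)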
def build_header(encoder, field_size, gen_seq, gen_size, symbol_len, redundancy=0, probing=False, proc_time=None):
if probing and proc_time:
header = bytearray(8)
else:
header = bytearray(6)
assert encoder in range(8)
assert field_size in range(4)
assert gen_seq in range(4)
enc_info = encoder<<4 | field_size<<2 | gen_seq
hop_log = 0b10000000
red_prob = redundancy<<1 | bool(probing)
header[0:6] = struct.pack('!BBBBH', enc_info, hop_log, gen_size,
red_prob, symbol_len)
if probing and proc_time:
header[6:8] = struct.pack('!H', proc_time)
return header
def parse_header(header, get_times=False):
hi = dict()
enc_info, hop_log, hi['gen_size'], \
red_prob, hi['symbol_len'] = struct.unpack('!BBBBH', header[0:6])
hi['encoder'] = enc_info>>4 & 0b1111
hi['field_size'] = enc_info>>2 & 0b11
hi['gen_seq'] = enc_info & 0b11
hi['hop_log'] = {'invalid': False, 'total_hops': 0}
for i in range(8):
hi['hop_log'][i] = bool(hop_log>>(7-i) & 0b1)
if hi['hop_log'][i]:
hi['hop_log']['total_hops'] += 1
if i>0 and not hi['hop_log'][i-1]:
                hi['hop_log']['invalid'] = True
hi['probing'] = red_prob&0b1
hi['redundancy'] = red_prob>>1
hi['header_size'] = 6
if hi['probing']:
hi['header_size'] += 2*hi['hop_log']['total_hops']
if get_times:
pattern = '!'+'H'*hi['hop_log']['total_hops']
hi['times'] = struct.unpack(pattern, header[6:hi['header_size']])
return hi
def update_header(header, gen_seq=None, chain_position=None, proc_time=None):
    if gen_seq is not None:
        assert gen_seq in range(4)
        [first_byte] = struct.unpack('!B', header[:1])
        first_byte = first_byte & 0b11111100 | gen_seq
        header[:1] = struct.pack('!B', first_byte)
    if chain_position is not None:
        assert chain_position in range(8)
        [hop_log] = struct.unpack('!B', header[1:2])
        hop_log |= 0b1 << (7 - chain_position)
        header[1:2] = struct.pack('!B', hop_log)
    if proc_time is not None:
        header.extend(struct.pack('!H', proc_time))
return header
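# Hedged round-trip sketch for the header helpers above (all argument values
# are illustrative assumptions, not protocol constants):
def _demo_header_roundtrip():
    hdr = build_header(encoder=2, field_size=1, gen_seq=0, gen_size=16,
                       symbol_len=1024, redundancy=10, probing=False)
    update_header(hdr, chain_position=1)  # mark hop 1 in the hop log
    info = parse_header(hdr)
    assert info['gen_size'] == 16 and info['symbol_len'] == 1024
    assert info['hop_log'][0] and info['hop_log'][1]
    return info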
def convert_encoder(kodo_object):
if not kodo_object:
return {'encoder': 0, 'field_size':0, 'symbol_len':0, 'gen_size':0}
name = type(kodo_object).__name__
available_encoders = {
'Fulcrum': 1,
'FullVector': 2,
'NoCode': 3,
'OnTheFly': 4,
'Perpetual': 5,
'SlidingWindow': 6,
'SparseFullVector': 7
}
available_sizes = {'4':1, '8':2, '16':3}
if name[-1] in available_sizes:
field_size = available_sizes[name[-1]]
name = name[:-1]
else:
field_size = 0
[encoder] = [available_encoders[i] for i in available_encoders if name.startswith(i)]
result = {'encoder': encoder, 'field_size': field_size}
result['symbol_len'] = kodo_object.symbol_size()
result['gen_size'] = kodo_object.symbols()
return result
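# Hedged sketch of convert_encoder's output (assuming an old kodo-python build
# where factories expose symbols()/symbol_size(); exact class names depend on
# the kodo version installed):
#
#   fac = kodo.FullVectorEncoderFactoryBinary(GEN_SIZE, SYMBOL_SIZE)
#   convert_encoder(fac)
#   # -> {'encoder': 2, 'field_size': 0,
#   #     'symbol_len': SYMBOL_SIZE, 'gen_size': GEN_SIZE}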
if __name__ == "__main__":
if len(sys.argv) > 2:
encoder_decoder = sys.argv[1]
SYMBOL_SIZE = int(sys.argv[2])
if coding_mode == "encode":
if encoder_decoder == "FullVector":
fw_fac = kodo.FullVectorEncoderFactoryBinary(GEN_SIZE, SYMBOL_SIZE)
elif encoder_decoder == "SlidingWindow":
fw_fac = kodo.SlidingWindowEncoderFactoryBinary(GEN_SIZE, SYMBOL_SIZE)
redundancy = 10
elif coding_mode in ("decode", "recode") :
if encoder_decoder == "FullVector":
fw_fac = kodo.FullVectorDecoderFactoryBinary(GEN_SIZE, SYMBOL_SIZE)
elif encoder_decoder == "SlidingWindow":
fw_fac = kodo.SlidingWindowDecoderFactoryBinary(GEN_SIZE, SYMBOL_SIZE)
else:
fw_fac = None
encoder_info = convert_encoder(fw_fac)
JSONL_FILE_PATH = "pd_{}_{}_{}.jsonl".format(encoder_decoder, SYMBOL_SIZE, GEN_SIZE)
# Bind sockets and start forwards and backwards processes
recv_sock, send_sock = bind_raw_sock_pair(ingress_iface, egress_iface)
fw_proc = multiprocessing.Process(target=forwards_forward,
args=(recv_sock, send_sock, fw_fac))
fw_proc.start()
# Send a ready packet to SFC manager
#ctl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#ctl_sock.bind(('', CTL_PORT))
#echo_proc = multiprocessing.Process(target=echo_listen, args=(ctl_sock,))
#echo_proc.start()
#error_rate = test_error_rate((NEXT_IP, CTL_PORT), 50)
#logger.debug("Error rate: {}".format(error_rate))
#ctl_sock.sendto(b'ready', (CTL_IP, CTL_PORT))
fw_proc.join()
#echo_proc.join()
| null |
sfc-ostack/demo/sfp/gap_sf_nc.py
|
gap_sf_nc.py
|
py
| 22,269 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "binascii.unhexlify",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "config.SRC_MAC.replace",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "config.SRC_MAC",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "binascii.unhexlify",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "config.DST_MAC.replace",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "config.DST_MAC",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "logging.ERROR",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "socket.AF_PACKET",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_RAW",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "socket.htons",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "socket.AF_PACKET",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_RAW",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "socket.htons",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "socket.error",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "config.BUFFER_SIZE",
"line_number": 114,
"usage_type": "argument"
},
{
"api_name": "config.coding_mode",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "config.monitoring_mode",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "config.BUFFER_SIZE",
"line_number": 124,
"usage_type": "argument"
},
{
"api_name": "time.perf_counter",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "config.coding_mode",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "config.chain_position",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "config.probing_enabled",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "config.probing_enabled",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "config.probing_enabled",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "config.coding_mode",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "config.monitoring_mode",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "config.chain_position",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "config.monitoring_mode",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "config.coding_mode",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "config.chain_position",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "config.coding_mode",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "config.monitoring_mode",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 352,
"usage_type": "argument"
},
{
"api_name": "config.DECODER_IP_REWRITE",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "socket.inet_aton",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "config.DECODER_IP_REWRITE",
"line_number": 361,
"usage_type": "argument"
},
{
"api_name": "time.perf_counter",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "config.monitoring_mode",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "config.monitoring_mode",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "config.JSONL_FILE_PATH",
"line_number": 386,
"usage_type": "argument"
},
{
"api_name": "config.JSONL_FILE_PATH",
"line_number": 387,
"usage_type": "argument"
},
{
"api_name": "json.dumps",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "binascii.hexlify",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "socket.recvfrom",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "socket.sendto",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "socket.timeout",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "config.chain_position",
"line_number": 509,
"usage_type": "name"
},
{
"api_name": "config.chain_position",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "struct.unpack",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "config.chain_position",
"line_number": 512,
"usage_type": "name"
},
{
"api_name": "struct.pack",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 553,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 554,
"usage_type": "attribute"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 555,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 555,
"usage_type": "attribute"
},
{
"api_name": "config.coding_mode",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "kodo.FullVectorEncoderFactoryBinary",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 559,
"usage_type": "argument"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 559,
"usage_type": "argument"
},
{
"api_name": "kodo.SlidingWindowEncoderFactoryBinary",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 561,
"usage_type": "argument"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 561,
"usage_type": "argument"
},
{
"api_name": "config.coding_mode",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "kodo.FullVectorDecoderFactoryBinary",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 565,
"usage_type": "argument"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 565,
"usage_type": "argument"
},
{
"api_name": "kodo.SlidingWindowDecoderFactoryBinary",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 567,
"usage_type": "argument"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 567,
"usage_type": "argument"
},
{
"api_name": "config.JSONL_FILE_PATH",
"line_number": 573,
"usage_type": "name"
},
{
"api_name": "config.SYMBOL_SIZE",
"line_number": 573,
"usage_type": "argument"
},
{
"api_name": "config.GEN_SIZE",
"line_number": 573,
"usage_type": "argument"
},
{
"api_name": "config.ingress_iface",
"line_number": 576,
"usage_type": "argument"
},
{
"api_name": "config.egress_iface",
"line_number": 576,
"usage_type": "argument"
},
{
"api_name": "multiprocessing.Process",
"line_number": 577,
"usage_type": "call"
}
] |
453209211
|
from django.shortcuts import render
from .escenas_youtube import escenas as escenas_youtube
from pulp.settings import STATIC_DIR
import os
from django.http import HttpResponse
from PIL import Image
n = len(escenas_youtube)
escenas = {
'col1': escenas_youtube[:round(n/3)],
'col2': escenas_youtube[round(n/3):round(2*n/3)+1],
'col3': escenas_youtube[round(2*n/3)+1:],
}
escenas_dict = {e[0]:e[1] for e in escenas_youtube}
# Create your views here.
def archivos(request):
dict_out = {'escenas': escenas}
return render(request,'website/archivos.html',context=dict_out)
def get_foto(request, imgtype, imgfolder, imgname):
imgdir = os.path.join(STATIC_DIR, imgtype, imgfolder, imgname)
    with open(imgdir, "rb") as img_file:
        image_data = img_file.read()
    return HttpResponse(image_data, content_type="image/jpeg")
def fotos(request):
dict_out = {'escenas': escenas}
tmbdir = os.path.join(STATIC_DIR, 'thumbnails')
imgnewdir = os.path.join(STATIC_DIR, 'img_new')
## folders and images
folders_img = []
for folder in os.listdir(tmbdir):
if os.path.isdir(tmbdir+'/'+folder):
imgs = []
for i in os.listdir(imgnewdir+'/'+folder):
image = Image.open(imgnewdir+'/'+folder+'/'+i)
imgs.append((i, image.width, image.height))
folders_img.append((folder, imgs))
dict_out['folders'] = folders_img
# print(dict_out['folders'])
return render(request,'website/fotos.html',context=dict_out)
def home(request):
dict_out = {'escenas': escenas}
return render(request,'website/inicio.html',context=dict_out)
def inicio(request):
dict_out = {'escenas': escenas}
return render(request,'website/inicio.html',context=dict_out)
def video1(request):
dict_out = {'escenas': escenas}
return render(request,'website/video1.html',context=dict_out)
def video2(request):
dict_out = {'escenas': escenas}
return render(request,'website/video2.html',context=dict_out)
def escena(request, escena_id):
dict_out = {'escenas': escenas, 'escena_id':escena_id, 'youtube':escenas_dict[escena_id]}
return render(request, 'website/escenas.html', context=dict_out)
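# A hedged alternative sketch for get_foto: django.http.FileResponse streams
# the file and closes it for us (the view name below is ours, not part of the
# project):
#
#   from django.http import FileResponse
#
#   def get_foto_streaming(request, imgtype, imgfolder, imgname):
#       imgdir = os.path.join(STATIC_DIR, imgtype, imgfolder, imgname)
#       return FileResponse(open(imgdir, "rb"), content_type="image/jpeg")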
| null |
website/views.py
|
views.py
|
py
| 2,187 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "escenas_youtube.escenas",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "escenas_youtube.escenas",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "escenas_youtube.escenas",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "escenas_youtube.escenas",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "escenas_youtube.escenas",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pulp.settings.STATIC_DIR",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pulp.settings.STATIC_DIR",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pulp.settings.STATIC_DIR",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 67,
"usage_type": "call"
}
] |
46425629
|
# -*- coding: utf-8 -*-
"""Reads logs from a BigQuery table."""
import tempfile
from typing import Optional
from google.auth import exceptions as google_auth_exceptions
from google.cloud import bigquery # type: ignore
import google.cloud.exceptions
from dftimewolf.lib import module
from dftimewolf.lib.containers import containers
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.state import DFTimewolfState
class BigQueryCollector(module.BaseModule):
"""Collector for BigQuery."""
def __init__(self,
state: DFTimewolfState,
name: Optional[str] = None,
critical: bool = False) -> None:
"""Initializes a GCP logs collector."""
super(BigQueryCollector, self).__init__(state, name=name, critical=critical)
self._project_name = ""
self._query = ""
self._description = ""
# pylint: disable=arguments-differ
def SetUp(self, project_name: str, query: str, description: str) -> None:
"""Sets up a BigQuery collector.
Args:
project_name (str): name of the project that contains the BigQuery tables.
query (str): The query to run.
description (str): A description of the query.
"""
self._project_name = project_name
self._query = query
self._description = description
def Process(self) -> None:
"""Collects data from BigQuery."""
output_file = tempfile.NamedTemporaryFile(
mode="w", delete=False, encoding="utf-8", suffix=".jsonl")
output_path = output_file.name
self.logger.info("Downloading results to {0:s}".format(output_path))
try:
if self._project_name:
bq_client = bigquery.Client(project=self._project_name)
else:
bq_client = bigquery.Client()
records = bq_client.query(self._query).to_dataframe().to_json(
orient="records", lines=True, date_format="iso")
output_file.write(records)
# pytype: disable=module-attr
except google.cloud.exceptions.NotFound as exception:
self.ModuleError(f"Error accessing project: {exception!s}",
critical=True)
# pytype: enable=module-attr
  except google_auth_exceptions.DefaultCredentialsError as exception:
    self.ModuleError(
        "Something is wrong with your gcloud access token or "
        "Application Default Credentials. Try running:\n "
        "$ gcloud auth application-default login"
    )
    self.ModuleError(str(exception), critical=True)
self.PublishMessage(f'Downloaded logs to {output_path}')
output_file.close()
bq_report = containers.File(name=self._description, path=output_path)
self.state.StoreContainer(bq_report)
modules_manager.ModulesManager.RegisterModule(BigQueryCollector)
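# Hedged usage sketch (simplified: a real dftimewolf run builds DFTimewolfState
# from a loaded Config; the project and query below are made-up examples):
#
#   collector = BigQueryCollector(state)
#   collector.SetUp(project_name='my-project',
#                   query='SELECT * FROM `my-project.logs.events` LIMIT 10',
#                   description='sample events')
#   collector.Process()  # runs the query, writes JSONL, stores a File container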
| null |
dftimewolf/lib/collectors/bigquery.py
|
bigquery.py
|
py
| 2,737 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dftimewolf.lib.module.BaseModule",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "dftimewolf.lib.module",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dftimewolf.lib.state.DFTimewolfState",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "google.auth.cloud",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "google.auth",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "google.auth.exceptions.DefaultCredentialsError",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "google.auth.exceptions",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "dftimewolf.lib.containers.containers.File",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "dftimewolf.lib.containers.containers",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "dftimewolf.lib.modules.manager.ModulesManager.RegisterModule",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "dftimewolf.lib.modules.manager.ModulesManager",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "dftimewolf.lib.modules.manager",
"line_number": 80,
"usage_type": "name"
}
] |
300466291
|
import cv2
import numpy as np
img = cv2.imread('images/gshock.jpg')
lap = cv2.Laplacian(img, cv2.CV_64F)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
edge = cv2.Canny(img, 100, 200)
cv2.imshow('ori', img)
cv2.imshow('lap', lap)
cv2.imshow('sx', sobelx)
cv2.imshow('sy', sobely)
cv2.imshow('edge', edge)
cv2.waitKey(0)
cv2.destroyAllWindows()
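# Hedged display note: the Laplacian/Sobel results above are CV_64F, which
# imshow renders poorly (negative responses clip to black). A common fix is
# to rescale to uint8 before display:
#
#   lap_u8 = cv2.convertScaleAbs(lap)
#   sobelx_u8 = cv2.convertScaleAbs(sobelx)
#   cv2.imshow('lap', lap_u8)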
| null |
CV/opencv/edge.py
|
edge.py
|
py
| 402 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.Laplacian",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.Sobel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.Sobel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.Canny",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 17,
"usage_type": "call"
}
] |
259982956
|
from bottle import route,run,template,static_file
from markdown_converter import MarkdownConverter
from env import *
import posixpath
converter = MarkdownConverter()
@route('/<resource>')
def gfmize(resource):
html_file_name = markdown_root + resource
if (posixpath.splitext(resource)[1] == '.md'):
html_file_name = converter.convert(resource)
return static_file(html_file_name, root=ms_root)
def main():
run(host=ms_host,port=ms_port,debug=ms_debug,reloader=ms_reloader)
if __name__ == '__main__':
main()
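# Hedged usage sketch (ms_host/ms_port come from env.py, so the URL below is
# an assumption): a request for /README.md is converted by MarkdownConverter
# before being served; other names are handed to static_file unchanged.
#
#   curl http://localhost:8080/README.md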
| null |
markdownserver/__init__.py
|
__init__.py
|
py
| 537 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "markdown_converter.MarkdownConverter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "posixpath.splitext",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bottle.static_file",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bottle.route",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bottle.run",
"line_number": 16,
"usage_type": "call"
}
] |
229022069
|
import cv2
import numpy as np
import numba
from numba import cuda
from pypylon import pylon
import time
import os
KEYS = [ 'UL','UR', 'DL', 'DR']
#_______________________________________________________________________________________________________________________________________________
# This class yields frames grabbed from a Basler camera via pypylon
#_______________________________________________________________________________________________________________________________________________
class cameraGraber:
def __init__(self, idx):
tlFactory = pylon.TlFactory.GetInstance()
devices = tlFactory.EnumerateDevices()
if len(devices) == 0:
raise pylon.RuntimeException("No camera present.")
else:
print('Numbers Of Camera:', len( devices ))
#cameras = pylon.InstantCameraArray(len(devices))
self.camera = pylon.InstantCamera()
self.camera.Attach(tlFactory.CreateDevice(devices[idx]))
self.camera.StartGrabbing()#pylon.GrabStrategy_LatestImageOnly)
self.converter = pylon.ImageFormatConverter()
#--------------------------------------5-----------------------------------------------
self.camera.GainAuto.SetValue('Off')
self.camera.GainSelector.SetValue('All')
self.camera.GainRaw.SetValue(100)
self.camera.GammaSelector.SetValue('User')
#cameras['ul'].camera.BlackLevelSelector.SetValue('All')
#self.camera.LightSourceSelector.SetValue('Daylight 6500 Kelvin')
self.camera.BalanceWhiteAuto.SetValue('Off')
self.camera.ColorAdjustmentEnable.SetValue(False)
self.camera.ExposureAuto.SetValue('Off')
self.camera.ExposureTimeRaw.SetValue(1000)
self.camera.ExposureTimeAbs.SetValue(1000)
#self.camera.GammaSelect #user
#self.camera.BalanceRatioRaw.SetValue(130)
#self.camera.LightSourceSelector.Set(4)
#self.camera.TriggerMode.SetValue(1)
#self.camera.AutoExposureTimeAbsLowerLimit.SetValue(0)
#self.camera.BalanceRatioRaw.SetValue(135)
#self.camera.ExposureTimeRaw.SetValue(3000)
#self.camera.GammaSelector.SetIntValue(1)
#self.camera.GainRaw.SetValue(158)
#self.camera.DigitalShift.SetValue(3)
# converting to opencv bgr format
#cameras['ul'].camera.GainRaw.SetValue(80)
self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def grabber( self, rotate=None ):
while self.camera.IsGrabbing():
grabResult = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
if grabResult.GrabSucceeded():
# Access the image data
image = self.converter.Convert(grabResult)
img = image.GetArray()
#if rotate != None:
# img = cv2.rotate( img, rotate )
yield img
#________________________________________________________________________________________________________________________________________
# Camera setup helper
#________________________________________________________________________________________________________________________________________
def getCamerasDict( ul=0, ur=0, dl=0, dr=0):
cams={}
delay = 0.2
cams['UL'] = cameraGraber(ul)
time.sleep(delay)
cams['UR'] = cameraGraber(ur)
time.sleep(delay)
cams['DL'] = cameraGraber(dl)
time.sleep(delay)
cams['DR'] = cameraGraber(dr)
time.sleep(delay)
return cams
#________________________________________________________________________________________________________________________________________
# Chessboard perspective calibration
#________________________________________________________________________________________________________________________________________
def chessPerspectiveCalib(camera, cam_pos, chess_size = (9,6), chess_home_size = 30, border = 2):
while True:
img = next(camera.grabber(rotate=None))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, chess_size,None)
if not ret:
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5))
cv2.waitKey(100)
continue
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners= cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
corners = corners.reshape((-1,2))
org_pts = []
if cam_pos == 'UL':
org_pts.append( corners[0] )
org_pts.append( corners[chess_size[0] - 1] )
org_pts.append( corners[chess_size[0]*chess_size[1] - chess_size[0]] )
org_pts.append( corners[-1] )
for num,corner in enumerate(org_pts):
cv2.circle( img, tuple(corner), 5, (0,0,255), thickness=-1)
cv2.putText( img , str(num), tuple(corner), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (0,0,255), thickness=2)
cv2.imshow('image', cv2.resize( img, None, fx=0.5, fy=0.5))
key = cv2.waitKey(0)
if key==ord('n'):
continue
org_pts = np.array( org_pts, dtype = np.float32)
dst_pts = np.array( [[ border * chess_home_size, border * chess_home_size],
[(border + chess_size[0]-1)* chess_home_size , border * chess_home_size],
[ border * chess_home_size, (border + chess_size[1]-1)* chess_home_size ],
[(border + chess_size[0]-1)* chess_home_size, (border + chess_size[1]-1)* chess_home_size]]).astype(np.float32 )
M = cv2.getPerspectiveTransform( org_pts, dst_pts)
#dst_h = (2*border + chess_size[1]-1)* chess_home_size
#dst_w = (2*border + chess_size[0]-1)* chess_home_size
dst_w, dst_h = (2*border + chess_size[0]-1)* chess_home_size, (2*border + chess_size[1]-1)* chess_home_size
while True:
dst = cv2.warpPerspective( img, M, (dst_w, dst_h))
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5 ))
cv2.imshow('dst', cv2.resize(dst, None, fx=0.5, fy=0.5 ))
key = cv2.waitKey(50)
if key == ord('n') or key == 27 or key == ord('y'):
cv2.destroyAllWindows()
break
if key == 27 or key == ord('y'):
break
class calibration_perspective():
def __init__(self, path, chess_size = (9,6), chess_home_size = 30, border = 2, cam_positions = ['UL', 'UR', 'DL', 'DR'] ):
self.chess_size = chess_size
self.chess_home_size = chess_home_size
self.chess_home_size_px = self.chess_home_size
self.border = border
self.cam_positions = cam_positions
self.path = path
self.mtx= {}
self.dst_size=[]
self.px2mm = 1
def calc_accuracy(self, cameras, debug=True):
distances = []
for cam_pose,cam in cameras.items():
while True:
img = next( cam.grabber() )
#img = cv2.imread('data/cal_img/cal_1.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
print('Search For Chess')
ret, corners = cv2.findChessboardCorners(gray, self.chess_size,None)
if debug:
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5))
cv2.waitKey(10)
if not ret:
print('Not Find')
continue
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners= cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
corners = corners.reshape((-1,2))
for i in range(len( corners) - self.chess_size[0]):
pt1 = corners[i]
pt2 = corners[i+1]
pt3 = corners[i+self.chess_size[0]]
l1 = int( ((pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2)**0.5 )
l2 = int( ((pt1[0] - pt3[0])**2 + (pt1[1] - pt3[1])**2)**0.5 )
distances.append(l1)
distances.append(l2)
if debug:
res = np.copy(img)
cv2.circle( res, tuple(pt1), 2, (0,0,255), thickness=-1)
cv2.circle( res, tuple(pt2), 2, (0,0,255), thickness=-1)
cv2.circle( res, tuple(pt3), 2, (0,0,255), thickness=-1)
cv2.putText( res , str(l1), tuple(((pt1 + pt2)/2 + (-10,-10)).astype(np.int32)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, (0,0,255), thickness=2)
cv2.putText( res , str(l2), tuple(((pt1 + pt3)/2 + (-20,5)).astype(np.int32)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, (0,0,255), thickness=2)
cv2.imshow('image', cv2.resize( res, None, fx=0.75, fy=0.75))
key = cv2.waitKey(10)
if key == 27:
break
break
distances = np.array([distances])
if debug:
cv2.destroyAllWindows()
print( 'min px size: ', distances.min())
print( 'accuracy: ',self.chess_home_size / distances.min(),'mm')
self.chess_home_size_px = distances.min()
self.px2mm = self.chess_home_size / distances.min()
#np.savenp.array([self.chess_home_size / distances.min()])
return distances.min(), self.chess_home_size / distances.min()
def calc_calibration(self, cameras, debug=True):
for cam_pos,cam in cameras.items():
while True:
img = next( cam.grabber() )
#img = cv2.imread('data/cal_img/cal_1.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.chess_size,None)
#----------------------------------
if debug:
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5))
cv2.waitKey(10)
if not(ret):
continue
cv2.destroyAllWindows()
#----------------------------------
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners= cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
corners = corners.reshape((-1,2))
#----------------------------------
if debug:
img_ = np.copy(img)
for num,corner in enumerate(corners):
cv2.circle( img_, tuple(corner), 5, (0,0,255), thickness=-1)
cv2.putText( img_ , str(num), tuple(corner), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255), thickness=2)
cv2.imshow('image_all_corners_' + cam_pos, cv2.resize( img_, None, fx=0.5, fy=0.5))
key = cv2.waitKey(0)
cv2.destroyAllWindows()
if key==ord('n'):
continue
#----------------------------------
org_pts = []
if cam_pos == 'UL':
org_pts.append( corners[0] )
org_pts.append( corners[self.chess_size[0] - 1] )
org_pts.append( corners[self.chess_size[0] * self.chess_size[1] - self.chess_size[0]] )
org_pts.append( corners[-1] )
if cam_pos == 'UR':
org_pts.append( corners[self.chess_size[0] * self.chess_size[1] - self.chess_size[0]] )
org_pts.append( corners[0] )
org_pts.append( corners[-1] )
org_pts.append( corners[self.chess_size[0] - 1] )
if cam_pos == 'DL':
org_pts.append( corners[self.chess_size[0] - 1] )
org_pts.append( corners[-1] )
org_pts.append( corners[0] )
org_pts.append( corners[self.chess_size[0] * self.chess_size[1] - self.chess_size[0]] )
if cam_pos == 'DR':
org_pts.append( corners[-1] )
org_pts.append( corners[self.chess_size[0] * self.chess_size[1] - self.chess_size[0]] )
org_pts.append( corners[self.chess_size[0] - 1] )
org_pts.append( corners[0] )
if debug:
img_ = np.copy(img)
for num,corner in enumerate(org_pts):
cv2.circle( img_, tuple(corner), 5, (0,0,255), thickness=-1)
cv2.putText( img_ , str(num), tuple(corner), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (0,0,255), thickness=2)
cv2.imshow('image_border_corners_' + cam_pos, cv2.resize( img_, None, fx=0.5, fy=0.5))
key = cv2.waitKey(0)
cv2.destroyAllWindows()
if key==ord('n'):
continue
#----------------------------------
org_pts = np.array( org_pts, dtype = np.float32)
dst_pts = np.array( [[ self.border * self.chess_home_size_px, self.border * self.chess_home_size_px],
[(self.border + self.chess_size[0]-1)* self.chess_home_size_px , self.border * self.chess_home_size_px],
[ self.border * self.chess_home_size_px, (self.border + self.chess_size[1]-1)* self.chess_home_size_px ],
[(self.border + self.chess_size[0]-1)* self.chess_home_size_px, (self.border + self.chess_size[1]-1)* self.chess_home_size_px]]).astype(np.float32 )
#----------------------------------
M = cv2.getPerspectiveTransform( org_pts, dst_pts)
dst_w, dst_h = (2*self.border + self.chess_size[0]-1)* self.chess_home_size_px, (2*self.border + self.chess_size[1]-1)* self.chess_home_size_px
self.dst_size = dst_w, dst_h
np.save( os.path.join(self.path,'dsize'), np.array([dst_w, dst_h] ))
np.save( os.path.join(self.path,'mtx_' + cam_pos ), M)
if debug:
while True:
img = next( cam.grabber())
dst = cv2.warpPerspective( img, M, (dst_w, dst_h))
cv2.imshow('image', cv2.resize(img, None, fx=0.75, fy=0.75 ))
cv2.imshow('dst', cv2.resize(dst, None, fx=0.75, fy=0.75 ))
key = cv2.waitKey(50)
if key == ord('n') or key == 27 or key == ord('y'):
cv2.destroyAllWindows()
break
if key == 27 or key == ord('y'):
break
def load_mtx(self, cameras):
for cam_pos,cam in cameras.items():
try:
self.mtx[cam_pos] = np.load( os.path.join( self.path, 'mtx_' + cam_pos + '.npy') )
except:
print('mtx_' + cam_pos + ' not found')
try:
self.dst_size = np.load('data/perspective_calib/dsize.npy')
except:
print('dst_size not found')
def correct_all_persective(self, imgs):
if len(self.dst_size)==0 or len(imgs)>len(self.mtx):
print('matrix or dst_size not loaded')
return -1
ress={}
for cam_pos,img in imgs.items():
ress[cam_pos] = cv2.warpPerspective( img, self.mtx[cam_pos], tuple(self.dst_size))
return ress
def correct_persective(self, img, cam_pos):
if len(self.dst_size)==0 or self.mtx.get( cam_pos ) is None:
print('matrix or dst_size not loaded')
return -1
res = cv2.warpPerspective( img, self.mtx[cam_pos], tuple(self.dst_size))
return res
if __name__ == '__main__':
cameras = getCamerasDict( ul=1,ur=3, dl=0,dr=2)
#cameras = {'UL':5, 'DL':30,'UR':1}
perspective = calibration_perspective('data/perspective_calib', chess_size = (6,6), chess_home_size = 20, border = 3)
perspective.calc_accuracy( cameras )
perspective.calc_calibration( cameras )
perspective.load_mtx(cameras)
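    # Hedged live-preview sketch using the loaded matrices (window names and
    # the 0.5 scale factor are arbitrary choices):
    #
    #   while True:
    #       imgs = {pos: next(cam.grabber()) for pos, cam in cameras.items()}
    #       warped = perspective.correct_all_persective(imgs)
    #       for pos, img in warped.items():
    #           cv2.imshow(pos, cv2.resize(img, None, fx=0.5, fy=0.5))
    #       if cv2.waitKey(30) == 27:
    #           break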
'''
ret = np.load(path_cal + '/ret.npy')
mtx = np.load(path_cal + '/K.npy')
dist = np.load(path_cal + '/dist.npy')
rvecs = np.load(path_cal + '/rvecs.npy')
tvecs = np.load(path_cal + '/tvecs.npy')
cam_pos = 'UL'
chess_size = (9,6)
chess_home_size = 30
border = 2
while True:
img = cv2.imread('data/cal_img/cal_18.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, chess_size,None)
if not ret:
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5))
cv2.waitKey(100)
continue
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners= cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
corners = corners.reshape((-1,2))
org_pts = []
if cam_pos == 'UL':
org_pts.append( corners[0] )
org_pts.append( corners[chess_size[0] - 1] )
org_pts.append( corners[chess_size[0]*chess_size[1] - chess_size[0]] )
org_pts.append( corners[-1] )
for num,corner in enumerate(org_pts):
cv2.circle( img, tuple(corner), 5, (0,0,255), thickness=-1)
cv2.putText( img , str(num), tuple(corner), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (0,0,255), thickness=2)
cv2.imshow('image', cv2.resize( img, None, fx=0.5, fy=0.5))
key = cv2.waitKey(0)
if key==ord('n'):
continue
org_pts = np.array( org_pts, dtype = np.float32)
dst_pts = np.array( [[ border * chess_home_size, border * chess_home_size],
[(border + chess_size[0]-1)* chess_home_size , border * chess_home_size],
[ border * chess_home_size, (border + chess_size[1]-1)* chess_home_size ],
[(border + chess_size[0]-1)* chess_home_size, (border + chess_size[1]-1)* chess_home_size]]).astype(np.float32 )
M = cv2.getPerspectiveTransform( org_pts, dst_pts)
#dst_h = (2*border + chess_size[1]-1)* chess_home_size
#dst_w = (2*border + chess_size[0]-1)* chess_home_size
dst_w, dst_h = (2*border + chess_size[0]-1)* chess_home_size, (2*border + chess_size[1]-1)* chess_home_size
while True:
dst = cv2.warpPerspective( img, M, (dst_w, dst_h))
cv2.imshow('image', cv2.resize(img, None, fx=0.5, fy=0.5 ))
cv2.imshow('dst', cv2.resize(dst, None, fx=0.5, fy=0.5 ))
key = cv2.waitKey(50)
if key == ord('n') or key == 27 or key == ord('y'):
cv2.destroyAllWindows()
break
if key == 27 or key == ord('y'):
break
cv2.imshow('img', cv2.resize( img, None, fx=0.5, fy=0.5))
cv2.waitKey(50)
'''
| null |
profilemeter_alpha/archive codes/perspective_calbration.py
|
perspective_calbration.py
|
py
| 20,808 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pypylon.pylon.TlFactory.GetInstance",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pypylon.pylon.TlFactory",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pypylon.pylon",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.RuntimeException",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pypylon.pylon",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.InstantCamera",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pypylon.pylon",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.ImageFormatConverter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pypylon.pylon",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.PixelType_BGR8packed",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pypylon.pylon",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.OutputBitAlignment_MsbAligned",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pypylon.pylon",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "pypylon.pylon.TimeoutHandling_ThrowException",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pypylon.pylon",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "cv2.findChessboardCorners",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "cv2.cornerSubPix",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "cv2.getPerspectiveTransform",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "cv2.findChessboardCorners",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "cv2.cornerSubPix",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "cv2.findChessboardCorners",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "cv2.cornerSubPix",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "cv2.getPerspectiveTransform",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 369,
"usage_type": "call"
}
] |
333170670
|
import config
import mysql.connector
import redis
import requests
import time
from flask import Flask, request, session, jsonify, redirect, abort
from flask_cors import CORS
app = Flask(__name__)
app.config['SECRET_KEY'] = config.SECRET_KEY
app.config['DEBUG'] = False
#CORS(app, supports_credentials=True)
r = redis.Redis(host=config.redis['host'], port=config.redis['port'], db=config.redis['db'])
conn = mysql.connector.connect(host=config.db['host'], user=config.db['user'],
passwd=config.db['passwd'], database=config.db['database'], charset='utf8mb4', autocommit=1)
db = conn.cursor()
def checkWechatLogin():
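    # Resolve the current user: reuse session['user_id'] when present, otherwise
    # validate the PHPSESSID cookie against the WeChat session-check endpoint,
    # creating the user row on first login. Aborts with 401 if no identity is found.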
if "user_id" not in session:
sess_id = request.cookies.get("PHPSESSID")
if sess_id is not None:
            resp = requests.get(config.wx_check_url, timeout=5, cookies=dict(PHPSESSID=sess_id))  # 'resp' avoids shadowing the module-level redis client 'r'
            try:
                t = resp.json()
if "openid" in t:
try:
conn.ping()
except mysql.connector.errors.InterfaceError:
conn.reconnect()
db.execute('select id from users where openid=%s', (t['openid'],))
result = db.fetchall()
if not result:
db.execute('insert into users (openid,nickname,headpic) value (%s,%s,%s)',(t['openid'],t['nickname'],t['headpic']))
db.execute('select id from users where openid=%s', (t['openid'],))
result = db.fetchall()
session['user_id'] = result[0][0]
            except Exception:
                pass  # any failure here simply falls through to the 401 below
if "user_id" not in session:
abort(401)
return session['user_id']
@app.errorhandler(401)
def unauthorized(error):
    return "请先登录微信!", 401  # "Please log in with WeChat first!"
# @app.route('/setuserid/<id>', methods=['get'])
# def setuserid(id):
# session['user_id'] = id
# return "OK", 204
# @app.route('/clearuserid', methods=['get'])
# def clearuserid():
# session.clear()
# return "OK",204
@app.route('/info', methods=['get'])
def getinfo():
checkWechatLogin()
keys = ['index', 'name', 'text', 'iconsrc', 'audiosrc']
anchors = []
try:
conn.ping()
except mysql.connector.errors.InterfaceError:
conn.reconnect()
db.execute("select id,name,declaration,imgsrc,audiosrc from anchors order by id")
result = db.fetchall()
db.execute("select a.id,ifnull(b.number,0) from `anchors` as a left join (select anchor_id,count(anchor_id) as number from votes group by anchor_id order by anchor_id) as b on a.id=b.anchor_id group by a.id order by a.id")
num = db.fetchall()
i = 0
for anchor in result:
d = dict(zip(keys, anchor))
d['audiosrc'] = config.audio_url + d['audiosrc']
d['iconsrc'] = config.pic_url + d['iconsrc']
d['number'] = num[i][1]
i = i + 1
anchors.append(d)
return jsonify(anchors), 200
@app.route('/vote/<id>', methods=['POST'])
def vote(id):
if int(id) not in range(1, 13):
return jsonify({
'errcode':6,
'errmsg':config.errmsg['index_err']
})
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
if now > config.end_time:
return jsonify({
'errcode':5,
'errmsg':config.errmsg['end']
})
if now < config.start_time:
return jsonify({
'errcode':4,
'errmsg':config.errmsg['start']
})
user_id = checkWechatLogin()
try:
conn.ping()
except mysql.connector.errors.InterfaceError:
conn.reconnect()
p = (user_id,)
db.execute('select created_at from `votes` where `user_id` = %s order by created_at desc limit 1',p)
result=db.fetchall()
if result:
last = str(result[0][0])
now = time.strftime('%Y-%m-%d', time.localtime(time.time()))
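        # String-comparison trick: 'last' is 'YYYY-MM-DD HH:MM:SS' while 'now'
        # is only 'YYYY-MM-DD', so last > now holds exactly when the most recent
        # vote was cast today (one vote per user per day).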
if last > now:
return jsonify({
'errcode':3,
'errmsg':config.errmsg['day_limit']
})
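    # Atomic rate limiter: this Lua script executes as a single Redis command,
    # so the check-and-increment cannot race between concurrent requests. The
    # per-anchor key 'have<id>' expires after 60 seconds; once its counter
    # reaches config.vote_number_min (presumably the per-minute cap) the script
    # returns 0 and the vote below is rejected with errcode 2.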
luastr = """
if redis.call('exists',KEYS[1]) == 1 then
if tonumber(redis.call('get',KEYS[1])) >= tonumber(ARGV[1]) then
return 0
end
else
redis.call('set',KEYS[1],1)
redis.call('expire',KEYS[1],60)
end
redis.call('incr',KEYS[1])
return 1
"""
lua = r.register_script(luastr)
if int(lua(keys=['have' + str(id)], args=[config.vote_number_min])):
p = (session['user_id'], id)
db.execute("insert into votes (user_id,anchor_id) values (%s,%s)", p)
if db.rowcount:
return jsonify({
'errcode': 0,
'errmsg': config.errmsg['success']
}),200
else:
return jsonify({
'errcode': 1,
'errmsg': config.errmsg['insert_err']
})
else:
return jsonify({
'errcode':2,
'errmsg':config.errmsg['min_limit']
})
if __name__ == '__main__':
app.run(port=config.app_port)
| null |
backend/app.py
|
app.py
|
py
| 4,316 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.SECRET_KEY",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "redis.Redis",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "config.redis",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "config.db",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.db",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.request.cookies.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.cookies",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "config.wx_check_url",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "mysql.connector.connector",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "config.audio_url",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "config.pic_url",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "config.end_time",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "config.start_time",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "config.vote_number_min",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "config.errmsg",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "config.app_port",
"line_number": 156,
"usage_type": "attribute"
}
] |
109846729
|
import os
import json
import discord
ADMINS = {}
class EcosystemBots:
FullyAutomatedNutcracker = "FAN_"
def load_creds(is_debug, bot: str):
with open('config/creds.json') as fp:
creds = json.load(fp)
return creds[bot + "BETA_TOKEN"] if is_debug else creds[bot + "TOKEN"]
def load_admins():
global ADMINS
with open('config/admin.json') as fp:
ADMINS = json.load(fp)
def is_admin(user: discord.User):
return str(user.id) in ADMINS
def get_last_log_file_path():
scriptpath = os.path.realpath(__file__)
rootpath = os.path.abspath(os.path.join(scriptpath, '..', '..', '..'))
logspath = os.path.abspath(os.path.join(rootpath, 'logs'))
logs = [os.path.join(logspath, log) for log in os.listdir(logspath)]
latest = max(logs, key=os.path.getctime)
return latest
def tail(f, lines):
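    # Ring buffer over the file's lines: once `lines` entries have been
    # collected, each new line overwrites the oldest slot and `at_idx` tracks
    # where that oldest entry sits, so the final slice restores chronological order.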
tail_log = []
at_idx = 0
for line in f.readlines():
if len(tail_log) == lines:
tail_log[at_idx] = line
at_idx = (at_idx + 1) % lines
else:
tail_log.append(line)
return tail_log[at_idx:] + tail_log[:at_idx]
def tail_error(f):
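    # Small state machine over the log: start capturing at each "Catching
    # exception in command error" marker (resetting the buffer) and stop after
    # the CommandInvokeError line, so only the last complete traceback survives.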
last_error = ""
listening = False
for line in f.readlines():
if "Catching exception in command error" in line:
listening = True
last_error = ""
if line.startswith("discord.ext.commands.errors.CommandInvokeError: Command raised an exception:"):
last_error += line
listening = False
if listening:
last_error += line
return last_error
| null |
src/utils/common.py
|
common.py
|
py
| 1,582 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.User",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
}
] |
627716920
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date, timedelta
import re
from openerp.osv import fields, osv
from openerp.tools.translate import _
def test_item_id(self, cr, uid, item_id, natureza):
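    # Validate that the product behind item_id is fully configured for the given
    # natureza ('rec'/'ots' revenue vs. 'des' expense): income/expense accounts,
    # sale/purchase flags, stock valuation accounts, a positive standard cost,
    # an internal reference and taxes. Every missing field is accumulated in
    # `message` and reported in a single warning at the end.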
db_product_product = self.pool.get('product.product')
obj_product = db_product_product.browse(cr, uid, item_id)
message = ''
if natureza in ['rec', 'ots']:
if hasattr(obj_product.product_tmpl_id, 'property_account_income'):
if obj_product.product_tmpl_id.property_account_income.id is False:
                message += u'Contabilidade/Conta de Receita associada ao cliente\n'
        else:
            message += u'Contabilidade/Conta de Receita associada ao cliente\n'
if obj_product.product_tmpl_id.sale_ok is False:
message += u'Seleccionar opção "Pode ser vendido"\n'
elif natureza == 'des':
if hasattr(obj_product.product_tmpl_id, 'property_account_expense'):
if obj_product.product_tmpl_id.property_account_expense.id is False:
message += u'Contabilidade/Conta de Despesa associada ao fornecedor\n'
else:
message += u'Contabilidade/Conta de Despesa associada ao fornecedor\n'
if obj_product.product_tmpl_id.purchase_ok is False:
message += u'Seleccionar opção "Pode ser comprado"\n'
if hasattr(obj_product.product_tmpl_id, 'property_stock_account_input'):
if obj_product.product_tmpl_id.property_stock_account_input.id is False:
message += u'Contabilidade/Avaliação do Inventário/' \
u'Tempo Real(automatizado) e Conta Stock de Entrada\n'
else:
message += u'Contabilidade/Avaliação do Inventário/' \
u'Tempo Real(automatizado) e Conta Stock de Entrada\n'
if hasattr(obj_product.product_tmpl_id, 'property_stock_account_output'):
if obj_product.product_tmpl_id.property_stock_account_output.id is False:
message += u'Contabilidade/Avaliação do Inventário/' \
u'Tempo Real(automatizado) e Conta de saída de Stock\n'
else:
message += u'Contabilidade/Avaliação do Inventário/' \
u'Tempo Real(automatizado) e Conta de saída de Stock\n'
res_id = u'product.template,' + unicode(obj_product.product_tmpl_id.id)
cr.execute("""
SELECT value_float
FROM ir_property
WHERE name = 'standard_price' AND res_id = '%s'
""" % res_id)
standard_price = cr.fetchone()
if standard_price is None or standard_price[0] <= 0:
message += u'Procurements/Preço de Custo (valor positivo)\n'
if obj_product.default_code is False:
message += u'Informação/Referência Interna\n'
if len(obj_product.product_tmpl_id.taxes_id) == 0:
message += u'Contabilidade/Impostos a Cliente\n'
if len(obj_product.product_tmpl_id.supplier_taxes_id) == 0:
message += u'Contabilidade/Impostos do Fornecedor\n'
if obj_product.product_tmpl_id.type != 'product':
message += u' Informação/Tipo de Artigo/Artigo Armazenável\n'
if obj_product.product_tmpl_id.categ_id.id is not False:
x = obj_product.product_tmpl_id.categ_id.property_stock_valuation_account_id.id
if x is False:
message += u' Para as categorias dos artigos defina a Conta de avaliação de stock.'
if len(message) != 0:
raise osv.except_osv(_(u'Aviso'),
_(u'Para evitar futuros erros na execução do programa '
u'deverá preencher os seguintes campos do artigo:\n' + message))
return True
def get_sequence(self, cr, uid, context, text, value):
    # Sequence lookup-or-create: the ir.sequence code follows the pattern
    # 'seq_' + text + '_code_' + value; if it does not exist yet, both the
    # sequence type and the sequence itself are created with that pattern.
seq = self.pool.get('ir.sequence').get(cr, uid, 'seq_' + text + '_code_' + unicode(value))
if seq is False:
sequence_type = self.pool.get('ir.sequence.type')
values_type = {
'name': 'type_' + text + '_name_' + unicode(value),
'code': 'seq_' + text + '_code_' + unicode(value)}
sequence_type.create(cr, uid, values_type, context=context)
sequence = self.pool.get('ir.sequence')
values = {
'name': 'seq_' + text + '_name_' + unicode(value),
'code': 'seq_' + text + '_code_' + unicode(value),
'number_next': 1,
'number_increment': 1}
sequence.create(cr, uid, values, context=context)
seq = self.pool.get('ir.sequence').get(cr, uid, 'seq_' + text + '_code_' + unicode(value))
return seq
# ______________________________________________________Calendário_________________________________________
class sncp_comum_calendario(osv.Model):
_name = 'sncp.comum.calendario'
_description = u"Calendário"
def copiar_ano(self, cr, uid, ids, context):
self.write(cr, uid, ids, {'state': 1})
obj_calendario = self.browse(cr, uid, ids[0])
ano_calc = obj_calendario.name + 1
ano_calc_id = self.create(cr, uid, {'name': ano_calc})
db_sncp_comum_feriados = self.pool.get('sncp.comum.feriados')
feriados_ids = db_sncp_comum_feriados.search(cr, uid, [('ano_id', '=', ids[0]),
('tipo', '=', 'fix')])
for record in db_sncp_comum_feriados.browse(cr, uid, feriados_ids):
record_data = datetime.strptime(record.data, "%Y-%m-%d")
data = record_data.date()
data = data.replace(year=ano_calc)
vals = {'ano_id': ano_calc_id,
'name': record.name,
'data': unicode(data),
'tipo': 'fix'}
db_sncp_comum_feriados.create(cr, uid, vals)
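        # Movable feasts: `number` is the Golden Number (year mod 19 + 1) of the
        # 19-year Metonic cycle, and the table below maps it to the date of the
        # Paschal full moon; `data_pascoa` then advances to the following Sunday
        # (Easter), from which Carnival (-47 days) and Good Friday (-2 days) derive.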
number = ano_calc % 19 + 1
data = None
if number == 1:
data = date(ano_calc, 4, 14)
elif number == 2:
data = date(ano_calc, 4, 3)
elif number == 3:
data = date(ano_calc, 3, 23)
elif number == 4:
data = date(ano_calc, 4, 11)
elif number == 5:
data = date(ano_calc, 3, 31)
elif number == 6:
data = date(ano_calc, 4, 18)
elif number == 7:
data = date(ano_calc, 4, 8)
elif number == 8:
data = date(ano_calc, 3, 28)
elif number == 9:
data = date(ano_calc, 4, 16)
elif number == 10:
data = date(ano_calc, 4, 5)
elif number == 11:
data = date(ano_calc, 3, 25)
elif number == 12:
data = date(ano_calc, 4, 13)
elif number == 13:
data = date(ano_calc, 4, 2)
elif number == 14:
data = date(ano_calc, 3, 22)
elif number == 15:
data = date(ano_calc, 4, 10)
elif number == 16:
data = date(ano_calc, 3, 30)
elif number == 17:
data = date(ano_calc, 4, 17)
elif number == 18:
data = date(ano_calc, 4, 7)
elif number == 19:
data = date(ano_calc, 3, 27)
data_pascoa = data + timedelta(days=(6 - data.weekday()))
db_sncp_comum_feriados.create(cr, uid, {'ano_id': ano_calc_id,
'name': u'Páscoa',
'data': unicode(data_pascoa),
'tipo': 'mov'})
db_sncp_comum_feriados.create(cr, uid, {'ano_id': ano_calc_id,
'name': 'Carnaval',
'data': unicode(data_pascoa - timedelta(days=47)),
'tipo': 'mov'})
db_sncp_comum_feriados.create(cr, uid, {'ano_id': ano_calc_id,
'name': u'Sexta-Feira Santa',
'data': unicode(data_pascoa - timedelta(days=2)),
'tipo': 'mov'})
return True
_columns = {
'name': fields.integer(u'Ano', size=4),
'feriado_id': fields.one2many('sncp.comum.feriados', 'ano_id', u''),
'state': fields.integer(u'copiado')
        # 0 -- the "copy year" button is available
        # 1 -- the year has already been copied
}
def unlink(self, cr, uid, ids, context=None):
if self.browse(cr, uid, ids[0]).name == date.today().year:
raise osv.except_osv(_(u'Aviso'), _(u'Não pode apagar o ano corrente.'))
ano_futuro_id = self.search(cr, uid, [('name', '=', self.browse(cr, uid, ids[0]).name + 1)])
if len(ano_futuro_id) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Não pode apagar o ano do meio do calendário.'))
feriados_ids = self.pool.get('sncp.comum.feriados').search(cr, uid, [('ano_id', '=', ids[0])])
if len(feriados_ids) != 0:
self.pool.get('sncp.comum.feriados').unlink(cr, uid, feriados_ids)
        # reset the previous year's state to 0 so its "copy" button becomes available again
ano_anterior_id = self.search(cr, uid, [('name', '=', self.browse(cr, uid, ids[0]).name - 1)])
if len(ano_anterior_id) != 0:
self.write(cr, uid, ano_anterior_id, {'state': 0})
        return super(sncp_comum_calendario, self).unlink(cr, uid, ids, context=context)
_defaults = {'state': 0}
_sql_constraints = [
('ano_unique', 'unique (name)', u'Este ano já está registado'),
]
# ______________________________________________________FERIADOS_________________________________________
class sncp_comum_feriados(osv.Model):
_name = 'sncp.comum.feriados'
_description = u"Calendario/Feriados"
def on_change_create(self, cr, uid, ids, tipo):
if tipo == 'fix':
return {}
else:
return {'warning': {'title': u'Feriados móveis',
'message': u'Os feriados móveis são inseridos manualmente para cada ano. '
u'Para automatizar o processo consulte o administrador de sistema.'}}
_columns = {
'ano_id': fields.many2one('sncp.comum.calendario'),
'data': fields.date(u'Data', ),
'name': fields.char(u'Descrição', size=30, ),
'tipo': fields.selection([
('fix', u'Fixo'),
('mov', u'Móvel'), ], u'Tipo', ),
}
def create(self, cr, uid, vals, context=None):
data = None
if type(vals['data']) in [str, unicode]:
data = datetime.strptime(vals['data'], "%Y-%m-%d")
elif type(vals['data']) is tuple:
data = date(vals['data'][0], vals['data'][1], vals['data'][2])
vals['data'] = data
feriado_id = super(sncp_comum_feriados, self).create(cr, uid, vals, context=context)
return feriado_id
def unlink(self, cr, uid, ids, context=None):
        return super(sncp_comum_feriados, self).unlink(cr, uid, ids, context=context)
_order = 'data'
_sql_constraints = [
('data_unique', 'unique (data)', u'Esta data já está registada como feriado'),
]
# __________________________________________________________CVP__________________________________________
class sncp_comum_cpv(osv.Model):
_name = 'sncp.comum.cpv'
_description = u"Vocabulário Comum para os Contratos Públicos"
_columns = {
'name': fields.char(u'Descrição', size=255, ),
'codigo_120': fields.char(u'Código CPV', size=10, ),
}
_order = 'codigo_120'
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_comum_codigos_contab
WHERE cpv_id = %d
""" % obj.id)
res_cod_contab = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_despesa_compromisso_dados_adic
WHERE cpv_id = %d
""" % obj.id)
res_comp = cr.fetchall()
            if len(res_cod_contab) != 0 or len(res_comp) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o cpv ' + obj.name
+ u' têm associação em:\n'
u'1. Códigos de Contabilização.\n'
u'2. Compromissos.'))
return super(sncp_comum_cpv, self).unlink(cr, uid, ids, context=context)
_sql_constraints = [
('codigo_cpv_unique', 'unique (codigo_120)', u'O código tem que ser único!'),
]
# ______________________________________________________MEIOS PAGAMENTO__________________________________
class sncp_comum_meios_pagamento(osv.Model):
_name = 'sncp.comum.meios.pagamento'
_description = u"Meios de Pagamento"
_rec_name = 'name'
_columns = {
'metodo': fields.char(u'Método', size=3, ),
'name': fields.char(u'Descrição', ),
'meio': fields.selection([('cx', u'Caixa'),
('bk', u'Banco'),
('fm', u'Fundo de Maneio'),
# ('dc', u'Documentos')
], u'Meio', ),
'tipo': fields.selection([('rec', 'Recebimento'), ('pag', 'Pagamento')],
u'Recebimento/Pagamento', ),
'echeque': fields.boolean(u'É cheque?'),
}
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_comum_param
WHERE otes_mpag = %d OR desp_mpag = %d
""" % (obj.id, obj.id))
res_param = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_despesa_pagamentos_ordem
WHERE meio_pag_id = %d
""" % obj.id)
res_ordem = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_despesa_pagamentos_reposicoes
WHERE meio_pag_id = %d
""" % obj.id)
res_repo = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_tesouraria_config_mapas
WHERE meio_pag_id = %d
""" % obj.id)
res_tes_mapa = cr.fetchall()
if len(res_ordem) != 0 or len(res_param) != 0 or len(res_repo) != 0 or len(res_tes_mapa) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o meio de pagamento ' + obj.name
+ u' têm associação em:\n'
u'1. Ordens de Pagamento.\n'
u'2. Parâmetros.\n'
u'3. Reposições.\n'
u'4. Tesouraria\Configurações\Mapas.'))
return super(sncp_comum_meios_pagamento, self).unlink(cr, uid, ids, context=context)
_order = 'metodo'
_sql_constraints = [
('metodo_pagamento_unique', 'unique (metodo)', u'Este método já está registado'),
]
# ______________________________________________________CONDIÇOES DE PAGAMENTO___________________________
class sncp_comum_cond_pagam(osv.Model):
_name = 'sncp.comum.cond.pagam'
_description = u"Condições de Pagamento"
def on_change_anual_true(self, cr, uid, ids, anual):
if anual is True:
return {
'value': {'quantidade': 0,
'estado': 1,
'tipo': 'nap',
'contagem': 'napl'}}
else:
return {
'value': {'estado': 0,
'dia': 0,
'mes': False}
}
def _restrict_contagem(self, cr, uid, ids):
obj = self.browse(cr, uid, ids[0])
if obj.anual is False and obj.contagem == 'napl':
raise osv.except_osv(_(u'Aviso'), _(u'A contagem não pode ser "Não aplicável".'))
return True
def teste_anos(self, ano):
if ano % 4 == 0:
if ano % 100 == 0 and ano % 400 != 0:
return False
else:
return True
else:
return False
def teste_meses(self, mes, ano):
if mes in [1, 3, 5, 7, 8, 10, 12]:
return 31
elif mes in [4, 6, 9, 11]:
return 30
elif mes in [2] and ano > 0:
bi = self.teste_anos(ano)
if bi:
return 29
elif bi is False:
return 28
elif mes in [2] and ano <= 0:
return 28
def check_mes(self, mes, ano):
novo_mes = mes
novo_ano = ano
while novo_mes > 12:
novo_mes -= 12
novo_ano += 1
return novo_mes, novo_ano
def da_data_vencimento(self, cr, uid, ids, vals, context=None):
# vals = {
# 'cond_pagam_id': ,
# 'dataemissao': datetime,
# }
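        # Due-date algorithm: annual conditions snap to the configured day/month
        # (clamped to the month length, leap years included) at the next
        # occurrence after the issue date. Otherwise the counting origin is
        # chosen (immediate, start or end of month) and `quantidade` days or
        # months are added; finally `dias_descanso` decides whether a due date
        # on a weekend or holiday is kept, anticipated or postponed.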
obj = self.browse(cr, uid, vals['cond_pagam_id'])
data_ini = vals['dataemissao']
data_venc = False
if obj.anual is True:
ano = data_ini.year
mes_ini = data_ini.month
dia_ini = data_ini.day
if mes_ini > int(obj.mes):
ano += 1
if mes_ini == int(obj.mes):
if dia_ini > obj.dia:
ano += 1
if int(obj.mes) == 2:
if obj.dia > 28:
if self.teste_anos(ano) is True:
dia_ini = obj.dia
else:
dia_ini = 28
elif int(obj.mes) in [4, 6, 9, 11]:
if obj.dia > 30:
dia_ini = 30
else:
dia_ini = obj.dia
else:
if obj.dia > 31:
dia_ini = 31
else:
dia_ini = obj.dia
data_venc = date(ano, int(obj.mes), dia_ini)
elif obj.anual is False:
if obj.contagem == 'fmes':
if data_ini.month < 12:
data_ini = date(data_ini.year, data_ini.month + 1, 1) - timedelta(days=1)
else:
data_ini = date(data_ini.year + 1, 1, 1) - timedelta(days=1)
elif obj.contagem == 'imes':
data_ini = date(data_ini.year, data_ini.month, 1)
elif obj.contagem == 'napl':
raise osv.except_osv(_(u'receita/da_data_vencimento/contagem'),
_(u'Ocorreu um erro inesperado, contacte o administrador do sistema.'))
if obj.tipo == 'dia' and obj.contagem == 'imes':
data_ini = data_ini - timedelta(days=1)
        # Apply the tipo offset (days or months) from the counting origin
if obj.tipo == 'dia':
data_venc = data_ini + timedelta(days=obj.quantidade)
elif obj.tipo == 'mes':
dia = data_ini.day
mes = obj.quantidade + data_ini.month
ano = data_ini.year
tup = self.check_mes(mes, ano)
ultimo_dia = self.teste_meses(tup[0], tup[1])
if dia > ultimo_dia:
dia = ultimo_dia
data_venc = date(tup[1], tup[0], dia)
else:
            raise osv.except_osv(_(u'receita/da_data_vencimento/tipo'),
_(u'Ocorreu um erro inesperado, contacte o administrador do sistema.'))
if data_venc < vals['dataemissao']:
if obj.tipo == 'dia':
if data_venc.month < 12:
data_venc = date(data_venc.year, data_venc.month + 1, data_venc.day)
else:
data_venc = date(data_venc.year + 1, 1, data_venc.day)
elif obj.tipo == 'mes':
if data_venc.month < 12:
data_venc = date(data_venc.year, data_venc.month + 1, data_venc.day)
else:
data_venc = date(data_venc.year + 1, 1, data_venc.day)
elif obj.anual is True:
data_venc = date(data_venc.year + 1, data_venc.month, data_venc.day)
if obj.dias_descanso == 'mant':
return data_venc
else:
            # Shift the due date off weekends and holidays, per dias_descanso
db_sncp_comum_feriados = self.pool.get('sncp.comum.feriados')
feriados_id = db_sncp_comum_feriados.search(cr, uid, [('data', '=', unicode(data_venc))])
while data_venc.weekday() >= 5 or len(feriados_id) != 0:
if obj.dias_descanso == 'ante':
data_venc = data_venc - timedelta(days=1)
elif obj.dias_descanso == 'adia':
data_venc = data_venc + timedelta(days=1)
feriados_id = db_sncp_comum_feriados.search(cr, uid, [('data', '=', unicode(data_venc))])
return data_venc
_columns = {
'name': fields.char(u'Código', size=3),
'descricao': fields.char(u'Descrição'),
'quantidade': fields.integer(u'Quantidade'),
'tipo': fields.selection([('dia', u'Dia'),
('mes', u'Mês'),
('nap', u'Não Aplicável'), ], u'Tipo'),
'contagem': fields.selection([('imed', u'Imediata'),
('imes', u'Início do mês'),
('fmes', u'Final do mês'),
('napl', u'Não Aplicável'), ], u'A contar de'),
'anual': fields.boolean(u'Anual'),
        'dia': fields.integer(u'Dia'),  # TODO: validate the possible day values
'mes': fields.selection([
('1', u'Janeiro'),
('2', u'Fevereiro'),
('3', u'Março'),
('4', u'Abril'),
('5', u'Maio'),
('6', u'Junho'),
('7', u'Julho'),
('8', u'Agosto'),
('9', u'Setembro'),
('10', u'Outubro'),
('11', u'Novembro'),
('12', u'Dezembro'), ], u'Mês'),
'dias_descanso': fields.selection([('mant', u'Mantém'),
('ante', u'Antecipa'),
('adia', u'Adia'), ], u'Nos dias de descanso'),
'payment_term_id': fields.many2one('account.payment.term', u'Standard'),
'estado': fields.integer(u'Variavel de controlo de aparencias'),
        # 1: anual=True -> tipo, contagem and quantidade are readonly
        # 0: anual=False -> dia and mes are readonly
}
_order = 'name'
_defaults = {
'estado': 0,
'dias_descanso': 'mant',
'dia': 1,
}
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_comum_codigos_contab
WHERE cond_pag_id = %d
""" % obj.id)
res_cod_contab = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_controlo_config
WHERE cond_pagam_id = %d
""" % obj.id)
res_controlo = cr.fetchall()
if len(res_cod_contab) != 0 or len(res_controlo) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se a condição de pagamento ' + obj.name
+ u' têm associação em:\n'
u'1. Códigos de Contabilização.\n'
u'2. Receita\Controlo de Receitas Renováveis\Configurações'
u'\Configuração Geral.'))
return super(sncp_comum_cond_pagam, self).unlink(cr, uid, ids, context=context)
def _name_limit(self, cr, uid, ids):
obj = self.browse(cr, uid, ids[0])
re_name = re.compile('^([A-Z0-9]){1,3}$')
if re.match(re_name, obj.name):
return True
else:
raise osv.except_osv(_(u'Aviso'), _(u'O campo Código é composto por maiúsculas ou algarismos'
u' no máximo de 3 carateres.'))
def _dia_valido(self, cr, uid, ids):
record = self.browse(cr, uid, ids[0])
if record.mes:
ultimo_dia = self.teste_meses(int(record.mes), 0)
if record.dia > ultimo_dia or record.dia <= 0:
raise osv.except_osv(_(u'Aviso'), _(u'Para aquele mês o dia é inválido.'))
return True
_constraints = [
(_name_limit, u'', ['name']),
(_restrict_contagem, u'', ['contagem']),
(_dia_valido, u'', ['mes', 'dia']),
]
_sql_constraints = [
('cond_pagam_unique', 'unique (name)', u'Este código já existe!'),
('payment_term_unique', 'unique (payment_term_id)', u'Este Termo de Pagamento já existe!')
]
# ______________________________________________________CODIGOS DE CONTABILIZAÇÃO________________________
class sncp_comum_codigos_contab(osv.Model):
_name = 'sncp.comum.codigos.contab'
_description = u"Codigos de Contabilização"
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name', 'code'], context=context)
res = []
for record in reads:
if record['code'] is False:
code = ''
else:
code = record['code']
result = code + ' ' + record['name']
res.append((record['id'], result))
return res
def get_code(self, cr, uid, ids, fields, arg, context):
codigo = {}
db_sncp_comum_codigos_contab = self.pool.get('sncp.comum.codigos.contab')
for cod_contab_id in ids:
obj = db_sncp_comum_codigos_contab.browse(cr, uid, cod_contab_id)
codigo[cod_contab_id] = obj.item_id.default_code or None
return codigo
def get_ean13(self, cr, uid, ids, fields, arg, context):
ean13 = {}
db_sncp_comum_codigos_contab = self.pool.get('sncp.comum.codigos.contab')
for cod_contab_id in ids:
obj = db_sncp_comum_codigos_contab.browse(cr, uid, cod_contab_id)
ean13[cod_contab_id] = obj.item_id.ean13 or None
return ean13
def get_name_template(self, cr, uid, ids, fields, arg, context):
name_template = {}
db_sncp_comum_codigos_contab = self.pool.get('sncp.comum.codigos.contab')
for cod_contab_id in ids:
obj = db_sncp_comum_codigos_contab.browse(cr, uid, cod_contab_id)
name_template[cod_contab_id] = obj.item_id.name_template or None
return name_template
_columns = {
'item_id': fields.many2one('product.product', u'Item'),
'code': fields.function(get_code, arg=None, method=False, type="char",
string=u'Código', store=True),
'ean13': fields.function(get_ean13, arg=None, method=False, type="char",
string=u'Código EAN 13', store=True),
'natureza': fields.selection([('rec', u'Receita Orçamental'),
('des', u'Despesa Orçamental'),
('ots', u'Operações de tesouraria')], u'Natureza'),
'name': fields.function(get_name_template, arg=None, method=False, type="char",
string=u'Nome', store=True),
'conta_id': fields.many2one('account.account', u'Patrimonial',
domain=[('type', 'not in', ['view'])], ),
'organica_id': fields.many2one('account.analytic.account', u'Orgânica',
domain=[('tipo_dim', '=', 'uo'), ('type', '=', 'normal')], ),
'economica_id': fields.many2one('account.analytic.account', u'Económica',
domain=[('tipo_dim', '=', 'ce'), ('type', '=', 'normal')], ),
'funcional_id': fields.many2one('account.analytic.account', u'Funcional',
domain=[('tipo_dim', '=', 'cf'), ('type', '=', 'normal')], ),
'cond_pag_id': fields.many2one('sncp.comum.cond.pagam', u'Condições de Pagamento'),
# # addons/receita/herancas.py/sncp_comum_codigos_contab met_juros_id'
'cpv_id': fields.many2one('sncp.comum.cpv', u'Vocabulário Comum Contr. Pública'),
}
_order = 'item_id, natureza'
def _restrict_item_id(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0])
return test_item_id(self, cr, uid, obj.item_id.id, obj.natureza)
def write(self, cr, uid, ids, vals, context=None):
if 'natureza' in vals:
if vals['natureza'] == 'des':
vals['cond_pag_id'] = False
vals['met_juros_id'] = False
elif vals['natureza'] in ['rec', 'ots']:
vals['cpv_id'] = False
return super(sncp_comum_codigos_contab, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_despesa_descontos_retencoes
WHERE cod_contab_id = %d
""" % obj.id)
res_desc = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_controlo_config
WHERE cod_contab_id = %d
""" % obj.id)
res_controlo = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_fatura_modelo_linha
WHERE cod_contab_id = %d
""" % obj.id)
res_modelo_linha = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_guia_rec_linhas
WHERE cod_contab_id = %d
""" % obj.id)
res_guia_linhas = cr.fetchall()
if len(res_controlo) != 0 or len(res_desc) != 0 or len(res_modelo_linha) != 0 or \
                    len(res_guia_linhas) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o código de contabilização '
+ obj.code + u' ' + obj.name
+ u' têm associação em:\n'
u'1. Receita\Controlo de Receitas Renováveis\Configuração'
u'\Configurações Gerais.\n'
u'2. Descontos e Retenções.\n'
u'3. Receita\Faturação Recorrente\Modelos de Faturas.\n'
u'4. Guias de Receita.'))
return super(sncp_comum_codigos_contab, self).unlink(cr, uid, ids, context=context)
_constraints = [
(_restrict_item_id, u'', ['item_id']),
]
# ========================================= REFERENCIAS GEOGRAFICAS =====================================
# __________________________________________ Freguesias ________________________________________________
class sncp_comum_freguesias(osv.Model):
_name = 'sncp.comum.freguesias'
_description = u"Freguesias"
_columns = {
'name': fields.char(u'Nome de Freguesia', size=64),
'coord_centro': fields.char(u'Coordenadas do Centro', size=30),
'populacao': fields.integer(u'População'),
'eleitores': fields.integer(u'Eleitores inscritos'),
}
def _restrict_populacao(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0])
if obj.populacao < 0:
raise osv.except_osv(_(u'Aviso'), _(u'A população duma freguesia não pode ser negativa.'))
else:
return True
def _restrict_eleitores(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0])
if obj.eleitores < 0:
raise osv.except_osv(_(u'Aviso'), _(u'O número de eleitores não pode ser negativo.'))
else:
return True
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_comum_bairros
WHERE freguesia_id = %d
""" % obj.id)
res_bairro = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_comum_arruamentos
WHERE freguesia1_id = %d OR freguesia2_id = %d
""" % (obj.id, obj.id))
res_arru = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_controlo
WHERE freguesia_id = %d
""" % obj.id)
res_controlo = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_regproc_aquis_alien_parcel
WHERE freguesia_id = %d
""" % obj.id)
res_alien_parcel = cr.fetchall()
if len(res_controlo) != 0 or len(res_alien_parcel) != 0 or len(res_arru) != 0 or \
len(res_bairro) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se a freguesia '
+ obj.name
+ u' têm associação em:\n'
u'1. Receita\Controlo de Receitas Renováveis.\n'
u'2. Aquisições e Alienações.\n'
u'3. Arruamentos.\n'
u'4. Bairros.'))
        return super(sncp_comum_freguesias, self).unlink(cr, uid, ids, context=context)
_constraints = [
(_restrict_populacao, u'', ['populacao']),
(_restrict_eleitores, u'', ['eleitores']),
]
# __________________________________________ Bairros ________________________________________________
class sncp_comum_bairros(osv.Model):
_name = 'sncp.comum.bairros'
_description = u"Bairros"
_columns = {
'name': fields.char(u'Nome do Bairro', size=64),
'coord_centro': fields.char(u'Coordenadas do Centro', size=30),
'freguesia_id': fields.many2one('sncp.comum.freguesias', u'Freguesia'),
}
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_comum_arruamentos
WHERE bairro_id = %d
""" % obj.id)
res_arru = cr.fetchall()
cr.execute("""
SELECT id
FROM sncp_receita_controlo
WHERE bairro_id = %d
""" % obj.id)
res_controlo = cr.fetchall()
if len(res_arru) != 0 or len(res_controlo) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o bairro '
+ obj.name
+ u' têm associação em:\n'
u'1. Receita\Controlo de Receitas Renováveis.\n'
u'2. Arruamentos.'))
return super(sncp_comum_bairros, self).unlink(cr, uid, ids, context=context)
# __________________________________________ Arruamentos ________________________________________________
class sncp_comum_arruamentos(osv.Model):
_name = 'sncp.comum.arruamentos'
_description = u"Arruamentos"
def open_map(self, cr, uid, ids, context=None):
if context is None:
context = {}
obj = self.browse(cr, uid, ids, context=context)[0]
url = "http://maps.google.com/maps?oi=map&q="
if obj.name:
url += obj.name.replace(' ', '+')
if obj.bairro_id:
url += '+' + obj.bairro_id.name.replace(' ', '+')
if obj.freguesia1_id:
url += '+' + obj.freguesia1_id.name.replace(' ', '+')
return {
'type': 'ir.actions.act_url',
'url': url,
'target': 'new'
}
def open_inicio(self, cr, uid, ids, context=None):
if context is None:
context = {}
obj = self.browse(cr, uid, ids, context=context)[0]
url = "http://maps.google.com/maps?oi=map&q="
if obj.inicio_coord:
url += '+' + obj.inicio_coord.replace(' ', '+')
return {
'type': 'ir.actions.act_url',
'url': url,
'target': 'new'
}
def open_termo(self, cr, uid, ids, context=None):
if context is None:
context = {}
obj = self.browse(cr, uid, ids, context=context)[0]
url = "http://maps.google.com/maps?oi=map&q="
if obj.termo_coord:
url += '+' + obj.termo_coord.replace(' ', '+')
return {
'type': 'ir.actions.act_url',
'url': url,
'target': 'new'
}
_columns = {
'name': fields.char(u'Nome de Arruamento', size=64),
'inicio_desc': fields.char(u'Início', size=64),
'inicio_coord': fields.char(u'Coord. do Início', size=30, ),
'termo_desc': fields.char(u'Termo', size=64),
'termo_coord': fields.char(u'Coordenadas do Termo', size=30),
'bairro_id': fields.many2one('sncp.comum.bairros', u'Bairro'),
'freguesia1_id': fields.many2one('sncp.comum.freguesias', u'Freguesia'),
'freguesia2_id': fields.many2one('sncp.comum.freguesias', u'Freguesia'),
'n1_freg1': fields.char(u'Primeiro número', size=6),
'n2_freg1': fields.char(u'Último número', size=6),
'n1_freg2': fields.char(u'Primeiro número', size=6),
'n2_freg2': fields.char(u'Último número', size=6),
}
def _check_bairro(self, cr, uid, ids, context=None):
        obj = self.browse(cr, uid, ids[0])
        if not obj.bairro_id:
            # no bairro selected: nothing to validate against the freguesias
            return True
        if obj.bairro_id.freguesia_id.id == obj.freguesia1_id.id or\
                obj.bairro_id.freguesia_id.id == obj.freguesia2_id.id:
            return True
        return False
_constraints = [
(_check_bairro, u'O bairro seleccionado não pertence a freguesia', ['bairro_id']),
]
def unlink(self, cr, uid, ids, context=None):
for nid in ids:
obj = self.browse(cr, uid, nid)
cr.execute("""
SELECT id
FROM sncp_receita_controlo
WHERE arruamento_id = %d
""" % obj.id)
res_controlo = cr.fetchall()
if len(res_controlo) != 0:
raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o arruamento '
+ obj.name
+ u' têm associação em:\n'
u'1. Receita\Controlo de Receitas Renováveis.'))
        return super(sncp_comum_arruamentos, self).unlink(cr, uid, ids, context=context)
# ========================================= Avisos e notificações =====================================
class sncp_comum_etiquetas(osv.Model):
_name = 'sncp.comum.etiquetas'
_description = u'Etiquetas de Avisos e Notificações'
_columns = {
'name': fields.char(u'Etiqueta', size=6),
'descr': fields.char(u'Descrição'),
'model_id': fields.many2one('ir.model', u'Modelo de dados'),
'path': fields.char(u'Caminho (notação de pontos)'),
}
| null |
addons/comum/geral.py
|
geral.py
|
py
| 41,282 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.one2many",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.date",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.boolean",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 511,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 561,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 562,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 564,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 567,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.boolean",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 571,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 573,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 586,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 589,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 623,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 645,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 663,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 663,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 708,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.function",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 709,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.function",
"line_number": 711,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 711,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.selection",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.function",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 716,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 718,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 718,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 720,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 720,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 728,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 784,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 805,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 805,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 810,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 810,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 811,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 811,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 812,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 812,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.integer",
"line_number": 813,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 813,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 819,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 826,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 826,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 826,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 868,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 886,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 886,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 891,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 892,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 893,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 893,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 916,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 927,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 927,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 979,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 979,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 980,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 980,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 981,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 981,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 982,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 982,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 983,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 983,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 984,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 984,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 985,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 985,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 986,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 986,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 987,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 987,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 988,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 988,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 989,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 989,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 990,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 990,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 1016,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv",
"line_number": 1016,
"usage_type": "name"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 1016,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.Model",
"line_number": 1026,
"usage_type": "attribute"
},
{
"api_name": "openerp.osv.osv",
"line_number": 1026,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 1031,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 1031,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 1032,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 1032,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.many2one",
"line_number": 1033,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 1033,
"usage_type": "name"
},
{
"api_name": "openerp.osv.fields.char",
"line_number": 1034,
"usage_type": "call"
},
{
"api_name": "openerp.osv.fields",
"line_number": 1034,
"usage_type": "name"
}
] |
166803375
|
from docutils import nodes
from os.path import sep
def rcparam_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
rendered = nodes.Text('rcParams["{}"]'.format(text))
source = inliner.document.attributes['source'].replace(sep, '/')
rel_source = source.split('/doc/', 1)[1]
levels = rel_source.count('/')
refuri = ('../' * levels +
'tutorials/introductory/customizing.html#matplotlib-rcparams')
ref = nodes.reference(rawtext, rendered, refuri=refuri)
return [nodes.literal('', '', ref)], []
def setup(app):
app.add_role("rc", rcparam_role)
return {"parallel_read_safe": True, "parallel_write_safe": True}
| null |
extraPackages/matplotlib-3.0.2/doc/sphinxext/custom_roles.py
|
custom_roles.py
|
py
| 678 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "docutils.nodes.Text",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "docutils.nodes",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.sep",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "docutils.nodes.reference",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "docutils.nodes",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "docutils.nodes.literal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "docutils.nodes",
"line_number": 16,
"usage_type": "name"
}
] |
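The custom_roles.py record above registers a custom :rc: role with Sphinx via app.add_role. As a hedged sketch (the sphinxext/ path and extension name are assumptions for illustration, not taken from the record), such a local extension is typically wired into a project's conf.py like this:

# Hypothetical conf.py excerpt (names assumed): make the local sphinxext/
# directory importable, then enable the custom_roles extension defined above.
import os
import sys

sys.path.append(os.path.abspath('sphinxext'))

extensions = [
    'custom_roles',  # registers the :rc: role in its setup() hook
]

Once registered, :rc:`figure.dpi` in the docs renders as a literal rcParams["figure.dpi"] reference linking to the customizing tutorial.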
456176711
|
#Author : Isanur Sardar
import requests
import csv
st_code = input('Enter the station code (in inverted commas): ')
url = 'http://emis.wbpcb.gov.in/airquality/JSP/aq/fetch_val_ajax.jsp'
myobj = {'stn_code': st_code,
'type': 'date'}
x = requests.post(url, data = myobj)
da = eval(x.text)
main = []
def boxx(x):
ix = int(x)
if (x - ix) > 0.5:
ix = ix +1
else:
pass
return ix
f_name = 'wbpcb-data-IS'+st_code+'.csv'
fxx = open(f_name, 'w', newline='')
writer = csv.writer(fxx)
writer.writerow(["DATE", "NO2","NO2-SI", "PM10","PM10-SI","SO2","SO2-SI"])
dates = da['list']
n = 0
for el in dates:
date = el['dates']
date_el = date.split("/")
day = date_el[1]
month = date_el[0]
year = date_el[2]
    if int(year) < 2010 or int(year) > 2021:
pass
else:
try:
d = day + '/'+ month +'/' + year
data = { 'stn_code': st_code,
'date': d,
'type': 'aqi'}
#print(data)
res = requests.post(url, data = data)
rest = eval(res.text)
#print(rest)
NO2 = rest['list'][0]['value']
            vno2 = float(NO2)
if (vno2<=40):
no2si=vno2*50/40
elif (vno2>40 and vno2<=80):
no2si=50+(vno2-40)*50/40
elif (vno2>80 and vno2<=180):
no2si=100+(vno2-80)*100/100
elif (vno2>180 and vno2<=280):
no2si=200+(vno2-180)*100/100
elif (vno2>280 and vno2<=400):
no2si=300+(vno2-280)*100/120
else:
no2si=400+(vno2-400)*100/120
PM10 = rest['list'][1]['value']
            vpm10 = float(PM10)
if vpm10<=100:
pm10si=vpm10
elif vpm10>100 and vpm10<=250:
pm10si=100+(vpm10-100)*100/150
elif vpm10>250 and vpm10<=350:
pm10si=200+(vpm10-250)
elif vpm10>350 and vpm10<=430:
pm10si=300+(vpm10-350)*100/80
else:
pm10si=400+(vpm10-430)*100/80
SO2 = rest['list'][2]['value']
            vso2 = float(SO2)
if (vso2<=40):
so2si=vso2*50/40
elif (vso2>40 and vso2<=80):
so2si=50+(vso2-40)*50/40
elif (vso2>80 and vso2<=380):
so2si=100+(vso2-80)*100/300
elif (vso2>380 and vso2<=800):
so2si=200+(vso2-380)*100/420
elif (vso2>800 and vso2<=1600):
so2si=300+(vso2-800)*100/800
else:
so2si=400+(vso2-1600)*100/800
texxt = [d, NO2,boxx(no2si), PM10,boxx(pm10si) ,SO2, boxx(so2si)]
main.append(texxt)
#try:
writer.writerow(texxt)
#print('ok')
#except:
# print("Unexpected error:", sys.exc_info()[0])
except:
pass
n = n +1
    print(n)
fxx.close()
| null |
Air-Quality_Data_Crunching/main.py
|
main.py
|
py
| 2,569 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.post",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 43,
"usage_type": "call"
}
] |
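The chained if/elif blocks in the record above hard-code the Indian National AQI sub-index bands for NO2, PM10, and SO2; each band maps a pollutant concentration linearly onto an AQI range. A minimal sketch of the same computation as a generic breakpoint table (the NO2 rows are transcribed from the record; the upper bound of the last band is an assumption, since the original leaves it open-ended):

# Each row: (conc_lo, conc_hi, aqi_lo, aqi_hi); the sub-index is linear within a band.
NO2_BANDS = [
    (0,   40,  0,   50),
    (40,  80,  50,  100),
    (80,  180, 100, 200),
    (180, 280, 200, 300),
    (280, 400, 300, 400),
    (400, 520, 400, 500),  # assumed upper bound for the open-ended top band
]

def sub_index(value, bands):
    for c_lo, c_hi, a_lo, a_hi in bands:
        if value <= c_hi:
            return a_lo + (value - c_lo) * (a_hi - a_lo) / (c_hi - c_lo)
    # Past the table: extrapolate from the last band, as the record's else branch does.
    c_lo, c_hi, a_lo, a_hi = bands[-1]
    return a_lo + (value - c_lo) * (a_hi - a_lo) / (c_hi - c_lo)

For example, sub_index(60, NO2_BANDS) gives 75.0, matching the record's 50+(60-40)*50/40.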
174160712
|
# shows a user's playlists (need to be authenticated via oauth)
import pprint
import sys
import os
import subprocess
import datetime
import time
import spotipy
import spotipy.util as util
#from requests.exceptions import ConnectionError
username = "ADD YOUR USERNAME HERE"
FreshNewShit = "ADD ID OF YOUR PLAYLIST HERE"
THRESHOLD = 7 #Get Tracks of THRESHOLD last days
if len(sys.argv) > 1: THRESHOLD = int(sys.argv[1])
def isNew(date):
split = date.split("-")
if len(split)==3:
release=datetime.datetime(int(split[0]),int(split[1]),int(split[2]))
tday = datetime.datetime.now().day
tmonth = datetime.datetime.now().month
tyear = datetime.datetime.now().year
today = datetime.datetime(tyear, tmonth, tday)
diff = abs((release - today).days)
if diff<=THRESHOLD:
return True
return False
def addAlbum(id,sp,tracksToAdd):
albumtracks = tryOnce(lambda: sp.album_tracks(id))['items']
for track in albumtracks:
addTrack(track,sp,tracksToAdd)
def addPartly(id,sp,tracksToAdd,artists):
for track in tryOnce(lambda: sp.album_tracks(id))['items']:
for a in track['artists']:
if a['id'] in artists:
addTrack(track,sp,tracksToAdd)
def addTrack(track,sp,tracksToAdd):
name = track['name']
if track['id'] not in tracksToAdd and 'Instrumental' not in name:
try:
tracksToAdd.append(track['id'])
except Exception as e:
print(e)
def replaceFreshNewShit():
if token:
sp = spotipy.Spotify(auth=token)
pp = pprint.PrettyPrinter(indent=4)
# Get all followed Artists
print("get Artists")
after = None
artists = []
while(True):
followed_artists = sp.current_user_followed_artists(limit=50,after=after)['artists']['items']
if(len(followed_artists)==0):break
for x in followed_artists:
#pp.pprint(x)
artists.append(x['id'])
after = x['id']
print("Artists:",len(artists))
#Get all relevant albums
print("get Albums")
album_list=[]
for artist in artists:
offset=0
while(True):
albums = tryOnce( lambda: sp.artist_albums(artist,limit=50,offset=offset)['items'] )
if(len(albums)==0):break
for x in albums:
#print(x['release_date'],x['name'])
if(isNew(x['release_date'])and x not in album_list):
album_list.append(x)
#pp.pprint(x)
offset += 50
print("Albums",len(album_list))
#Add Tracks
print("add Tracks")
tracksToAdd = []
singles = []
albums = []
appears_on = []
for album in album_list:
#add albums and singles
            if album['album_group']=='album':albums.append(album['id'])
            elif album['album_group']=='single':singles.append(album['id'])
            else:appears_on.append(album['id'])
print("singles",len(singles),"albums",len(album),"appears_on",len(appears_on))
for x in singles:addAlbum(x,sp,tracksToAdd)
for x in appears_on:addPartly(x,sp,tracksToAdd,artists)
for x in albums:addAlbum(x,sp,tracksToAdd)
tryOnce(lambda: sp.user_playlist_replace_tracks(username,FreshNewShit,tracksToAdd))
else:
print("Can't get token for", username)
def tryOnce(fun):
try: return fun()
except Exception as e:
print("trying again in 3 sec ",e)
time.sleep(3)
        return tryOnce(fun)
scope='user-follow-read playlist-modify-public playlist-read-collaborative playlist-modify-private playlist-read-private '
token = util.prompt_for_user_token(username,scope)
replaceFreshNewShit()
| null |
newSongs.py
|
newSongs.py
|
py
| 3,914 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "spotipy.Spotify",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "spotipy.util.prompt_for_user_token",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "spotipy.util",
"line_number": 120,
"usage_type": "name"
}
] |
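The tryOnce helper in the record above retries any failed call indefinitely and recursively, sleeping three seconds between attempts. A hedged iterative variant with a bounded attempt count (names and limits are illustrative, not from the record):

import time

def retry(fun, attempts=5, delay=3):
    # Like the record's tryOnce, but bounded, and the result of a
    # successful retry is propagated back to the caller.
    for attempt in range(1, attempts + 1):
        try:
            return fun()
        except Exception as e:
            if attempt == attempts:
                raise
            print(f"attempt {attempt} failed ({e}); retrying in {delay}s")
            time.sleep(delay)

Bounding the retries avoids unbounded recursion when, say, the Spotify API is unreachable for longer than a few seconds.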
514509955
|
from enum import Enum
from abc import ABC
from abc import abstractclassmethod
import numpy as np
class OPTION_TYPE(Enum):
CALL_OPTION = 'call'
PUT_OPTION = 'put'
class OptionPricingModel(ABC):
"""Abstract class defining interface for option pricing models."""
def price(self, option_type, *args, **kwargs):
"""Calculates call/put option price according to the specified parameter."""
if option_type == OPTION_TYPE.CALL_OPTION.value:
return self._calculate_call_option_price(*args, **kwargs)
elif option_type == OPTION_TYPE.PUT_OPTION.value:
return self._calculate_put_option_price(*args, **kwargs)
else:
raise Exception("Wrong option type")
@abstractclassmethod
def _calculate_call_option_price(self, S, K, T, r, sigma):
"""Calculates option price for call option."""
raise NotImplementedError()
def _calculate_put_option_price(self, S, K, T, r, sigma):
"""
Calculates option price for put option.
        Put option price is calculated from the call price via put-call parity.
https://www.investopedia.com/terms/p/putcallparity.asp
Formula: C + PV(x) = P + S
=============================
C price of the European call option
PV(x) the present value of the strike price (x),
P price of the European put
S spot price or the current market value of the underlying asset
"""
c = self._calculate_call_option_price(S, K, T, r, sigma)
pv_strike = np.exp(-r * T) * K
put_price = c + pv_strike - S
return put_price
| null |
options/models/base.py
|
base.py
|
py
| 1,688 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "enum.Enum",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "abc.abstractclassmethod",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 45,
"usage_type": "call"
}
] |
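The base.py record above derives the put price from the call price via put-call parity, P = C + K*exp(-r*T) - S. As a quick numeric sanity check (a sketch assuming scipy is available; the Black-Scholes formulas below are standard but not part of the record):

import numpy as np
from scipy.stats import norm

def bs_call(S, K, T, r, sigma):
    d1 = (np.log(S / K) + (r + 0.5 * sigma**2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    return S * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)

def bs_put(S, K, T, r, sigma):
    d1 = (np.log(S / K) + (r + 0.5 * sigma**2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    return K * np.exp(-r * T) * norm.cdf(-d2) - S * norm.cdf(-d1)

S, K, T, r, sigma = 100.0, 100.0, 1.0, 0.05, 0.2
call = bs_call(S, K, T, r, sigma)
put_via_parity = call + K * np.exp(-r * T) - S   # the record's derivation
assert abs(put_via_parity - bs_put(S, K, T, r, sigma)) < 1e-9

The parity route is exact for European options, which is why the record's base class only requires subclasses to implement the call price.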