seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25549551579
|
import logging
from core.connect_db import connect_db
from logger.logger import configLogger
from settings.settings import load_settings
logger = logging.getLogger()
class BaseFetcher(object):
def __init__(self):
super(BaseFetcher, self).__init__()
configLogger()
self._connect_to_db()
def run(self):
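# Keep invoking _run(); on an unhandled exception, log the error and re-raise to stop the fetcher.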
running = True
while running:
try:
self._run()
except Exception as e:
logger.error('Got error while running : %r' % e)
running = False
raise
def _run(self):
pass
def _connect_to_db(self):
settings = load_settings()
mongo_config = settings['dbs']['mongo']
con = connect_db(**mongo_config)
|
cipriantruica/news_diffusion
|
news-spreading-master/fetchers/base_fetcher.py
|
base_fetcher.py
|
py
| 774 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logger.logger",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logger.logger.configLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logger.logger.error",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "settings.settings",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "settings.settings.load_settings",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "settings.settings",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "core.connect_db.connect_db",
"line_number": 34,
"usage_type": "call"
}
] |
22034953643
|
import pandas as pd
import s3fs
def main(event = None, context = None):
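# Lambda-style handler: builds a DataFrame of (hard-coded) scraped LinkedIn profile rows and uploads it to S3 as a CSV.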
print("Start running LinkedInScraper")
values = [['Atreish Ramlakhan',
'New York, New York, United States',
'Katz School at Yeshiva University',
'Graduate Teaching Assistant',
'https://www.linkedin.com/company/16181365/'],
['Yuxiao (Henry) Shen',
'New York, New York, United States',
'The AAT Project (America’s Amazing Teens, LLC)',
'Full Stack PHP Web Developer',
'https://www.linkedin.com/search/results/all/?keywords=The+AAT+Project+%28America%E2%80%99s+Amazing+Teens%2C+LLC%29'],
['Shichao Zhou',
'New York, New York, United States',
'S&P Global Market Intelligence · Internship',
'Data Analyst',
'https://www.linkedin.com/company/162892/'],
['Mahlet Melese', 'New York, New York, United States', None, None, None]]
df = pd.DataFrame(values, columns=["Full Name", "Location", "Most Recent Company", "Job Title", "Company Url"])
###LOAD THE FILE INTO S3####
# prepare csv file name
pathname = 'ia-final2022-csv/'#specify location of s3:/{my-bucket}/
filenames = f"{pathname}linkedIn_info.csv" #name of the filepath and csv file
#encoding must be adjusted to accommodate abnormal characters. Use s3fs to write to S3 bucket
print("Start adding LinkedIn data to csv")
byte_encoded_df = df.to_csv(None, index=False).encode() #encodes file as binary
s3 = s3fs.S3FileSystem(anon=False)
with s3.open(filenames, 'wb') as file:
file.write(byte_encoded_df) #writes byte-encoded file to s3 location
#print success message
print("Successfull uploaded file to location:"+str(filenames))
print("Complete running LinkedInScraper")
|
sczhou0705/IA-FinalProject-YUconnect
|
LambdaDeployment/Code/LinkedInScraper.py
|
LinkedInScraper.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "s3fs.S3FileSystem",
"line_number": 31,
"usage_type": "call"
}
] |
72478034429
|
"""HartPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.core.paginator import Paginator
from django.shortcuts import render
from art.models import Tag,Art
import json
from user import helper
import xadmin as admin
def toIndex(request):
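# Home page view: collect tags that have articles, optionally filter articles by tag, paginate them, and render index.html.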
tags1 = Tag.objects.all()
# locals() turns the current function's local variables into a dict of key-value pairs
#{'request':request,'tags':tags}
tags = []
for tag in tags1:
# Check whether this tag has any articles; if so, append it to tags
if Art.objects.filter(tag=tag):
tags.append(tag)
# annotate adds a field to each tag object (Count('art') counts the articles under each tag)
#
# Read the category id from the request
tag_id = request.GET.get('tag')
if tag_id:
tag_id = int(tag_id)
arts = Art.objects.filter(tag_id=tag_id) # exclude removes the rows for which the condition is true
else:
arts = Art.objects.all()
# # Load all articles
# arts = Art.objects.all()
# Paginate the articles
paginator = Paginator(arts,8) # paginator, 8 articles per page
page = request.GET.get('page')
page = int(page) if page else 1 # read the page parameter from the request; default to 1 if missing
pager = paginator.page(page) # get the data for the current page
# Get the logged-in user's info
login_user= helper.getLoginInfo(request)
return render(request,'index.html',locals())
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^ueditor/', include('DjangoUeditor.urls')),
url(r'^user/',include('user.urls')),
url(r'^art/',include('art.urls')),
url(r'^$', toIndex),
]
|
cjxxu/A_Fiction_web
|
HartPro/urls.py
|
urls.py
|
py
| 2,203 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "art.models.Tag.objects.all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "art.models.Tag.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "art.models.Tag",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "art.models.Art.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "art.models.Art.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "art.models.Art",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "art.models.Art.objects.filter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "art.models.Art.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "art.models.Art",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "art.models.Art.objects.all",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "art.models.Art.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "art.models.Art",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "user.helper.getLoginInfo",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "user.helper",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "xadmin.site",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "django.conf.urls.url",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 65,
"usage_type": "call"
}
] |
27264160200
|
"""
GenT2MF_Trapezoidal.py
Created 3/1/2022
"""
from __future__ import annotations
from typing import List
from juzzyPython.generalType2zSlices.sets.GenT2MF_Prototype import GenT2MF_Prototype
from juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal import IntervalT2MF_Trapezoidal
from juzzyPython.type1.sets.T1MF_Trapezoidal import T1MF_Trapezoidal
class GenT2MF_Trapezoidal(GenT2MF_Prototype):
"""
Class GenT2MF_Trapezoidal
Creates a new instance of GenT2MF_Trapezoidal
Parameters:
primer
primer0
primer1
primers
numberOfzLevels
Functions:
getZSlice
"""
def __init__(self, name: str,primer: IntervalT2MF_Trapezoidal = None,primer0: IntervalT2MF_Trapezoidal = None, primer1: IntervalT2MF_Trapezoidal = None,primers: List[IntervalT2MF_Trapezoidal] = None, numberOfzLevels = None) -> None:
super().__init__(name)
self.DEBUG = False
if primer != None:
stepsize = [0] * 4
self.numberOfzLevels = numberOfzLevels
self.support = primer.getSupport()
self.primer = primer
slices_fs = [0] * numberOfzLevels
self.slices_zValues = [0] * numberOfzLevels
z_stepSize = 1.0/numberOfzLevels
self.zSlices = [0] * numberOfzLevels
stepsize[0] = (primer.getLMF().getA() - primer.getUMF().getA())/(numberOfzLevels-1)/2.0
stepsize[1] = (primer.getLMF().getB() - primer.getUMF().getB())/(numberOfzLevels-1)/2.0
stepsize[2] = (primer.getUMF().getC() - primer.getLMF().getC())/(numberOfzLevels-1)/2.0
stepsize[3] = (primer.getUMF().getD() - primer.getLMF().getD())/(numberOfzLevels-1)/2.0
inner = primer.getLMF().getParameters().copy()
outer = primer.getUMF().getParameters().copy()
self.zSlices[0] = IntervalT2MF_Trapezoidal("Slice 0",primer.getUMF(),primer.getLMF())
self.slices_zValues[0] = z_stepSize
if self.DEBUG:
print(self.zSlices[0].toString()+" Z-Value = "+str(self.slices_zValues[0]))
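# For each higher zLevel, move the lower and upper MF parameters toward each other so the footprint of uncertainty shrinks.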
for i in range(1,numberOfzLevels):
self.slices_zValues[i] = self.slices_zValues[i-1]+z_stepSize
inner[0]-=stepsize[0]
inner[1]-=stepsize[1]
inner[2]+=stepsize[2]
inner[3]+=stepsize[3]
outer[0]+=stepsize[0]
outer[1]+=stepsize[1]
outer[2]-=stepsize[2]
outer[3]-=stepsize[3]
if(inner[0]<outer[0]):
inner[0] = outer[0]
if(inner[1]<outer[1]):
inner[1] = outer[1]
if(inner[2]>outer[2]):
inner[2] = outer[2]
if(inner[3]>outer[3]):
inner[3] = outer[3]
self.zSlices[i] = IntervalT2MF_Trapezoidal("Slice "+str(i), T1MF_Trapezoidal("upper_slice "+str(i),outer),T1MF_Trapezoidal("lower_slice "+str(i),inner))
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
elif primer0 != None and primer1 != None:
if self.DEBUG:
print("Number of zLevels: "+str(numberOfzLevels))
self.numberOfzLevels = numberOfzLevels
self.support = primer0.getSupport()
slices_fs = [0] * numberOfzLevels
self.slices_zValues = [0] * numberOfzLevels
self.zSlices = [0] * numberOfzLevels
self.zSlices[0] = primer0
self.zSlices[0].setName(self.getName()+"_Slice_0")
self.zSlices[-1] = primer1
z_stepSize = 1.0/(numberOfzLevels)
self.slices_zValues[0] = z_stepSize
self.slices_zValues[-1] = 1.0
lsu = (primer1.getUMF().getParameters()[0]-primer0.getUMF().getParameters()[0])/(numberOfzLevels-1)
lsl = (primer0.getLMF().getParameters()[0]-primer1.getLMF().getParameters()[0])/(numberOfzLevels-1)
rsu = (primer0.getUMF().getParameters()[3]-primer1.getUMF().getParameters()[3])/(numberOfzLevels-1)
rsl = (primer1.getLMF().getParameters()[3]-primer0.getLMF().getParameters()[3])/(numberOfzLevels-1)
if self.DEBUG:
print("lsu = "+str(lsu)+" lsl = "+str(lsl)+" rsu = "+str(rsu)+" rsl = "+str(rsl))
inner = primer0.getLMF().getParameters().copy()
outer = primer0.getUMF().getParameters().copy()
for i in range(1,numberOfzLevels-1):
self.slices_zValues[i] = self.slices_zValues[i-1]+z_stepSize
inner[0]-=lsl
inner[3]+=rsl
outer[0]+=lsu
outer[3]-=rsu
if self.DEBUG:
print("Slice "+str(i)+" , inner: "+str(inner[0])+" "+str(inner[1])+" "+str(inner[2])+" outer: "+str(outer[0])+" "+str(outer[1])+" "+str(outer[2]))
self.zSlices[i] = IntervalT2MF_Trapezoidal(self.getName()+"_Slice_"+str(i),T1MF_Trapezoidal("upper_slice "+str(i),outer),T1MF_Trapezoidal("lower_slice "+str(i),inner))
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
elif primers != None:
self.numberOfzLevels = len(primers)
self.support = primers[0].getSupport()
slices_fs = [0] * self.numberOfzLevels
self.slices_zValues = [0] * self.numberOfzLevels
z_stepSize = 1.0/self.numberOfzLevels
self.slices_zValues[0] = z_stepSize
self.zSlices = primers.copy()
for i in range(self.numberOfzLevels):
self.slices_zValues[i] = z_stepSize*(i+1)
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
def clone(self) -> GenT2MF_Trapezoidal:
"""Not implemented"""
print("Not implemented")
return None
def getZSlice(self, slice_number: int) -> IntervalT2MF_Trapezoidal:
"""Return the slice number"""
return self.zSlices[slice_number]
def getLeftShoulderStart(self) -> float:
"""Not implemented"""
print("Not implemented")
return float("Nan")
def getRightShoulderStart(self) -> float:
"""Not implemented"""
print("Not implemented")
return float("Nan")
|
LUCIDresearch/JuzzyPython
|
juzzyPython/generalType2zSlices/sets/GenT2MF_Trapezoidal.py
|
GenT2MF_Trapezoidal.py
|
py
| 6,556 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "juzzyPython.generalType2zSlices.sets.GenT2MF_Prototype.GenT2MF_Prototype",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal.IntervalT2MF_Trapezoidal",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal.IntervalT2MF_Trapezoidal",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal.IntervalT2MF_Trapezoidal",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "juzzyPython.type1.sets.T1MF_Trapezoidal.T1MF_Trapezoidal",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal.IntervalT2MF_Trapezoidal",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "juzzyPython.type1.sets.T1MF_Trapezoidal.T1MF_Trapezoidal",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal.IntervalT2MF_Trapezoidal",
"line_number": 144,
"usage_type": "name"
}
] |
22768172274
|
from backend import credential
import urllib.parse
from google.cloud import storage
import streamlit as st
import os
import json
import fnmatch
import file_io
import utils
import traceback
import io
def init():
creds_str = credential.google_creds()
if not os.path.exists('temp'):
os.makedirs('temp')
with open('temp/google-credentials.json', 'w') as f:
json.dump(creds_str, f)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'temp/google-credentials.json'
storage_client = storage.Client()
st.session_state['storage_client'] = storage_client
def upload_to_bucket(root_dir, file, uid, name, metadata=None, compress=None):
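# Upload a file to the GCS bucket under "<root_dir>/<uid>/", optionally gzip- or xz-compressed, attaching md5/size/owner/time metadata.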
dir = f"{root_dir}/{uid}"
try:
# get file extension
extension = os.path.splitext(file.name)[1]
filename = name + extension
compressed_file_path = None
if compress:
# Compress file
if compress == 'gzip':
compressed_file_path = file_io.compress_to_gzip(file)
filename += '.gz' # Add '.gz' extension to the filename
elif compress == 'xz':
compressed_file_path = file_io.compress_to_xz(file)
filename += '.xz' # Add '.xz' extension to the filename
else:
raise ValueError(f'Unsupported compression type: {compress}. Supported types are "gzip" and "xz".'
f'if you do not want to compress the file, set compress=None')
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{dir}/{filename}")
if compress:
# Open the compressed file in read-binary mode for upload
with open(compressed_file_path, 'rb') as file_obj:
file_content = file_obj.read() # read file content once
default_meta = {
'md5_hash': utils.calculate_md5(file_content),
'size': utils.calculate_size(file_content),
'owner': st.session_state['student_number'],
'time': utils.get_current_time()
}
# Merge the default metadata with the given metadata
meta = {**default_meta, **metadata} if metadata else default_meta
# Set the blob metadata
blob.metadata = meta
blob.upload_from_file(io.BytesIO(file_content))
# Delete the compressed file
os.remove(compressed_file_path)
else:
# If compress is None or False, upload the file as is
# Convert file_content to a BytesIO object and upload
file_content = file.read()
default_meta = {
'md5_hash': utils.calculate_md5(file_content),
'size': utils.calculate_size(file_content),
'owner': st.session_state['student_number'],
'time': utils.get_current_time()
}
# Merge the default metadata with the given metadata
meta = {**default_meta, **metadata} if metadata else default_meta
# Set the blob metadata
blob.metadata = meta
blob.upload_from_file(io.BytesIO(file_content))
except Exception as e:
tb = traceback.format_exc()
st.error(f'❌Failed to upload to the bucket: **{e}** \n\n **Traceback**:\n ```{tb}```')
st.stop()
def delete_from_bucket(root_dir, filenames, uid):
for filename in filenames:
# Decode the filename to ensure spaces are handled correctly
decoded_filename = urllib.parse.unquote(filename)
try:
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{root_dir}/{uid}/{decoded_filename}")
blob.delete()
except Exception as e:
st.error(f'failed to delete file ({root_dir}/{uid}/{decoded_filename}) from bucket. **{e}**')
st.stop()
def download_from_bucket(root_dir, filename, uid):
try:
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{root_dir}/{uid}/{filename}")
if not os.path.exists('temp'):
os.makedirs('temp')
with open(f"temp/{filename}", 'wb') as f:
storage_client.download_blob_to_file(blob, f)
return f"temp/{filename}"
except Exception as e:
st.error(f'failed to download file from bucket. **{e}**')
st.stop()
def get_blobs(bucket, dir, name_pattern, extensions):
blobs = []
if '*' in name_pattern:
# If wildcard is present in name_pattern, process as pattern.
prefix, pattern = name_pattern.split('*', 1)
# List blobs whose names start with the given prefix
for blob in bucket.list_blobs(prefix=f"{dir}/{prefix}"):
for extension in extensions:
if blob.name.endswith(extension) and fnmatch.fnmatch(blob.name, f"{dir}/{name_pattern}"):
blobs.append(blob)
# Once a match is found, no need to check other extensions
break
else:
# If no wildcard is present, process name_pattern as exact file name.
for extension in extensions:
blob = bucket.blob(f"{dir}/{name_pattern}{extension}")
if blob.exists():
blobs.append(blob)
return blobs
def get_public_urls_from_blobs(blobs):
return [blob.public_url for blob in blobs]
def get_blob_md5(blobs):
return [blob.md5_hash for blob in blobs]
def get_blob_metadata(blobs):
return [blob.metadata for blob in blobs]
def get_blob_info(root_dir, uid, name_pattern, extensions, infos):
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
dir = f"{root_dir}/{uid}"
blobs = get_blobs(bucket, dir, name_pattern, extensions)
for info in infos:
if info == 'url':
return get_public_urls_from_blobs(blobs)
else:
metas = get_blob_metadata(blobs)
return [meta[info] for meta in metas]
|
sean1832/Mongrel-Assemblies-DB
|
src/backend/gcp_handler.py
|
gcp_handler.py
|
py
| 6,361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "backend.credential.google_creds",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "backend.credential",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.storage.Client",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "google.cloud.storage",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "streamlit.session_state",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "file_io.compress_to_gzip",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "file_io.compress_to_xz",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "streamlit.secrets",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "utils.calculate_md5",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "utils.calculate_size",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "utils.get_current_time",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "utils.calculate_md5",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "utils.calculate_size",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "utils.get_current_time",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "streamlit.error",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "streamlit.stop",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.unquote",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "streamlit.session_state",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "streamlit.secrets",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "streamlit.error",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "streamlit.stop",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "streamlit.secrets",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "streamlit.error",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "streamlit.stop",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "fnmatch.fnmatch",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "streamlit.secrets",
"line_number": 164,
"usage_type": "attribute"
}
] |
23541886221
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 15:37:26 2022
@author: jeros
Hu moments analysis
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import PercentFormatter
def plotter(huN = 1, bananas = None,oranges = None,lemons = None):
# if bananas is not None:
# plt.hist(bananas[0,:],bins, alpha=0.5, label='b',weights = weights_b )
# if oranges is not None:
# plt.hist(oranges[0,:],bins, alpha=0.5, label='o',weights = weights_o)
'''Hu moment number histogram'''
if huN == 0:
bins = np.linspace(2.85,3.22,100)
if huN == 1:
bins = np.linspace(5.5,12.5,100)
if huN == 2:
bins = np.linspace(10,16,100)
if huN == 3:
bins = np.linspace(9.8,19,100)
if huN == 4:
bins = np.linspace(-35,35,100)
if huN == 5:
bins = np.linspace(-25,25,100)
if huN == 6:
bins = np.linspace(-35,35,100)
#plt.hist([bananas[huN,:], oranges[huN,:],lemons[huN,:]],label=['B', 'O','L'])
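# Overlay the banana/orange/lemon histograms for the selected Hu moment, normalized as densities.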
plt.hist([bananas[huN,:], oranges[huN,:],lemons[huN,:]], bins,label=['B', 'O','L'],density = True)
plt.title('Hu'+str(huN))
'''Hu moment number 2 histogram'''
bins = np.linspace(10,16,100)
plt.legend(loc='upper right')
plt.autoscale(enable=True, axis='x', tight=True)
#plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.show()
|
jeroserpa/FruitClassifier
|
histogram_analisys.py
|
histogram_analisys.py
|
py
| 1,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.autoscale",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
}
] |
29510374823
|
import re
import pandas as pd
import fool
from copy import copy
from starter_code1.NER.ner01 import *
test_data = pd.read_csv('../data/info_extract/test_data.csv', encoding='gb2312', header=0)
# print(test_data.head())
test_data['ner'] = None
ner_id = 1001
ner_dict_new = {} # map from entity name to id
ner_dict_reverse_new = {} # reverse map from id to entity name
for i in range(len(test_data)):
sentence = copy(test_data.iloc[i, 1])
# TODO: run fool named-entity recognition to get the words and ners results
words, ners = fool.analysis(sentence)
# print(words)
# print(ners)
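# Sort entities by start offset in descending order so that replacing later spans first does not shift earlier offsets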
ners[0].sort(key=lambda x: x[0], reverse=True)
for start, end, ner_type, ner_name in ners[0]:
if ner_type == 'company' or ner_type == 'person':
# ner_dict_new
lst = main_extract(ner_name, stop_word, d_4_delete, d_city_province)
company_main_name = ''.join(lst) # extract the main part of the company name; companies sharing the same main part are merged into one entity
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
sentence = sentence[:start] + ' ner_' + str(ner_dict_new[company_main_name]) + '_ ' + sentence[end:]
test_data.iloc[i, -1] = sentence
X_test = test_data[['ner']]
# Process the train data: run entity recognition with the open-source tool and store entities via the entity-unification function
train_data = pd.read_csv('../data/info_extract/train_data.csv', encoding='gb2312', header=0)
train_data['ner'] = None
for i in range(len(train_data)):
# Distinguish positive and negative samples
if train_data.iloc[i, :]['member1'] == '0' and train_data.iloc[i, :]['member2'] == '0':
sentence = copy(train_data.iloc[i, 1])
# TODO: run fool entity recognition to get the words and ners results
words, ners = fool.analysis(sentence)
ners[0].sort(key=lambda x: x[0], reverse=True)
for start, end, ner_type, ner_name in ners[0]:
# TODO: call the entity-unification function and store the unified entity,
# then increment ner_id
if ner_type == 'company' or ner_type == 'person':
company_main_name = ''.join(
main_extract(ner_name, stop_word, d_4_delete, d_city_province)) # extract the main company name
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
# Replace the entity name in the sentence with its id
sentence = sentence[:start] + ' ner_' + str(ner_dict_new[company_main_name]) + '_ ' + sentence[end:]
train_data.iloc[i, -1] = sentence
else:
# For positive samples in the training set, also replace the already-labeled entities with their ids
sentence = copy(train_data.iloc[i, :])['sentence']
for company_main_name in [train_data.iloc[i, :]['member1'], train_data.iloc[i, :]['member2']]:
# TODO: call the entity-unification function and store the unified entity,
# then increment ner_id
company_main_name = ''.join(
main_extract(company_main_name, stop_word, d_4_delete, d_city_province)) # extract the main company name
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
# Replace the entity name in the sentence with its id
sentence = re.sub(company_main_name, ' ner_%s_ ' % (str(ner_dict_new[company_main_name])), sentence)
train_data.iloc[i, -1] = sentence
y = train_data.loc[:, ['tag']]
train_num = len(train_data)
X_train = train_data[['ner']]
# Put train and test together to extract features
# X = pd.concat([X_train, X_test])
# X.to_csv('./x.csv', index=False)
# print(X)
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
import numpy as np
# TODO: define the parameter grid to search over
parameters = {'C': np.logspace(-3, 3, 7)}
# TODO: choose the model
lr = LogisticRegression()
# TODO: use GridSearchCV
clf = GridSearchCV(lr, parameters, cv=5)
clf.fit(X_train, y)
# TODO: classify the test data
predict = clf.predict(X_test)
predict_prob = clf.predict_proba(X_test)
print(predict)
print(predict_prob)
|
jiangq195/tanxin
|
starter_code1/NER/ner02.py
|
ner02.py
|
py
| 4,477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fool.analysis",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "fool.analysis",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 106,
"usage_type": "call"
}
] |
63614571
|
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt # for making figures
import os
# read in all the words
current_dir = os.getcwd()
words = open(current_dir+'/makemore/names.txt', 'r').read().splitlines()
# print(f"{words[:8]}")
# build the vocabulary of characters and mappings to/from integers
chars = sorted(list(set(''.join(words))))
stoi = {s:i+1 for i,s in enumerate(chars)}
stoi['.'] = 0
itos = {i:s for s,i in stoi.items()}
# print(itos)
# build the dataset
block_size = 3 # context length: how many characters do we take to predict the next one?
def build_dataset(words):
X, Y = [], []
for w in words:
#print(w)
context = [0] * block_size
for ch in w + '.':
ix = stoi[ch]
X.append(context)
Y.append(ix)
#print(''.join(itos[i] for i in context), '--->', itos[ix])
context = context[1:] + [ix] # crop and append
X = torch.tensor(X)
Y = torch.tensor(Y)
# print(X.shape, Y.shape)
return X, Y
import random
random.seed(42)
random.shuffle(words)
n1 = int(0.8*len(words))
n2 = int(0.9*len(words))
Xtr, Ytr = build_dataset(words[:n1])
Xdev, Ydev = build_dataset(words[n1:n2])
Xte, Yte = build_dataset(words[n2:])
g = torch.Generator().manual_seed(42) # for reproducibility
C = torch.randn((27, 10), generator=g)
W1 = torch.randn((30, 200), generator=g)
W2 = torch.randn((200, 27), generator=g)
parameters = [C, W1, W2]
for p in parameters:
p.requires_grad = True
lri = []
lossi = []
stepi = []
batch = 32
for i in range(100):
# minibatch construct
ix = torch.randint(0, Xtr.shape[0], (batch,))
# forward pass
emb = C[Xtr[ix]] # (32, 3, 10)
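# flatten the 3 context embeddings (3*10 = 30 values) into one row per example before the hidden layer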
h = torch.tanh(emb.view(-1, 30) @ W1) # (32, 200)
logits = h @ W2 # (32, 27)
loss = F.cross_entropy(logits, Ytr[ix])
#print(loss.item())
# backward pass
for p in parameters:
p.grad = None
loss.backward()
# update
lr = 0.1
for p in parameters:
p.data += -lr * p.grad
# track stats
#lri.append(lre[i])
stepi.append(i)
lossi.append(loss.item())
#print(loss.item())
plt.plot(stepi, lossi)
plt.show()
# sample from the model
g = torch.Generator().manual_seed(2147483647 + 10)
for _ in range(5):
out = []
context = [0] * block_size # initialize with all ...
while True:
emb = C[torch.tensor([context])] # (1,block_size,d)
h = torch.tanh(emb.view(1, -1) @ W1)
logits = h @ W2
probs = F.softmax(logits, dim=1)
ix = torch.multinomial(probs, num_samples=1, generator=g).item()
context = context[1:] + [ix]
out.append(ix)
if ix == 0:
break
print(''.join(itos[i] for i in out))
|
code-cp/bitesize_ai_rs
|
makemore/scripts/mlp.py
|
mlp.py
|
py
| 2,642 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.Generator",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.randint",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.Generator",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.multinomial",
"line_number": 106,
"usage_type": "call"
}
] |
86625823283
|
#! /usr/bin/env python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Linearly normalize intensity to between 0 and 255')
parser.add_argument("input_spec", type=str, help="Input specification")
parser.add_argument("out_version", type=str, help="Output image version")
args = parser.parse_args()
import sys
import os
sys.path.append(os.environ['REPO_DIR'] + '/utilities')
from utilities2015 import *
from data_manager import *
from metadata import *
from distributed_utilities import *
from learning_utilities import *
input_spec = load_ini(args.input_spec)
image_name_list = input_spec['image_name_list']
stack = input_spec['stack']
prep_id = input_spec['prep_id']
if prep_id == 'None':
prep_id = None
resol = input_spec['resol']
version = input_spec['version']
if version == 'None':
version = None
from scipy.ndimage.interpolation import map_coordinates
from skimage.exposure import rescale_intensity, adjust_gamma
from skimage.transform import rotate
# for section in set(metadata_cache['valid_sections_all'][stack]) - set(metadata_cache['valid_sections'][stack]):
# for section in metadata_cache['valid_sections'][stack]:
for image_name in image_name_list:
# print "Section", section
t = time.time()
img = DataManager.load_image_v2(stack=stack, prep_id=prep_id, fn=image_name, version=version, resol=resol)
sys.stderr.write('Load image: %.2f seconds.\n' % (time.time() - t))
t = time.time()
tb_mask = DataManager.load_thumbnail_mask_v3(stack=stack, prep_id=None, fn=image_name)
# raw_mask = rescale_by_resampling(tb_mask, new_shape=(img.shape[1], img.shape[0]))
raw_mask = resize(tb_mask, img.shape) > .5
save_data(raw_mask,
DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version='mask', resol=resol, ext='bp'),
upload_s3=False)
sys.stderr.write('Rescale mask: %.2f seconds.\n' % (time.time() - t))
t = time.time()
mean_std_all_regions = []
cx_cy_all_regions = []
region_size = 5000
region_spacing = 3000
# for cx in range(region_size/2, img.shape[1]-region_size/2+1, region_spacing):
# for cy in range(region_size/2, img.shape[0]-region_size/2+1, region_spacing):
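# Slide a coarse grid over the image and record the mean/std of the masked pixels in each overlapping region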
for cx in range(0, img.shape[1], region_spacing):
for cy in range(0, img.shape[0], region_spacing):
region = img[max(cy-region_size/2, 0):min(cy+region_size/2+1, img.shape[0]-1),
max(cx-region_size/2, 0):min(cx+region_size/2+1, img.shape[1]-1)]
region_mask = raw_mask[max(cy-region_size/2, 0):min(cy+region_size/2+1, img.shape[0]-1),
max(cx-region_size/2, 0):min(cx+region_size/2+1, img.shape[1]-1)]
if np.count_nonzero(region_mask) == 0:
continue
mean_std_all_regions.append((region[region_mask].mean(), region[region_mask].std()))
cx_cy_all_regions.append((cx, cy))
sys.stderr.write('Compute mean/std for sample regions: %.2f seconds.\n' % (time.time() - t))
t = time.time()
mean_map = resample_scoremap(sparse_scores=np.array(mean_std_all_regions)[:,0],
sample_locations=cx_cy_all_regions,
gridspec=(region_size, region_spacing, img.shape[1], img.shape[0], (0,0)),
downscale=4,
interpolation_order=2)
sys.stderr.write('Interpolate mean map: %.2f seconds.\n' % (time.time() - t)) #10s
t = time.time()
mean_map = rescale_by_resampling(mean_map, new_shape=(img.shape[1], img.shape[0]))
sys.stderr.write('Scale up mean map: %.2f seconds.\n' % (time.time() - t)) #30s
t = time.time()
std_map = resample_scoremap(sparse_scores=np.array(mean_std_all_regions)[:,1],
sample_locations=cx_cy_all_regions,
gridspec=(region_size, region_spacing, img.shape[1], img.shape[0], (0,0)),
downscale=4,
interpolation_order=2)
sys.stderr.write('Interpolate std map: %.2f seconds.\n' % (time.time() - t)) #10s
t = time.time()
std_map = rescale_by_resampling(std_map, new_shape=(img.shape[1], img.shape[0]))
sys.stderr.write('Scale up std map: %.2f seconds.\n' % (time.time() - t)) #30s
# Save mean/std results.
fp = DataManager.get_intensity_normalization_result_filepath(what='region_centers', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
np.savetxt(fp, cx_cy_all_regions)
fp = DataManager.get_intensity_normalization_result_filepath(what='mean_std_all_regions', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
np.savetxt(fp, mean_std_all_regions)
fp = DataManager.get_intensity_normalization_result_filepath(what='mean_map', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
bp.pack_ndarray_file(mean_map.astype(np.float16), fp)
fp = DataManager.get_intensity_normalization_result_filepath(what='std_map', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
bp.pack_ndarray_file(std_map.astype(np.float16), fp)
# Export normalized image.
t = time.time()
raw_mask = raw_mask & (std_map > 0)
img_normalized = np.zeros(img.shape, np.float32)
img_normalized[raw_mask] = (img[raw_mask] - mean_map[raw_mask]) / std_map[raw_mask]
sys.stderr.write('Normalize: %.2f seconds.\n' % (time.time() - t)) #30s
t = time.time()
# FIX THIS! THIS only save uint16, not float16. Need to save as bp instead.
# img_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=None, version='NtbNormalizedFloat', resol='down8', section=section, )
# create_parent_dir_if_not_exists(img_fp)
# imsave(img_fp, img_normalized[::8, ::8].astype(np.float16))
save_data(img_normalized.astype(np.float16),
DataManager.get_intensity_normalization_result_filepath(what='normalized_float_map', stack=stack, fn=image_name),
upload_s3=False)
sys.stderr.write('Save float version: %.2f seconds.\n' % (time.time() - t)) #30s
# t = time.time()
# img_normalized_uint8 = rescale_intensity_v2(img_normalized, -1, 6)
# sys.stderr.write('Rescale to uint8: %.2f seconds.\n' % (time.time() - t)) #30s
# t = time.time()
# img_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=None, version='NtbNormalized', resol='raw', section=section)
# create_parent_dir_if_not_exists(img_fp)
# imsave(img_fp, img_normalized_uint8)
# sys.stderr.write('Save uint8 version: %.2f seconds.\n' % (time.time() - t)) #30s
# Export histogram.
plt.hist(img_normalized[raw_mask].flatten(), bins=100, log=True);
fp = DataManager.get_intensity_normalization_result_filepath(what='float_histogram_png', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
plt.savefig(fp)
plt.close();
# hist_fp = DataManager.get_intensity_normalization_result_filepath(what='float_histogram', stack=stack, section=section)
# create_parent_dir_if_not_exists(hist_fp)
# hist, bin_edges = np.histogram(img_normalized[valid_mask].flatten(), bins=np.arange(0,201,5));
# plt.bar(bin_edges[:-1], np.log(hist));
# plt.xticks(np.arange(0, 200, 20), np.arange(0, 200, 20));
# plt.xlabel('Normalized pixel value (float)');
# plt.title(metadata_cache['sections_to_filenames'][stack][section])
# plt.savefig(hist_fp)
# plt.close();
gamma_map = img_as_ubyte(adjust_gamma(np.arange(0, 256, 1) / 255., 8.))
low = -2.
high = 50.
for image_name in image_name_list:
img_normalized = load_data(
DataManager.get_intensity_normalization_result_filepath(what='normalized_float_map', stack=stack, fn=image_name),
download_s3=False)
t = time.time()
img_normalized_uint8 = rescale_intensity_v2(img_normalized, low, high)
sys.stderr.write('Rescale to uint8: %.2f seconds.\n' % (time.time() - t))
t = time.time()
raw_mask = load_data(DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version='mask', resol=resol, ext='bp'),
download_s3=False)
img_normalized_uint8[~raw_mask] = 0
sys.stderr.write('Load mask: %.2f seconds.\n' % (time.time() - t))
img = 255 - img_normalized_uint8
save_data(gamma_map[img],
DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version=args.out_version, resol=resol),
upload_s3=False)
|
mistycheney/MouseBrainAtlas
|
preprocess/normalize_intensity_adaptive.py
|
normalize_intensity_adaptive.py
|
py
| 8,733 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "skimage.exposure.adjust_gamma",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 195,
"usage_type": "attribute"
}
] |
4034606024
|
import argparse
import glob
import multiprocessing as mp
import os
import shutil
import time
import cv2
import tqdm
import numpy as np
from detectron2.config import get_cfg
from partseg import add_partseg_config
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_partseg_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = (
args.confidence_threshold
)
# load weights from the default path
if not args.custom_weights:
default_weights = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
if os.path.exists(default_weights):
print("Use the default weights.")
cfg.MODEL.WEIGHTS = default_weights
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
parser.add_argument(
"--config-file",
default="configs/mask_rcnn_R_50_FPN_chair.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--root-dir",
type=str,
help="root directory",
default="datasets/images/test/chair",
)
parser.add_argument(
"--output-dir",
type=str,
help="path to output",
default="datasets/predictions/test/chair",
)
parser.add_argument("--shape-list-fn", type=str, help="path to shape list")
parser.add_argument("--start", type=int, default=0)
parser.add_argument("--end", type=int, default=None)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--custom-weights", action="store_true", help="whether to use custom weights"
)
parser.add_argument(
"--include-image", action="store_true", help="whether to include input images"
)
parser.add_argument("--vis", action="store_true")
parser.add_argument("--with-score", action="store_true")
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
predictor = DefaultPredictor(cfg)
root_dir = args.root_dir
if args.shape_list_fn:
with open(args.shape_list_fn, "r") as f:
image_ids = f.readlines()
image_ids = [x.strip() for x in image_ids]
else:
image_ids = os.listdir(root_dir)
image_ids = [x for x in image_ids if os.path.isdir(os.path.join(root_dir, x))]
image_ids = sorted(image_ids)
image_ids = image_ids[args.start : args.end]
for image_id in tqdm.tqdm(image_ids[:]):
file_name = os.path.join(root_dir, image_id, "img.png")
image = read_image(file_name, format="BGR")
predictions = predictor(image)
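# Move predictions to CPU and write each predicted part mask out as an 8-bit image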
instances = predictions["instances"].to("cpu")
pred_masks = instances.pred_masks.numpy() # [N, H, W]
pred_masks = (pred_masks * 255).astype(np.uint8)
# for pred_mask in pred_masks:
# cv2.imshow('mask', pred_mask)
# if cv2.waitKey(0) == 27:
# break # esc to quit
output_dir = os.path.join(args.output_dir, image_id)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# save
for idx, pred_mask in enumerate(pred_masks):
output_file_name = os.path.join(output_dir, f"partmask_{idx}.png")
cv2.imwrite(output_file_name, pred_mask)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image_rgb = image[:, :, ::-1]
visualizer = Visualizer(image_rgb, None, instance_mode=ColorMode.IMAGE)
if not args.with_score:
# workaround to suppress visualizing scores
instances.remove("scores")
vis_output = visualizer.draw_instance_predictions(predictions=instances)
if args.vis:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
else:
output_file_name = os.path.join(output_dir, f"partmask_all.png")
vis_output.save(output_file_name)
if args.include_image:
shutil.copy(file_name, os.path.join(output_dir, "img.png"))
|
hansongfang/CompNet
|
PartSeg/predict_net.py
|
predict_net.py
|
py
| 5,233 |
python
|
en
|
code
| 33 |
github-code
|
6
|
[
{
"api_name": "detectron2.config.get_cfg",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "partseg.add_partseg_config",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "argparse.REMAINDER",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.set_start_method",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.logger.setup_logger",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.logger.setup_logger",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "detectron2.engine.defaults.DefaultPredictor",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "detectron2.data.detection_utils.read_image",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.visualizer.Visualizer",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.visualizer.ColorMode.IMAGE",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "detectron2.utils.visualizer.ColorMode",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "cv2.namedWindow",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 154,
"usage_type": "attribute"
}
] |
386960757
|
import os
from flask import Response,Flask, request
from flask_cors import CORS
from insgraph import util, instagram
def create_app(test_config=None):
"""Create and configure an instance of the Flask application."""
app = Flask(__name__, instance_relative_config=True)
print("zhuangjb flask start.....:"+__name__)
CORS(app)
app.config.from_mapping(
# a default secret that should be overridden by instance config
SECRET_KEY='dev',
# store the database in the instance folder
DATABASE=os.path.join(app.instance_path, 'insgraph.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.update(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/hello')
def hello():
return 'Hello, World!'
@app.before_request
def option_replay():
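# Answer CORS preflight (OPTIONS) requests directly with permissive headers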
if request.method =='OPTIONS':
resp = Response('')
print('xxx')
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Headers'] = '*'
resp.headers['Access-Control-Request-Method'] = request.headers['Access-Control-Request-Method']
return resp
# @app.after_request
# def set_allow_origin(resp):
# h = resp.headers
# if request.method != 'OPTIONS' and 'Origin' in request.headers:
# h['Access-Control-Allow-Origin'] = request.headers['Origin']
# register the database commands
from insgraph import db
db.init_app(app)
# apply the blueprints to the app
from insgraph import auth, user,case
app.register_blueprint(auth.bp)
app.register_blueprint(user.bp)
app.register_blueprint(case.bp)
app.register_blueprint(instagram.bp)
# make url_for('index') == url_for('blog.index')
# in another app, you might define a separate main index here with
# app.route, while giving the blog blueprint a url_prefix, but for
# the tutorial the blog will be the main index
app.add_url_rule('/', endpoint='index')
return app
|
jiebinzhuang/insgraph-flask
|
insgraph/__init__.py
|
__init__.py
|
py
| 2,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.Response",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "insgraph.db.init_app",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "insgraph.db",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "insgraph.auth.bp",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "insgraph.auth",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "insgraph.user.bp",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "insgraph.user",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "insgraph.case.bp",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "insgraph.case",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "insgraph.instagram.bp",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "insgraph.instagram",
"line_number": 61,
"usage_type": "name"
}
] |
30409488540
|
import os
import pytest
import logging
import cocotb
from cocotb.clock import Clock, Timer
from cocotb.binary import BinaryValue
from cocotb.runner import get_runner
from cocotb.triggers import FallingEdge
from cocotbext.uart import UartSource, UartSink
src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tests_dir = os.path.dirname(os.path.abspath(__file__))
sim_build = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sim_build", "soc")
@cocotb.test()
async def check_uart_recv(dut):
""" Test that UART is working """
clock = Clock(dut.clk, 10, units="ns") # Create a 10us period clock on port clk
cocotb.start_soon(clock.start()) # Start the clock
log = logging.getLogger(f"check_uart_recv")
dut.RESET.value = BinaryValue('1')
await FallingEdge(dut.clk)
dut.RESET.value = BinaryValue('0')
await FallingEdge(dut.clk)
rxd = UartSource(dut.RXD, baud=115200, bits=8)
txd = UartSink(dut.TXD, baud=115200, bits=8)
await rxd.write(b'ABCDE')
for i in range(int(1e9/115200/10) * 10):
await FallingEdge(dut.clk)
val = await txd.read()
assert val == b'E'
"""
LI(gp, 32'h0200_0000);
ADD(x12,x0,x0);
ADDI(x2,x0,65);
Label(L0_);
LW(x12, gp, 8);
BNE(x12, x2, LabelRef(L0_));
SW(x12, gp, 8);
EBREAK();
"""
@pytest.mark.skip(reason="no way of currently testing this")
def test_runner():
verilog_sources = [os.path.join(src_dir, "main", "soc.sv")]
sim = os.getenv("SIM", "icarus")
runner = get_runner(sim)()
os.makedirs(os.path.abspath(sim_build), exist_ok=True)
with open(os.path.abspath(os.path.join(sim_build, "cmd.f")), 'w') as cmd:
cmd.write('+timescale+1ns/1ps')
runner.build(
verilog_sources=verilog_sources,
toplevel="soc",
defines=["DEFINE=4", "BENCH=1"],
includes=[os.path.join(src_dir, "main")],
extra_args=[
'-s', 'soc',
'-f', os.path.abspath(os.path.join(sim_build, "cmd.f"))
],
build_dir=sim_build
)
runner.test(
python_search=[tests_dir],
toplevel="soc",
py_module="test_soc",
)
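# Hedged usage note (assumption, not taken from the original repo): with the skip
# marker removed, this runner is normally driven through pytest, for example
#   SIM=icarus pytest -k test_runner
# The SIM variable and the pytest invocation above are illustrative only.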
|
ryarnyah/zenika-fpga-pres
|
demo/fpga-risc-cpu/src/test/test_soc.py
|
test_soc.py
|
py
| 2,170 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cocotb.clock.Clock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cocotb.start_soon",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cocotb.binary.BinaryValue",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cocotb.triggers.FallingEdge",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cocotb.binary.BinaryValue",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cocotb.triggers.FallingEdge",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cocotbext.uart.UartSource",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cocotbext.uart.UartSink",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cocotb.triggers.FallingEdge",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cocotb.test",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cocotb.runner.get_runner",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pytest.mark.skip",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 52,
"usage_type": "attribute"
}
] |
32644614877
|
"""
Given a universal mesh, record the placement of guide nodes relative to the
universal mesh, and then reposition the guides to that relative position should
the universal mesh change from character to character.
from mgear.shifter import relativeGuidePlacement
reload(relativeGuidePlacement)
Execute the following chunk to record initial placement ----------------------
relativeGuidePlacement.exportGuidePlacement(filepath="Y:/tmp/exampleFile.json",
skip_strings=["hair"])
Load new universal guide mesh with new proportions
Execute the following lines to move the guides to their new position ---------
relativeGuidePlacement.importGuidePlacement(filepath="Y:/tmp/exampleFile.json")
Attributes:
GUIDE_ROOT (str): name of the root guide node
SKIP_CONTAINS (list): nodes to skip if they contain the string
SKIP_CRAWL_NODES (list): nodes to skip crawling hierarchy
SKIP_NODETYPES (list): skip the query of certain node types
SKIP_PLACEMENT_NODES (TYPE): nodes to skip updating their positions
SKIP_SUFFIX (list): skip if node ends with
UNIVERSAL_MESH_NAME (str): default name of the universal mesh
"""
# python
import json
import math
# dcc
import maya.cmds as mc
import pymel.core as pm
import maya.OpenMaya as om
# mgear
from mgear.core import utils
from mgear.core import vector
from mgear.core import transform
from mgear.core import meshNavigation
# constants -------------------------------------------------------------------
# Designate the root of the hierarchy to crawl
GUIDE_ROOT = "guide"
# Nodes to avoid checking the hierarchy
DEFAULT_SKIP_CRAWL_NODES = ("controllers_org",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
# nodes that will not have their positions updated
DEFAULT_SKIP_PLACEMENT_NODES = ("controllers_org",
"global_C0_root",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
try:
SKIP_CRAWL_NODES
SKIP_PLACEMENT_NODES
except NameError:
SKIP_CRAWL_NODES = list(DEFAULT_SKIP_CRAWL_NODES)
SKIP_PLACEMENT_NODES = list(DEFAULT_SKIP_PLACEMENT_NODES)
# skip the node if it even contains the characters in the list
# eg SKIP_CONTAINS = ["hair"]
SKIP_CONTAINS = []
# Avoid nodes of a specified suffix
SKIP_SUFFIX = ["sizeRef", "crv", "crvRef", "blade"]
# Types of nodes to avoid
SKIP_NODETYPES = ["aimConstraint", "pointConstraint", "parentConstraint"]
UNIVERSAL_MESH_NAME = "skin_geo_setup"
# general functions -----------------------------------------------------------
def crawlHierarchy(parentNode,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=None):
"""recursive function to crawl a hierarchy of nodes to return decendents
Args:
parentNode (str): node to query
ordered_hierarchy (str): list to continuesly pass itself
skip_crawl_nodes (list): nodes to skip crawl
"""
if not skip_strings:
skip_strings = []
for node in mc.listRelatives(parentNode, type="transform") or []:
if node in skip_crawl_nodes or node in ordered_hierarchy:
continue
if node.endswith(tuple(SKIP_SUFFIX)):
continue
if mc.objectType(node) in SKIP_NODETYPES:
continue
if [True for skip_str in skip_strings
if skip_str.lower() in node.lower()]:
continue
ordered_hierarchy.append(node)
crawlHierarchy(node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
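# Minimal usage sketch (illustrative only; the skip string is an assumption):
#   ordered_hierarchy = []
#   crawlHierarchy(GUIDE_ROOT, ordered_hierarchy, SKIP_CRAWL_NODES, skip_strings=["hair"])
#   # ordered_hierarchy now holds the crawled guide transforms in depth-first order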
def getPostionFromLoop(vertList):
"""Get the center position from the list of edge ids provided
Args:
vertList (list): list of edge ids
Returns:
list: of translate XYZ, world space
"""
bb = mc.exactWorldBoundingBox(vertList)
pos = ((bb[0] + bb[3]) / 2, (bb[1] + bb[4]) / 2, (bb[2] + bb[5]) / 2)
return pos
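# Illustrative example (component names are made up): calling
#   getPostionFromLoop(["skin_geo_setupShape.f[10]", "skin_geo_setupShape.f[11]"])
# returns the world-space centre of the bounding box around those faces.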
def getVertMatrix(closestVert):
"""create a matrix from the closestVert and the normals of the surrounding
faces for later comparison
Args:
node (str): guide node to query
closestVert (str): closest vert to guide
Returns:
list: of matrices
"""
closestVert = pm.PyNode(closestVert)
faces = closestVert.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
orig_ref_matrix = pm.dt.TransformationMatrix()
orig_ref_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
orig_ref_matrix.setRotation(normal_rot)
return orig_ref_matrix
def getOrient(normal, tangent, ro=0):
"""convert normal direction into euler rotations
Args:
normal (list): of nomel values
ro (int, optional): rotate order
Returns:
list: of euler rotations
"""
kRotateOrders = [om.MEulerRotation.kXYZ, om.MEulerRotation.kYZX,
om.MEulerRotation.kZXY, om.MEulerRotation.kXZY,
om.MEulerRotation.kYXZ, om.MEulerRotation.kZYX, ]
cross = [normal[1] * tangent[2] - normal[2] * tangent[1],
normal[2] * tangent[0] - normal[0] * tangent[2],
normal[0] * tangent[1] - normal[1] * tangent[0]]
tMatrix = normal + [0] + tangent + [0] + cross + [0, 0, 0, 0, 1]
mMatrix = om.MMatrix()
om.MScriptUtil.createMatrixFromList(tMatrix, mMatrix)
tmMatrix = om.MTransformationMatrix(mMatrix)
rotate = tmMatrix.eulerRotation().reorder(kRotateOrders[ro])
RAD_to_DEG = (180 / math.pi)
return [rotate[0] * RAD_to_DEG,
rotate[1] * RAD_to_DEG,
rotate[2] * RAD_to_DEG]
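# Worked example (a sketch, not from the original code): an X-axis normal with a
# Y-axis tangent builds an identity frame, so
#   getOrient([1, 0, 0], [0, 1, 0])  # -> roughly [0.0, 0.0, 0.0] degrees
# Any other normal/tangent pair yields the euler angles that aim X along the normal.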
def getRepositionMatrix(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
Args:
node_matrix (pm.dt.Matrix): matrix of the guide
orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
mr_orig_ref_matrix (pm.dt.Matrix): matrix from the mirrored original vert
closestVerts (list): names of the closest vert and its mirrored counterpart
Returns:
mmatrix: matrix of the new offset position, worldSpace
"""
current_vert = pm.PyNode(closestVerts[0])
mr_current_vert = pm.PyNode(closestVerts[1])
current_length = vector.getDistance(current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
orig_length = vector.getDistance(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center = vector.linearlyInterpolate(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center_matrix = pm.dt.Matrix()
# orig_center_matrix.setTranslation(orig_center, pm.dt.Space.kWorld)
orig_center_matrix = transform.setMatrixPosition(
orig_center_matrix, orig_center)
current_center = vector.linearlyInterpolate(
current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
length_percentage = 1
# guard against division by zero when either measurement collapses to a point
if current_length != 0 and orig_length != 0:
length_percentage = current_length / orig_length
# refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix = pm.dt.Matrix()
# refPosition_matrix.setTranslation(current_center, pm.dt.Space.kWorld)
refPosition_matrix = transform.setMatrixPosition(
refPosition_matrix, current_center)
deltaMatrix = node_matrix * orig_center_matrix.inverse()
deltaMatrix = deltaMatrix * length_percentage
deltaMatrix = transform.setMatrixScale(deltaMatrix)
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
def getRepositionMatrixSingleRef(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
Args:
node_matrix (pm.dt.Matrix): matrix of the guide
orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
closestVerts (list): names of the closest verts; only the first is used
Returns:
mmatrix: matrix of the new offset position, worldSpace
"""
closestVerts = pm.PyNode(closestVerts[0])
faces = closestVerts.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
refPosition_matrix.setRotation(normal_rot)
deltaMatrix = node_matrix * orig_ref_matrix.inverse()
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionaryLegacy(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
mc.select(cl=True)
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def yieldGuideRelativeDictionary(mesh, guideOrder, relativeGuide_dict):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
yield relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionary(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for result in yieldGuideRelativeDictionary(
mesh, guideOrder, relativeGuide_dict):
pass
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def updateGuidePlacementLegacy(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
guideNode = pm.PyNode(guide)
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
@utils.viewport_off
@utils.one_undo
def yieldUpdateGuidePlacement(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
yield repoMatrix
@utils.viewport_off
@utils.one_undo
def updateGuidePlacement(guideOrder, guideDictionary, reset_scale=False):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
updateGen = yieldUpdateGuidePlacement(guideOrder, guideDictionary)
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
guideNode = pm.PyNode(guide)
scl = guideNode.getScale()
repoMatrix = next(updateGen)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
if reset_scale:
guideNode.setScale([1, 1, 1])
else:
guideNode.setScale(scl)
yield True
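# Hedged note: updateGuidePlacement is itself a generator, so callers have to exhaust
# it before the guides actually move, e.g. (illustrative only):
#   for _ in updateGuidePlacement(ordered_hierarchy, guideDictionary, reset_scale=True):
#       pass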
# ==============================================================================
# Data export, still testing
# ==============================================================================
def _importData(filepath):
try:
with open(filepath, 'r') as f:
data = json.load(f)
return data
except Exception as e:
print(e)
def _exportData(data, filepath):
try:
with open(filepath, 'w') as f:
json.dump(data, f, sort_keys=False, indent=4)
except Exception as e:
print(e)
def exportGuidePlacement(filepath=None,
reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
skip_strings=[]):
"""Export the position of the supplied root node to a file.
Args:
filepath (str, optional): path to export to
reference_mesh (str, optional): mesh to query verts
root_node (str, optional): name of node to query against
skip_crawl_nodes (list, optional): of nodes not to crawl
skip_strings (list, optional): strings to check to skip node
Returns:
list: dict, list, str
"""
if filepath is None:
filepath = pm.fileDialog2(fileMode=0,
startingDirectory="/",
fileFilter="Export position(*.json)")
if filepath:
filepath = filepath[0]
(relativeGuide_dict,
ordered_hierarchy) = recordInitialGuidePlacement(
reference_mesh=reference_mesh,
root_node=root_node,
skip_crawl_nodes=skip_crawl_nodes,
skip_strings=skip_strings)
data = {}
data["relativeGuide_dict"] = relativeGuide_dict
data["ordered_hierarchy"] = ordered_hierarchy
_exportData(data, filepath)
print("Guide position exported: {}".format(filepath))
return relativeGuide_dict, ordered_hierarchy, filepath
@utils.one_undo
def importGuidePlacement(filepath):
"""import the position from the provided file
Args:
filepath (str): file to the json
referenceMesh (str, optional): name of mesh to compare against
"""
data = _importData(filepath)
updateGuidePlacement(data["ordered_hierarchy"], data["relativeGuide_dict"])
return data["relativeGuide_dict"], data["ordered_hierarchy"]
def recordInitialGuidePlacement(reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
skip_strings=None):
"""convenience function for retrieving a dict of position
Args:
reference_mesh (str, optional): the mesh to query against
root_node (str, optional): root node to crawl
skip_crawl_nodes (list, optional): of nodes to avoid
skip_strings (list, optional): of strings to check if skip
Returns:
dict, list: dict of positions, list of ordered nodes
"""
ordered_hierarchy = []
relativeGuide_dict = {}
crawlHierarchy(root_node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
relativeGuide_dict = getGuideRelativeDictionary(reference_mesh,
ordered_hierarchy)
return relativeGuide_dict, ordered_hierarchy
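# Usage sketch (assumptions: the default universal mesh and guide root exist):
#   guide_dict, ordered = recordInitialGuidePlacement()
#   # ...swap in the new universal mesh, then:
#   for _ in updateGuidePlacement(ordered, guide_dict):
#       pass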
|
mgear-dev/mgear4
|
release/scripts/mgear/shifter/relative_guide_placement.py
|
relative_guide_placement.py
|
py
| 19,592 |
python
|
en
|
code
| 209 |
github-code
|
6
|
[
{
"api_name": "maya.cmds.listRelatives",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "maya.cmds.objectType",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "maya.cmds.exactWorldBoundingBox",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "pymel.core.select",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "maya.cmds.ls",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "pymel.core.select",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Vector",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.TransformationMatrix",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MEulerRotation",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MEulerRotation",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MEulerRotation",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MMatrix",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MScriptUtil.createMatrixFromList",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya.MScriptUtil",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MTransformationMatrix",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "math.pi",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "mgear.core.vector.getDistance",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "mgear.core.vector",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "mgear.core.vector.getDistance",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "mgear.core.vector",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "mgear.core.vector.linearlyInterpolate",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "mgear.core.vector",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "mgear.core.transform.setMatrixPosition",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "mgear.core.transform",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "mgear.core.vector.linearlyInterpolate",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "mgear.core.vector",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "mgear.core.transform.setMatrixPosition",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "mgear.core.transform",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "mgear.core.transform.setMatrixScale",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "mgear.core.transform",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "pymel.core.select",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "maya.cmds.ls",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "pymel.core.select",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Vector",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.TransformationMatrix",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "mgear.core.meshNavigation.getClosestVertexFromTransform",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "mgear.core.meshNavigation",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "mgear.core.meshNavigation.getClosestVertexFromTransform",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "mgear.core.meshNavigation",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "maya.cmds.select",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "mgear.core.meshNavigation.getClosestVertexFromTransform",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "mgear.core.meshNavigation",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "mgear.core.meshNavigation.getClosestVertexFromTransform",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "mgear.core.meshNavigation",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "maya.cmds.objExists",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 395,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "maya.cmds.objExists",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 419,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 420,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "pymel.core.dt.Matrix",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "pymel.core.dt",
"line_number": 421,
"usage_type": "attribute"
},
{
"api_name": "pymel.core",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 401,
"usage_type": "name"
},
{
"api_name": "maya.cmds.objExists",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "pymel.core.PyNode",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.viewport_off",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "pymel.core.fileDialog2",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "mgear.core.utils.one_undo",
"line_number": 509,
"usage_type": "attribute"
},
{
"api_name": "mgear.core.utils",
"line_number": 509,
"usage_type": "name"
}
] |
36347921741
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pyredatam
Tests for `pyredatam` module.
"""
from __future__ import unicode_literals
import unittest
import nose
import pyredatam
import queries
class RedatamTestCase(unittest.TestCase):
def test_arealist_query(self):
# Test case AREALIST1
area_level = "FRAC"
variables = "PERSONA.CONDACT"
area_filter = {"PROV": ["02", "03"]}
universe_filter = "1 = 1"
title = "El titulo"
query = pyredatam.arealist_query(area_level, variables, area_filter,
universe_filter, title)
self.assertEqual(query, queries.AREALIST1.strip())
# Test case AREALIST2
variables = ["PERSONA.CONDACT"]
query = pyredatam.arealist_query(area_level, variables)
self.assertEqual(query, queries.AREALIST2.strip())
# Test case AREALIST3
area_filter = {"PROV": "02"}
query = pyredatam.arealist_query(area_level, variables, area_filter)
self.assertEqual(query, queries.AREALIST3.strip())
def test_counter_query(self):
# Test case COUNTER1
area_level = "RADIO"
entity_count = "PERSONA"
area_filter = {"PROV": "02"}
universe_filter = "1 = 1"
title = "El titulo"
query = pyredatam.counter_query(area_level, entity_count, area_filter,
universe_filter, title)
self.assertEqual(query, queries.COUNTER1.strip())
# Test case COUNTER2
area_level = "DPTO"
entity_count = "FRAC"
incl_area_name = True
incl_total = True
query = pyredatam.counter_query(area_level, entity_count, area_filter,
universe_filter, title, incl_area_name,
incl_total)
self.assertEqual(query, queries.COUNTER2.strip())
def test_median_query(self):
# Test case MEDIAN1
variable = "PERSONA.P03"
by_var1 = "PERSONA.CONDACT"
by_var2 = "PERSONA.P02"
incl_name = True
area_break = "PROV"
area_filter = None
universe_filter = "1 = 1"
title = "El titulo"
query = pyredatam.median_query(variable, by_var1, by_var2, incl_name,
area_break, area_filter,
universe_filter, title)
self.assertEqual(query, queries.MEDIAN1.strip())
# Test case MEDIAN2
variable = "PERSONA.P03"
incl_name = None
area_break = None
universe_filter = None
title = None
query = pyredatam.median_query(variable, by_var1, by_var2, incl_name,
area_break, area_filter,
universe_filter, title)
self.assertEqual(query, queries.MEDIAN2.strip())
# Test case MEDIAN3
variable = "PERSONA.P03"
by_var1 = None
by_var2 = None
query = pyredatam.median_query(variable, by_var1, by_var2, incl_name,
area_break, area_filter,
universe_filter, title)
self.assertEqual(query, queries.MEDIAN3.strip())
if __name__ == '__main__':
nose.run(defaultTest=__name__)
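# Hedged note: these cases are usually collected by nose from the repository root,
# e.g. `nosetests tests/test_pyredatam.py`; the exact invocation is an assumption
# and any nose-compatible runner should work.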
|
abenassi/pyredatam
|
tests/test_pyredatam.py
|
test_pyredatam.py
|
py
| 3,362 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.arealist_query",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST1.strip",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST1",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.arealist_query",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST2.strip",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST2",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.arealist_query",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST3.strip",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "queries.AREALIST3",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.counter_query",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "queries.COUNTER1.strip",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "queries.COUNTER1",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.counter_query",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "queries.COUNTER2.strip",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "queries.COUNTER2",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.median_query",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN1.strip",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN1",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.median_query",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN2.strip",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN2",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pyredatam.median_query",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN3.strip",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "queries.MEDIAN3",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "nose.run",
"line_number": 109,
"usage_type": "call"
}
] |
22329941730
|
from discord.ext import commands, tasks
import discord
import asyncio
import os
import json
import sqlite3
from dotenv import load_dotenv
import requests
from datetime import datetime, time
load_dotenv()
class Birthday(commands.Cog):
"""Birthday commands."""
def __init__(self, client):
self.client = client
self.birthday_announcments.start()
@commands.command(hidden = True)
@commands.is_owner()
async def force_add_user(self, ctx, user: discord.Member, day: int, month: int):
"""Adds a user to the birthday list."""
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.send("Invalid date.")
return
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
cur.execute("SELECT * FROM birthday WHERE user_id = ?", (user.id,))
if cur.fetchone() is not None:
await ctx.send("User already exists.")
return
cur.execute("INSERT INTO birthday VALUES (?, ?, ?)", (user.id, day, month))
con.commit()
con.close()
await ctx.send("Added user to birthday list.")
@commands.command(hidden=True)
@commands.is_owner()
async def makeservertablebirthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("CREATE TABLE server(ServerID int, Servertoggle, birthdaychannel int,birthdaymessage text)")
con.commit()
con.close()
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
cur.execute("CREATE TABLE birthday(UsersID int, birthday)")
con.commit()
con.close()
await ctx.send("Done")
#
#@commands.command(hidden = True)
#@commands.is_owner()
#async def setallbithday(self,ctx):
# for i in self.client.guilds:
# con = sqlite3.connect("databases/server_brithdays.db")
# cur = con.cursor()
# cur.execute("INSERT INTO server(ServerID, Servertoggle,birthdaychannel) VALUES(?, ?,?)", (i.id, False,None))
# await ctx.send(f"{i} has been set")
# con.commit()
# con.close()
@commands.Cog.listener()
async def on_guild_join(self, guild):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("INSERT INTO server(ServerID, Servertoggle) VALUES(?, ?)", (guild.id, False))
con.commit()
con.close()
@commands.command(help = " enable and disable Birthday")
@commands.has_permissions(administrator=True)
async def toggle_birthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
datas = cur.fetchall()
toggle = datas[0][1]
if toggle == True:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (False, ctx.guild.id,))
con.commit()
con.close()
await ctx.send("Birthday reminders has been turned off")
if toggle == False:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (True, ctx.guild.id,))
con.commit()
con.close()
await ctx.send("Birthday reminders has been turrned on")
@commands.slash_command(name="toggle_birthday", description="enable and disable Birthday")
@commands.has_permissions(administrator=True)
async def _toggle_birthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
datas = cur.fetchall()
toggle = datas[0][1]
if toggle == True:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (False, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Birthday reminders has been turned off")
if toggle == False:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (True, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Birthday reminders has been turrned on")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
@commands.slash_command(name="setbirthday", description="Set your birthday use day then month")
async def setbirthday__slash(self, ctx, day: int, month: int):
token = os.getenv("TOPGG_TOKEN")
api = requests.get(f"https://top.gg/api/bots/902240397273743361/check?userId={ctx.author.id}", headers={"Authorization": token, "Content-Type": "application/json"})
data = api.json()
print(api)
print(data)
voted = data["voted"]
#if the api does not return a 200 status code
if api.status_code != 200:
voted = 1
print("api error")
if voted == 0:
await ctx.respond("You need to have voted for simplex in the last 24 hours to set your birthday. Please vote and then try again, you can vote here: https://top.gg/bot/902240397273743361/vote",ephemeral=True)
return
else:
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.respond("Invalid date.")
else:
#force 2 digit date
if day < 10:
day = f"0{day}"
if month < 10:
month = f"0{month}"
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (ctx.author.id,))
data = cur.fetchall()
if data == []:
cur.execute("INSERT INTO birthday(UsersID, birthday) VALUES(?, ?)", (ctx.author.id, f"{day}/{month}"))
con.commit()
con.close()
await ctx.respond("Your birthday has been set")
else:
cur.execute("UPDATE birthday SET birthday = ? WHERE UsersID=?", (f"{day}/{month}", ctx.author.id,))
con.commit()
con.close()
await ctx.respond("Your birthday has been updated")
@commands.command(name="setbirthday", help = "Set your birthday use day then month")
async def setbirthday_commands(self, ctx, day: int, month: int):
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.send("Invalid date.")
else:
# format the date as two digits
if len(str(day)) == 1:
day = f"0{day}"
if len(str(month)) == 1:
month = f"0{month}"
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (ctx.author.id,))
data = cur.fetchall()
if data == []:
cur.execute("INSERT INTO birthday(UsersID, birthday) VALUES(?, ?)", (ctx.author.id, f"{day}/{month}"))
con.commit()
con.close()
await ctx.send("Your birthday has been set")
else:
cur.execute("UPDATE birthday SET birthday = ? WHERE UsersID=?", (f"{day}/{month}", ctx.author.id,))
con.commit()
con.close()
await ctx.send("Your birthday has been updated")
@commands.command(name="set_birthday_channel",help = "Set the birthday channel")
@commands.has_permissions(administrator=True)
async def set_birthday_channel_command(self,ctx, channel: commands.TextChannelConverter):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("UPDATE server SET birthdaychannel = ? WHERE ServerID=?", (channel.id, ctx.guild.id,))
con.commit()
con.close()
await ctx.send(f"Birthday channel has been set to {channel} \n To enable birthday reminders use the command `/toggle_birthday` \n To set a custom message use the command `/birthday_message`")
@commands.slash_command(name="set_birthday_channel",help = "Set the birthday channel")
@commands.has_permissions(administrator=True)
async def set_birthday_channel__slash(self,ctx, channel: commands.TextChannelConverter):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("UPDATE server SET birthdaychannel = ? WHERE ServerID=?", (channel.id, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond(f"Birthday channel has been set to {channel}")
@commands.slash_command(name="findbirthday", description="Find a users birthday")
async def findbirthday__slash(self, ctx, user: discord.Member):
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (user.id,))
data = cur.fetchall()
if data == []:
await ctx.respond(f"{user} has not set their birthday")
else:
await ctx.respond(f"{user} birthday is {data[0][1]}")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
@tasks.loop(time=time(7,00))
async def birthday_announcments(self):
print("Birthday announcments")
for server in self.client.guilds:
print(server)
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (server.id,))
datas = cur.fetchall()
if datas == []:
cur.execute("INSERT INTO server(ServerID, Servertoggle, birthdaychannel) VALUES(?, ?, ?)", (server.id, False, None))
con.commit()
con.close()
else:
pass
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday")
data = cur.fetchall()
if data == []:
print("No birthday")
#does not work below here
else:
for x in data:
if datas[0][1] == True:
if datas[0][2] == None:
pass
else:
user = await self.client.fetch_user(x[0])
if user in server.members:
channel = await self.client.fetch_channel(datas[0][2])
message = datas[0][3]
if message == None:
message = ":tada:"
print(channel)
print(x[1])
print(datetime.now().strftime("%d/%m"))
if x[1] == datetime.now().strftime("%d/%m"):
print("Birthday")
print(x[0])
await channel.send(f"Happy birthday <@{x[0]}>! \n {message}")
else:
username = await self.client.fetch_user(x[0])
print(f"User {username} not in server {x[0]} {server}")
else:
pass
#@commands.command()
#@commands.is_owner()
#async def foramt_all_birthdays(self,ctx):
# con = sqlite3.connect("databases/user_brithdays.db")
# cur = con.cursor()
# data = cur.execute("SELECT * FROM birthday")
# data = cur.fetchall()
# for i in data:
# day = i[1].split("/")[0]
# month = i[1].split("/")[1]
# if len(day) == 1:
# day = "0" + day
# if len(month) == 1:
# month = "0" + month
# cur.execute("UPDATE birthday SET Birthday = ? WHERE UsersID=?", (f"{day}/{month}", i[0],))
# con.commit()
# con.close()
#
@commands.command()
@commands.is_owner()
async def add_message_to_birthday(self,ctx,*,message):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
# create a new column
cur.execute("ALTER TABLE server ADD COLUMN birthdaymessage TEXT")
#set the message
cur.execute("UPDATE server SET birthdaymessage = ?", (message,))
con.commit()
con.close()
await ctx.send("Done")
@commands.slash_command(name="birthday_message", description="Add a message to the birthday announcment")
@commands.has_permissions(administrator=True)
async def add_message_to_birthday__slash(self,ctx,*,message):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
data = cur.fetchall()
if data == []:
await ctx.respond("You have not set a birthday channel")
else:
cur.execute("UPDATE server SET birthdaymessage = ? WHERE ServerID=?", (message, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Done")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
def setup(bot):
bot.add_cog(Birthday(bot))
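# Hedged usage sketch: the main bot file would typically load this cog with something
# like bot.load_extension("cogs.birthday"); the module path is an assumption based on
# the file location and is not taken from the original repository.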
|
micfun123/Simplex_bot
|
cogs/birthday.py
|
birthday.py
|
py
| 14,104 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.slash_command",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.slash_command",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.TextChannelConverter",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.TextChannelConverter",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.slash_command",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.slash_command",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "discord.ext.tasks.loop",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.is_owner",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.slash_command",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 298,
"usage_type": "name"
}
] |
29279761170
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Ambre chamber
"""
__author__ = "Dennis van Gils"
__authoremail__ = "[email protected]"
__url__ = "https://github.com/Dennis-van-Gils/project-Ambre-chamber"
__date__ = "31-08-2020"
__version__ = "2.0"
# pylint: disable=bare-except, broad-except, try-except-raise
import os
import sys
import time
import numpy as np
import psutil
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as QtWid
from PyQt5.QtCore import QDateTime
import pyqtgraph as pg
from dvg_debug_functions import tprint, dprint, print_fancy_traceback as pft
from dvg_pyqt_controls import (
create_LED_indicator,
create_Toggle_button,
SS_TEXTBOX_READ_ONLY,
SS_GROUP,
)
from dvg_pyqt_filelogger import FileLogger
from dvg_pyqtgraph_threadsafe import (
HistoryChartCurve,
LegendSelect,
PlotManager,
)
from dvg_devices.Arduino_protocol_serial import Arduino
from dvg_qdeviceio import QDeviceIO
TRY_USING_OPENGL = True
if TRY_USING_OPENGL:
try:
import OpenGL.GL as gl # pylint: disable=unused-import
except:
print("OpenGL acceleration: Disabled")
print("To install: `conda install pyopengl` or `pip install pyopengl`")
else:
print("OpenGL acceleration: Enabled")
pg.setConfigOptions(useOpenGL=True)
pg.setConfigOptions(antialias=True)
pg.setConfigOptions(enableExperimental=True)
# Global pyqtgraph configuration
# pg.setConfigOptions(leftButtonPan=False)
pg.setConfigOption("foreground", "#EEE")
# Constants
# fmt: off
DAQ_INTERVAL_MS = 1000 # [ms]
CHART_INTERVAL_MS = 500 # [ms]
CHART_HISTORY_TIME = 3600 # [s]
# fmt: on
# Show debug info in terminal? Warning: Slow! Do not leave on unintentionally.
DEBUG = False
def get_current_date_time():
cur_date_time = QDateTime.currentDateTime()
return (
cur_date_time.toString("dd-MM-yyyy"), # Date
cur_date_time.toString("HH:mm:ss"), # Time
cur_date_time.toString("yyMMdd_HHmmss"), # Reverse notation date-time
)
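# Illustrative example (values are made up): get_current_date_time() returns three
# strings such as ("31-08-2020", "13:37:00", "200831_133700"), used for on-screen
# display and, presumably, for building log file names via FileLogger.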
# ------------------------------------------------------------------------------
# Arduino state
# ------------------------------------------------------------------------------
class State(object):
"""Reflects the actual readings, parsed into separate variables, of the
Arduino. There should only be one instance of the State class.
"""
def __init__(self):
self.time = np.nan # [s]
self.ds18b20_temp = np.nan # ['C]
self.dht22_temp = np.nan # ['C]
self.dht22_humi = np.nan # [%]
self.is_valve_open = False
# Automatic valve control
self.humi_threshold = np.nan # [%]
self.open_valve_when_super_humi = np.nan
state = State()
# ------------------------------------------------------------------------------
# MainWindow
# ------------------------------------------------------------------------------
class MainWindow(QtWid.QWidget):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self.setWindowTitle("Ambre chamber")
self.setGeometry(350, 50, 960, 800)
self.setStyleSheet(SS_TEXTBOX_READ_ONLY + SS_GROUP)
# -------------------------
# Top frame
# -------------------------
# Left box
self.qlbl_update_counter = QtWid.QLabel("0")
self.qlbl_DAQ_rate = QtWid.QLabel("DAQ: nan Hz")
self.qlbl_DAQ_rate.setStyleSheet("QLabel {min-width: 7em}")
vbox_left = QtWid.QVBoxLayout()
vbox_left.addWidget(self.qlbl_update_counter, stretch=0)
vbox_left.addStretch(1)
vbox_left.addWidget(self.qlbl_DAQ_rate, stretch=0)
# Middle box
self.qlbl_title = QtWid.QLabel(
"Ambre chamber",
font=QtGui.QFont("Palatino", 14, weight=QtGui.QFont.Bold),
)
self.qlbl_title.setAlignment(QtCore.Qt.AlignCenter)
self.qlbl_cur_date_time = QtWid.QLabel("00-00-0000 00:00:00")
self.qlbl_cur_date_time.setAlignment(QtCore.Qt.AlignCenter)
self.qpbt_record = create_Toggle_button(
"Click to start recording to file", minimumWidth=300
)
# fmt: off
self.qpbt_record.clicked.connect(lambda state: log.record(state)) # pylint: disable=unnecessary-lambda
# fmt: on
vbox_middle = QtWid.QVBoxLayout()
vbox_middle.addWidget(self.qlbl_title)
vbox_middle.addWidget(self.qlbl_cur_date_time)
vbox_middle.addWidget(self.qpbt_record)
# Right box
self.qpbt_exit = QtWid.QPushButton("Exit")
self.qpbt_exit.clicked.connect(self.close)
self.qpbt_exit.setMinimumHeight(30)
self.qlbl_recording_time = QtWid.QLabel(alignment=QtCore.Qt.AlignRight)
vbox_right = QtWid.QVBoxLayout()
vbox_right.addWidget(self.qpbt_exit, stretch=0)
vbox_right.addStretch(1)
vbox_right.addWidget(self.qlbl_recording_time, stretch=0)
# Round up top frame
hbox_top = QtWid.QHBoxLayout()
hbox_top.addLayout(vbox_left, stretch=0)
hbox_top.addStretch(1)
hbox_top.addLayout(vbox_middle, stretch=0)
hbox_top.addStretch(1)
hbox_top.addLayout(vbox_right, stretch=0)
# -------------------------
# Bottom frame
# -------------------------
# Charts
# -------------------------
self.gw = pg.GraphicsLayoutWidget()
# Plot: Temperature: DS18B20
p = {"color": "#EEE", "font-size": "10pt"}
self.pi_ds18b20_temp = self.gw.addPlot(row=0, col=0)
self.pi_ds18b20_temp.setLabel("left", text="temperature (°C)", **p)
# Plot: Temperature: DHT 22
self.pi_dht22_temp = self.gw.addPlot(row=1, col=0)
self.pi_dht22_temp.setLabel("left", text="temperature (°C)", **p)
# Plot: Humidity: DHT22
self.pi_dht22_humi = self.gw.addPlot(row=2, col=0)
self.pi_dht22_humi.setLabel("left", text="humidity (%)", **p)
self.plots = [
self.pi_ds18b20_temp,
self.pi_dht22_humi,
self.pi_dht22_temp,
]
for plot in self.plots:
plot.setClipToView(True)
plot.showGrid(x=1, y=1)
plot.setLabel("bottom", text="history (s)", **p)
plot.setMenuEnabled(True)
plot.enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)
plot.enableAutoRange(axis=pg.ViewBox.YAxis, enable=True)
plot.setAutoVisible(y=True)
plot.setRange(xRange=[-CHART_HISTORY_TIME, 0])
# Curves
capacity = round(CHART_HISTORY_TIME * 1e3 / DAQ_INTERVAL_MS)
PEN_01 = pg.mkPen(color=[255, 255, 0], width=3)
PEN_02 = pg.mkPen(color=[0, 255, 255], width=3)
self.tscurve_ds18b20_temp = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_ds18b20_temp.plot(
pen=PEN_01, name="DS18B20 temp."
),
)
self.tscurve_dht22_temp = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_dht22_temp.plot(
pen=PEN_01, name="DHT22 temp."
),
)
self.tscurve_dht22_humi = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_dht22_humi.plot(
pen=PEN_02, name="DHT22 humi."
),
)
self.tscurves = [
self.tscurve_ds18b20_temp,
self.tscurve_dht22_temp,
self.tscurve_dht22_humi,
]
# Group `Readings`
# -------------------------
legend = LegendSelect(
linked_curves=self.tscurves, hide_toggle_button=True
)
p = {
"readOnly": True,
"alignment": QtCore.Qt.AlignRight,
"maximumWidth": 54,
}
self.qlin_ds18b20_temp = QtWid.QLineEdit(**p)
self.qlin_dht22_temp = QtWid.QLineEdit(**p)
self.qlin_dht22_humi = QtWid.QLineEdit(**p)
# fmt: off
legend.grid.setHorizontalSpacing(6)
legend.grid.addWidget(self.qlin_ds18b20_temp , 0, 2)
legend.grid.addWidget(QtWid.QLabel("± 0.5 °C"), 0, 3)
legend.grid.addWidget(self.qlin_dht22_temp , 1, 2)
legend.grid.addWidget(QtWid.QLabel("± 0.5 °C"), 1, 3)
legend.grid.addWidget(self.qlin_dht22_humi , 2, 2)
legend.grid.addWidget(QtWid.QLabel("± 3 %") , 2, 3)
# fmt: on
qgrp_readings = QtWid.QGroupBox("Readings")
qgrp_readings.setLayout(legend.grid)
# Group 'Log comments'
# -------------------------
self.qtxt_comments = QtWid.QTextEdit()
grid = QtWid.QGridLayout()
grid.addWidget(self.qtxt_comments, 0, 0)
qgrp_comments = QtWid.QGroupBox("Log comments")
qgrp_comments.setLayout(grid)
# Group 'Charts'
# -------------------------
self.plot_manager = PlotManager(parent=self)
self.plot_manager.add_autorange_buttons(linked_plots=self.plots)
self.plot_manager.add_preset_buttons(
linked_plots=self.plots,
linked_curves=self.tscurves,
presets=[
{
"button_label": "00:30",
"x_axis_label": "history (sec)",
"x_axis_divisor": 1,
"x_axis_range": (-30, 0),
},
{
"button_label": "01:00",
"x_axis_label": "history (sec)",
"x_axis_divisor": 1,
"x_axis_range": (-60, 0),
},
{
"button_label": "10:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-10, 0),
},
{
"button_label": "30:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-30, 0),
},
{
"button_label": "60:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-60, 0),
},
],
)
self.plot_manager.add_clear_button(linked_curves=self.tscurves)
self.plot_manager.perform_preset(1)
qgrp_chart = QtWid.QGroupBox("Charts")
qgrp_chart.setLayout(self.plot_manager.grid)
# Group 'Valve control'
# -------------------------
self.LED_is_valve_open = create_LED_indicator()
self.qlin_humi_threshold = QtWid.QLineEdit(
"%d" % state.humi_threshold,
alignment=QtCore.Qt.AlignRight,
maximumWidth=36,
)
self.qlin_humi_threshold.editingFinished.connect(
self.process_qlin_humi_threshold
)
self.qpbt_open_when_super_humi = QtWid.QPushButton(
(
"humidity > threshold"
if state.open_valve_when_super_humi
else "humidity < threshold"
),
checkable=True,
checked=state.open_valve_when_super_humi,
)
self.qpbt_open_when_super_humi.clicked.connect(
self.process_qpbt_open_when_super_humi
)
# fmt: off
grid = QtWid.QGridLayout()
grid.addWidget(QtWid.QLabel("Is valve open?") , 0, 0)
grid.addWidget(self.LED_is_valve_open , 0, 1)
grid.addWidget(QtWid.QLabel("Humidity threshold"), 1, 0)
grid.addWidget(self.qlin_humi_threshold , 1, 1)
grid.addWidget(QtWid.QLabel("%") , 1, 2)
grid.addWidget(QtWid.QLabel("Open valve when") , 2, 0)
grid.addWidget(self.qpbt_open_when_super_humi , 2, 1, 1, 2)
grid.setAlignment(QtCore.Qt.AlignTop)
# fmt: on
qgrp_valve = QtWid.QGroupBox("Valve control")
qgrp_valve.setLayout(grid)
# Round up right frame
vbox = QtWid.QVBoxLayout()
vbox.addWidget(qgrp_readings)
vbox.addWidget(qgrp_comments)
vbox.addWidget(qgrp_valve) # , alignment=QtCore.Qt.AlignLeft)
vbox.addWidget(qgrp_chart, alignment=QtCore.Qt.AlignLeft)
vbox.addStretch()
# Round up bottom frame
hbox_bot = QtWid.QHBoxLayout()
hbox_bot.addWidget(self.gw, 1)
hbox_bot.addLayout(vbox, 0)
# -------------------------
# Round up full window
# -------------------------
vbox = QtWid.QVBoxLayout(self)
vbox.addLayout(hbox_top, stretch=0)
vbox.addSpacerItem(QtWid.QSpacerItem(0, 10))
vbox.addLayout(hbox_bot, stretch=1)
# --------------------------------------------------------------------------
# Handle controls
# --------------------------------------------------------------------------
@QtCore.pyqtSlot()
def process_qlin_humi_threshold(self):
try:
humi_threshold = float(self.qlin_humi_threshold.text())
except (TypeError, ValueError):
humi_threshold = 50
except:
raise
state.humi_threshold = np.clip(humi_threshold, 0, 100)
self.qlin_humi_threshold.setText("%.0f" % state.humi_threshold)
qdev_ard.send(ard.write, "th%.0f" % state.humi_threshold)
@QtCore.pyqtSlot()
def process_qpbt_open_when_super_humi(self):
if self.qpbt_open_when_super_humi.isChecked():
state.open_valve_when_super_humi = True
self.qpbt_open_when_super_humi.setText("humidity > threshold")
qdev_ard.send(ard.write, "open when super humi")
else:
state.open_valve_when_super_humi = False
self.qpbt_open_when_super_humi.setText("humidity < threshold")
qdev_ard.send(ard.write, "open when sub humi")
@QtCore.pyqtSlot()
def update_GUI(self):
str_cur_date, str_cur_time, _ = get_current_date_time()
self.qlbl_cur_date_time.setText(
"%s %s" % (str_cur_date, str_cur_time)
)
self.qlbl_update_counter.setText("%i" % qdev_ard.update_counter_DAQ)
self.qlbl_DAQ_rate.setText(
"DAQ: %.1f Hz" % qdev_ard.obtained_DAQ_rate_Hz
)
if log.is_recording():
self.qlbl_recording_time.setText(log.pretty_elapsed())
self.qlin_ds18b20_temp.setText("%.1f" % state.ds18b20_temp)
self.qlin_dht22_temp.setText("%.1f" % state.dht22_temp)
self.qlin_dht22_humi.setText("%.1f" % state.dht22_humi)
self.qlbl_title.setText(
"Interior: %.1f °C, %.1f %%"
% (state.dht22_temp, state.dht22_humi)
)
if state.is_valve_open:
self.LED_is_valve_open.setText("1")
self.LED_is_valve_open.setChecked(True)
else:
self.LED_is_valve_open.setText("0")
self.LED_is_valve_open.setChecked(False)
@QtCore.pyqtSlot()
def update_chart(self):
if DEBUG:
tprint("update_chart")
for tscurve in self.tscurves:
tscurve.update()
# ------------------------------------------------------------------------------
# Program termination routines
# ------------------------------------------------------------------------------
def stop_running():
app.processEvents()
qdev_ard.quit()
log.close()
print("Stopping timers................ ", end="")
timer_GUI.stop()
timer_charts.stop()
print("done.")
@QtCore.pyqtSlot()
def notify_connection_lost():
stop_running()
window.qlbl_title.setText("! ! ! LOST CONNECTION ! ! !")
str_cur_date, str_cur_time, _ = get_current_date_time()
str_msg = "%s %s\nLost connection to Arduino." % (
str_cur_date,
str_cur_time,
)
print("\nCRITICAL ERROR @ %s" % str_msg)
reply_ = QtWid.QMessageBox.warning(
window, "CRITICAL ERROR", str_msg, QtWid.QMessageBox.Ok
)
if reply_ == QtWid.QMessageBox.Ok:
pass # Leave the GUI open for read-only inspection by the user
@QtCore.pyqtSlot()
def about_to_quit():
print("\nAbout to quit")
stop_running()
ard.close()
# ------------------------------------------------------------------------------
# Your Arduino update function
# ------------------------------------------------------------------------------
def DAQ_function():
# Date-time keeping
str_cur_date, str_cur_time, str_cur_datetime = get_current_date_time()
# Query the Arduino for its state
success_, tmp_state = ard.query_ascii_values("?", delimiter="\t")
if not (success_):
dprint(
"'%s' reports IOError @ %s %s"
% (ard.name, str_cur_date, str_cur_time)
)
return False
# Parse readings into separate state variables
try:
(
state.time,
state.ds18b20_temp,
state.dht22_temp,
state.dht22_humi,
state.is_valve_open,
) = tmp_state
state.time /= 1000 # Arduino time, [msec] to [s]
state.is_valve_open = bool(state.is_valve_open)
except Exception as err:
pft(err, 3)
dprint(
"'%s' reports IOError @ %s %s"
% (ard.name, str_cur_date, str_cur_time)
)
return False
# We will use PC time instead
state.time = time.perf_counter()
# Add readings to chart histories
window.tscurve_ds18b20_temp.appendData(state.time, state.ds18b20_temp)
window.tscurve_dht22_temp.appendData(state.time, state.dht22_temp)
window.tscurve_dht22_humi.appendData(state.time, state.dht22_humi)
# Logging to file
log.update(filepath=str_cur_datetime + ".txt", mode="w")
# Return success
return True
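# File-logging callbacks: the two functions below are handed to the FileLogger
# instance created in the main section further down.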
def write_header_to_log():
log.write("[HEADER]\n")
log.write(window.qtxt_comments.toPlainText())
log.write("\n\n[DATA]\n")
log.write("time\tDS18B20 temp.\tDHT22 temp.\tDHT22 humi.\tvalve\n")
log.write("[s]\t[±0.5 °C]\t[±0.5 °C]\t[±3 pct]\t[0/1]\n")
def write_data_to_log():
log.write(
"%.1f\t%.1f\t%.1f\t%.1f\t%i\n"
% (
log.elapsed(),
state.ds18b20_temp,
state.dht22_temp,
state.dht22_humi,
state.is_valve_open,
)
)
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set priority of this process to maximum in the operating system
print("PID: %s\n" % os.getpid())
try:
proc = psutil.Process(os.getpid())
if os.name == "nt":
proc.nice(psutil.REALTIME_PRIORITY_CLASS) # Windows
else:
proc.nice(-20) # Other
except:
print("Warning: Could not set process to maximum priority.\n")
# --------------------------------------------------------------------------
# Connect to Arduino
# --------------------------------------------------------------------------
ard = Arduino(name="Ard", connect_to_specific_ID="Ambre chamber")
ard.serial_settings["baudrate"] = 115200
ard.auto_connect()
if not (ard.is_alive):
print("\nCheck connection and try resetting the Arduino.")
print("Exiting...\n")
sys.exit(0)
# Get the initial state of the valve control
success, reply = ard.query("th?")
if success:
state.humi_threshold = float(reply)
success, reply = ard.query("open when super humi?")
if success:
state.open_valve_when_super_humi = bool(int(reply))
# --------------------------------------------------------------------------
# Create application and main window
# --------------------------------------------------------------------------
QtCore.QThread.currentThread().setObjectName("MAIN") # For DEBUG info
app = QtWid.QApplication(sys.argv)
app.aboutToQuit.connect(about_to_quit)
window = MainWindow()
# --------------------------------------------------------------------------
# File logger
# --------------------------------------------------------------------------
log = FileLogger(
write_header_function=write_header_to_log,
write_data_function=write_data_to_log,
)
log.signal_recording_started.connect(
lambda filepath: window.qpbt_record.setText(
"Recording to file: %s" % filepath
)
)
log.signal_recording_stopped.connect(
lambda: window.qpbt_record.setText("Click to start recording to file")
)
# --------------------------------------------------------------------------
# Set up multithreaded communication with the Arduino
# --------------------------------------------------------------------------
# Create QDeviceIO
qdev_ard = QDeviceIO(ard)
# Create workers
# fmt: off
qdev_ard.create_worker_DAQ(
DAQ_function = DAQ_function,
DAQ_interval_ms = DAQ_INTERVAL_MS,
critical_not_alive_count = 1,
debug = DEBUG,
)
# fmt: on
qdev_ard.create_worker_jobs()
# Connect signals to slots
qdev_ard.signal_DAQ_updated.connect(window.update_GUI)
qdev_ard.signal_connection_lost.connect(notify_connection_lost)
# Start workers
qdev_ard.start(DAQ_priority=QtCore.QThread.TimeCriticalPriority)
# --------------------------------------------------------------------------
# Timers
# --------------------------------------------------------------------------
timer_GUI = QtCore.QTimer()
timer_GUI.timeout.connect(window.update_GUI)
timer_GUI.start(100)
timer_charts = QtCore.QTimer()
timer_charts.timeout.connect(window.update_chart)
timer_charts.start(CHART_INTERVAL_MS)
# --------------------------------------------------------------------------
# Start the main GUI event loop
# --------------------------------------------------------------------------
window.show()
sys.exit(app.exec_())
|
Dennis-van-Gils/project-Ambre-chamber
|
src_python/main.py
|
main.py
|
py
| 22,276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyqtgraph.setConfigOptions",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.setConfigOptions",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.setConfigOptions",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.setConfigOption",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QDateTime.currentDateTime",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QDateTime",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "numpy.nan",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "dvg_pyqt_controls.SS_TEXTBOX_READ_ONLY",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "dvg_pyqt_controls.SS_GROUP",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QFont",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "dvg_pyqt_controls.create_Toggle_button",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.GraphicsLayoutWidget",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.ViewBox",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "pyqtgraph.ViewBox",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "pyqtgraph.mkPen",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.mkPen",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "dvg_pyqtgraph_threadsafe.HistoryChartCurve",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "dvg_pyqtgraph_threadsafe.HistoryChartCurve",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "dvg_pyqtgraph_threadsafe.HistoryChartCurve",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "dvg_pyqtgraph_threadsafe.LegendSelect",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTextEdit",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "dvg_pyqtgraph_threadsafe.PlotManager",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "dvg_pyqt_controls.create_LED_indicator",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSpacerItem",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "numpy.clip",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "dvg_debug_functions.tprint",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.warning",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "dvg_debug_functions.dprint",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "dvg_debug_functions.print_fancy_traceback",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "dvg_debug_functions.dprint",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "psutil.Process",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 573,
"usage_type": "attribute"
},
{
"api_name": "psutil.REALTIME_PRIORITY_CLASS",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "dvg_devices.Arduino_protocol_serial.Arduino",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread.currentThread",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 605,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 605,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 607,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 607,
"usage_type": "attribute"
},
{
"api_name": "dvg_pyqt_filelogger.FileLogger",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "dvg_qdeviceio.QDeviceIO",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 652,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 652,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 662,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 671,
"usage_type": "call"
}
] |
11579227616
|
import cv2
import numpy as np
from imageclassifier import ImageClassifier
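# Parameter grids explored by bruteforce(): candidate cluster counts, the
# colour-channel index combinations fed to k-means, and matching per-pixel /
# per-cluster sorting keys.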
n_clusters = [3, 4, 5, 6, 7, 8]
kmeans_keys = [
[0], [1], [2],
[0, 1], [0, 2], [1, 2],
[0, 1, 2]
]
sorting_lambdas = [
lambda pixel: pixel[0],
lambda pixel: pixel[1],
lambda pixel: pixel[2],
lambda pixel: sum(pixel),
lambda pixel: max(pixel)
]
cl_sorting_lambdas = [
lambda cluster: cluster[0][0][0],
lambda cluster: cluster[0][0][1],
lambda cluster: cluster[0][0][2],
lambda cluster: sum(cluster[0][0]),
lambda cluster: max(cluster[0][0])
]
coeffs = []
for i in range(5):
for j in range(5):
for k in range(5):
coeffs.append([i, j, k])
sorting_keys = [i for i in range(len(sorting_lambdas))]
colorspaces = [None, cv2.COLOR_BGR2HSV, cv2.COLOR_BGR2LAB, cv2.COLOR_BGR2HLS]
def str_colorspace(colorspace):
    if colorspace is None:
return "BGR"
if colorspace == cv2.COLOR_BGR2HSV:
return "HSV"
if colorspace == cv2.COLOR_BGR2LAB:
return "LAB"
if colorspace == cv2.COLOR_BGR2HLS:
return "HLS"
def save(folder, img, n_cluster, key, color_in, sorting_key, color_sort):
filename = folder + "/c{0}_k".format(n_cluster)
filename = filename + '-'.join([str(s) for s in key])
filename = filename + '_' + str_colorspace(color_in) + "_"
filename = filename + 's{0}_'.format(sorting_key)
filename = filename + str_colorspace(color_sort) + ".png"
cv2.imwrite(filename, img)
print("saved: " + filename)
def bruteforce(target, folder):
for n_cluster in n_clusters:
classifier = ImageClassifier(n_cluster, target)
for color_in in colorspaces:
df = classifier.get_dataframe(colorspace=color_in)
for key in kmeans_keys:
cluster_map = classifier.run_kmeans(df, key)
clusters = classifier.get_clusters(cluster_map)
clusters_bak = clusters.copy()
for color_sort in colorspaces:
for sorting_key in sorting_keys:
cmp1 = sorting_lambdas[sorting_key]
cmp2 = cl_sorting_lambdas[sorting_key]
clusters = classifier.sort_clusters(clusters, cmp1, color_sort=color_sort)
res = classifier.merge_clusters(clusters, cmp2)
save(folder, res, n_cluster, key, color_in, sorting_key, color_sort)
clusters = clusters_bak.copy()
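# Single hand-picked configuration: 4 clusters on the HSV hue channel,
# clusters sorted on the first LAB channel and merged back into res.png.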
def process():
n_cluster = 4
classifier = ImageClassifier(n_cluster, 'src.jpg')
df = classifier.get_dataframe(colorspace=cv2.COLOR_BGR2HSV)
cluster_map = classifier.run_kmeans(df, [0])
clusters = classifier.get_clusters(cluster_map)
clusters_bak = clusters.copy()
#cmp = lambda pixel: (255 - int(pixel[1])) * 2 - (200 if pixel[1] < pixel[2] else 0)
cmp = lambda pixel: int(pixel[0])
#cmp = lambda pixel: pixel[1]
clusters = classifier.sort_clusters(clusters, cmp, color_sort=cv2.COLOR_BGR2LAB)
res = classifier.merge_clusters(clusters, lambda cluster: sum(cluster[0][0]))
#filename = 'res_sort/res_{0}_{1}_{2}.png'.format(coeff[0], coeff[1], coeff[2])
filename="res.png"
cv2.imwrite(filename, res)
print('saved {0}'.format(filename))
clusters = clusters_bak.copy()
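# Side-by-side comparison helper: prints describe() statistics for both images
# and exits; the pixel-sorting code below the exit() call is currently unreachable.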
def compare(target1, target2):
cl1 = ImageClassifier(4, target1)
cl2 = ImageClassifier(4, target2)
df1 = cl1.get_dataframe()
df2 = cl2.get_dataframe()
print(df1.describe())
print(df2.describe())
exit()
img1 = cv2.imread(target1)
img2 = cv2.imread(target2)
shape1 = img1.shape
shape2 = img2.shape
img1 = np.reshape(img1, (shape1[0] * shape1[1], 3))
img2 = np.reshape(img2, (shape2[0] * shape2[1], 3))
img1 = sorted(img1, key = lambda pixel: sum(pixel))
img2 = sorted(img2, key = lambda pixel: sum(pixel))
img1 = np.reshape(img1, (shape1))
img2 = np.reshape(img2, (shape2))
cv2.imwrite('img1.png', img1)
cv2.imwrite('img2.png', img2)
# bruteforce("town.jpg", "result/town")
# compare("res.png", "town.jpg")
process()
|
elraffray/pyImage
|
classifier.py
|
classifier.py
|
py
| 4,167 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2HLS",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2HLS",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "imageclassifier.ImageClassifier",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "imageclassifier.ImageClassifier",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "imageclassifier.ImageClassifier",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "imageclassifier.ImageClassifier",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 134,
"usage_type": "call"
}
] |
42992886102
|
import gspread
import numpy as np
import pandas as pd
from datetime import date
from datetime import datetime
import csv
import pytz
from oauth2client.service_account import ServiceAccountCredentials
import requests
#authorization
service_account = gspread.service_account(filename = 'capstone-362722-f3745d9260b7.json' )
worksheet = service_account.open('TeamLiftCyberPhysical').sheet1
rows = worksheet.row_count
scope = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/spreadsheets"]
credentials = ServiceAccountCredentials.from_json_keyfile_name('capstone-362722-f3745d9260b7.json', scope)
gc = gspread.authorize(credentials)
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/10g0fkjjrK0k9sa_ynw3O0Stdfp3leNJiJWS0MOM_b94/edit#gid=0')
#this function gets the last time the spreadsheet was updated
def getLastTimeModified():
revisions_uri = f'https://www.googleapis.com/drive/v3/files/{wb.id}/revisions'
headers = {'Authorization': f'Bearer {credentials.get_access_token().access_token}'}
response = requests.get(revisions_uri, headers=headers).json()
return response['revisions'][-1]['modifiedTime']
#this function adds a data row to the spreadsheet with the given params
def addData(rowEntry):
worksheet.append_row(rowEntry)
#sends a csv file line by line to the spreadsheet file on the cloud
def sendFile(filename):
#mod_time_before = getLastTimeModified()
sent_data = np.loadtxt(filename,delimiter=",",dtype = str, ndmin = 2)
#lines= data_file.readlines()
#for iter in range(len(lines)):
#lines[iter] = lines[iter].replace('\n' , '')
#lines[iter] = lines[iter].split(',')
    worksheet.append_rows(sent_data.tolist())
    print("sent to spreadsheet")
def replaceNewline(text):
    return text.replace("\n", "")
#this function gets acknowledgement from google spreadsheets, by retrieving the last n rows that were previously populated on the spreadsheet
# and doing an elementwise comparison with the numpy array that was just sent
def getSpreadsheetAck(filename):
ackSuccess = False
agg_array= np.loadtxt(filename,delimiter=",",dtype=str, ndmin = 2)
print(agg_array)
rowsSent = np.shape(agg_array)[0]
colsSent = np.shape(agg_array)[1]
#rowsSent = np.shape(agg_array)[0]
#colsSent = 3
#if(len(np.shape(agg_array)) == 2):
#colsSent = np.shape(agg_array)[1]
#else:
#colsSent = len(agg_array)
all_data = np.array(worksheet.get_all_values())
all_data_rows = np.shape(all_data)[0]
numRemoteFields = np.shape(all_data)[1]
print("rowsSent = ",rowsSent,"colsSent = ",colsSent,"rows in database= ", all_data_rows)
if((numRemoteFields - 1) == rowsSent):
print("The Number of Fields match between the local and remote database")
remote_array = all_data[all_data_rows -rowsSent :all_data_rows:1 , 0:colsSent]
print(remote_array)
correctDataSent = np.array_equal(agg_array,remote_array)
if(correctDataSent == True):
print("The Correct Data was sent to the Database\n")
ackSuccess = True
if(correctDataSent == False):
print("The Wrong Data was Sent\n")
print("Attempting to send data again")
print(agg_array == remote_array)
ackSuccess = False
return ackSuccess
# timezone_oregon = pytz.timezone('US/Pacific')
# time_now = (datetime.now(timezone_oregon)).strftime('%Y-%m-%d %H:%M:%S')
# print("Data Was Updated at " + str(time_now) )
#this function updates a row in the spreadsheets file, by looking up the value of a column
#parameter columntype is the column of the data we are updating
#parameter columnval is the value of the column to look for
#parameter rowdata is the new data that we are updating it to
def updateData(columntype,columnval,rowdata):
mod_time_before = getLastTimeModified()
#gets all the tabulated data is a 2D array
full_data = worksheet.get_all_values()
# print(full_data)
num_rows = len(full_data)
index = 0
#depending on the columntype, we assign an index,
#this index tells us which column to look inside of
if(columntype == 'pumpvelocity'):
index = 0
if(columntype == 'pressure'):
index = 1
if(columntype == 'timestamp'):
index = 2
#iterates through data
for k in range(0,num_rows):
# print((worksheet.row_values(k))[index])
#finds the row with the target value
#updates that row's data with new values
if((full_data[k])[index] == columnval):
# print("yes")
worksheet.update_cell(k+1,1,rowdata[0])
worksheet.update_cell(k+1,2,rowdata[1])
worksheet.update_cell(k+1,3,rowdata[2])
break
mod_time_after = getLastTimeModified()
print("mod time before update",mod_time_before)
print("mod time after update",mod_time_after)
if(mod_time_before != mod_time_after):
print("Modified at ",mod_time_after )
#this method fetches a data point given the value of a certain column
#for example it might search the data point where flow is equal to 55
def getRecord(columntype,columnval):
full_data = worksheet.get_all_values()
# print(full_data)
num_rows = len(full_data)
index = 0
if(columntype == 'pumpvelocity'):
index = 0
if(columntype == 'pressure'):
index = 1
if(columntype == 'timestamp'):
index = 2
#iterates through data and returns data point that has certain value
for k in range(0,num_rows):
# print((worksheet.row_values(k))[index])
if((full_data[k])[index] == columnval):
# print("yes")
print(full_data[k])
record = full_data[k]
printed_record = {"pumpvelocity":record[0],"pressure":record[1],"timestamp":record[2] }
print(printed_record)
return printed_record
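# A minimal usage sketch (hypothetical file name, not part of the original module):
#
#   sendFile("sensor_readings.csv")                  # push every CSV row to the sheet
#   if not getSpreadsheetAck("sensor_readings.csv"):
#       sendFile("sensor_readings.csv")              # resend once if the remote rows differ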
|
mcenek/TeamLiftCSWaterProject
|
CloudUpload/datapusher.py
|
datapusher.py
|
py
| 5,965 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "gspread.service_account",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "gspread.authorize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 75,
"usage_type": "call"
}
] |
35379919905
|
from flask import Flask
from flask_apscheduler import APScheduler
# config scheduling class
from statuschecker import get_health_status
class Config(object):
JOBS = [
{
'id': 'check_health',
'func': 'app:check_health',
'trigger': 'interval',
'seconds': 1800
}
]
SCHEDULER_API_ENABLED = True
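    # enabling the API exposes flask-apscheduler's REST endpoints (e.g. GET /scheduler/jobs)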
# function triggered every 30 minutes
def check_health():
    return get_health_status()
# flask startup
app = Flask(__name__)
app.config.from_object(Config())
# initiate scheduler
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
tynorantoni/HealthCheckService
|
app.py
|
app.py
|
py
| 687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "statuschecker.get_health_status",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_apscheduler.APScheduler",
"line_number": 33,
"usage_type": "call"
}
] |
29010500134
|
import functools
import os
import sys
from typing import Any, Callable, Iterable, Optional, TextIO, Tuple
import click
from click import Command
from click_option_group import MutuallyExclusiveOptionGroup
from . import __version__
from .core import (
CheckHashLineError,
HashFileReader,
HashFileWriter,
ParseHashLineError,
check_hash_line,
generate_hash_line,
)
from .hasher import HashContext, Hasher
from .utils.click import CommandX, PathWithSuffix
from .utils.glob import glob_filters, sorted_path
class ParseHashFileError(ValueError):
def __init__(self, hash_line: str, lineno: int) -> None:
super().__init__(hash_line, lineno)
self.hash_line = hash_line
self.lineno = lineno
class Output:
"""Determine the output mode and provide the output interface."""
def __init__(
self, agg: Optional[str] = None, sep: Optional[bool] = None, null: Optional[bool] = None, sync: bool = False
) -> None:
if (agg and sep) or (agg and null) or (sep and null):
raise ValueError("require exactly one argument")
# Use the null mode by default.
if not (agg or sep or null):
null = True
# Determine the output mode and dump method.
if agg:
self.agg_file = HashFileWriter(agg)
self._dump = self.output_agg
elif sep:
self._dump = self.output_sep
elif null:
self._dump = self.output_null
self.sync = sync
self.maxmtime = 0.0
def close(self) -> None:
try:
agg_file = self.agg_file
except AttributeError:
pass
else:
agg_file.close()
if self.sync:
os.utime(agg_file.name, (self.maxmtime, self.maxmtime))
def dump(self, hash_line: str, hash_path: str, path: str) -> None:
self._dump(hash_line, hash_path, path)
def output_agg(self, hash_line: str, hash_path: str, path: str) -> None:
self.agg_file.write_hash_line(hash_line)
if self.sync:
mtime = os.path.getmtime(path)
self.maxmtime = max(self.maxmtime, mtime)
def output_sep(self, hash_line: str, hash_path: str, path: str) -> None:
with HashFileWriter(hash_path) as f:
f.write_hash_line(hash_line)
if self.sync:
mtime = os.path.getmtime(path)
os.utime(hash_path, (mtime, mtime))
def output_null(self, hash_line: str, hash_path: str, path: str) -> None:
pass
class Gethash:
"""Provide uniform interface for CLI scripts."""
stdout: TextIO
stderr: TextIO
glob_mode: int
glob_type: str
inplace: bool
root: Optional[str]
start: Optional[int]
stop: Optional[int]
dir_ok: bool
def __init__(self, ctx: HashContext, **kwargs: Any) -> None:
self.ctx = ctx
self.sync = kwargs.pop("sync", False)
self.suffix = kwargs.pop("suffix", ".sha")
self.stdout = kwargs.pop("stdout", sys.stdout)
self.stderr = kwargs.pop("stderr", sys.stderr)
self.glob_mode = kwargs.pop("glob", 1)
self.glob_type = kwargs.pop("type", "a")
# Determine the path format.
self.inplace = kwargs.pop("inplace", False)
self.root = kwargs.pop("root", None)
# Determine the output mode.
agg = kwargs.pop("agg", None)
sep = kwargs.pop("sep", None)
null = kwargs.pop("null", None)
self.output = Output(agg, sep, null, sync=self.sync)
# Prepare arguments and construct the hash function.
self.start = kwargs.pop("start", None)
self.stop = kwargs.pop("stop", None)
self.dir_ok = kwargs.pop("dir", False)
tqdm_args = {
"file": self.stderr,
"ascii": kwargs.pop("tqdm_ascii", False),
"disable": kwargs.pop("tqdm_disable", False),
"leave": kwargs.pop("tqdm_leave", False),
}
self.hasher = Hasher(ctx, tqdm_args=tqdm_args)
def __call__(self, files: Iterable[str], *, check: bool) -> None:
if check:
self.check_hash(files)
else:
self.generate_hash(files)
def __enter__(self) -> "Gethash":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def close(self) -> None:
self.output.close()
def generate_hash(self, patterns: Iterable[str]) -> None:
for path in self.glob_function(patterns):
try:
root = self.check_root(path)
hash_line = generate_hash_line(path, self.hash_function, root=root)
hash_path = path + self.suffix
self.output.dump(hash_line, hash_path, path)
except Exception as e:
self.echo_exception(path, e)
else:
# The hash line already has a newline.
self.echo(hash_line, nl=False)
def check_hash(self, patterns: Iterable[str]) -> None:
for hash_path in self.glob_function(patterns):
try:
self._check_hash(hash_path)
except ParseHashFileError as e:
# Strip newline for pretty printing.
hash_line = e.hash_line.rstrip("\n")
msg = f"[ERROR] invalid hash '{hash_line}' in '{hash_path}' at line {e.lineno}"
self.echo_error(msg, fg="white", bg="red")
except Exception as e:
self.echo_exception(hash_path, e)
def _check_hash(self, hash_path: str) -> None:
maxmtime = 0.0
for i, hash_line in enumerate(HashFileReader(hash_path)):
try:
root = self.check_root(hash_path)
path = check_hash_line(hash_line, self.hash_function, root=root)
maxmtime = max(maxmtime, os.path.getmtime(path))
except ParseHashLineError as e:
raise ParseHashFileError(e.hash_line, i)
except CheckHashLineError as e:
self.echo(f"[FAILURE] {e.path}", fg="red")
else:
self.echo(f"[SUCCESS] {path}", fg="green")
if self.sync:
os.utime(hash_path, (maxmtime, maxmtime))
def check_root(self, path: str) -> Optional[str]:
if self.inplace:
return os.path.dirname(path)
return self.root
def glob_function(self, paths: Iterable[str]) -> Iterable[str]:
return sorted_path(
glob_filters(paths, mode=self.glob_mode, type=self.glob_type, recursive=True, user=True, vars=True)
)
def hash_function(self, path: str) -> bytes:
return self.hasher(path, self.start, self.stop, dir_ok=self.dir_ok)
def echo(self, msg: str, **kwargs: Any) -> None:
click.secho(msg, file=self.stdout, **kwargs)
def echo_error(self, msg: str, **kwargs: Any) -> None:
click.secho(msg, file=self.stderr, **kwargs)
def echo_exception(self, path: str, exc: Exception) -> None:
msg = f"[ERROR] {path}\n\t{type(exc).__name__}: {exc}"
click.secho(msg, file=self.stderr, fg="red")
def script_main(ctx: HashContext, files: Tuple[str, ...], **options: Any) -> None:
"""Execute the body for the main function."""
no_stdout = options.pop("no_stdout", False)
no_stderr = options.pop("no_stderr", False)
stdout = open(os.devnull, "w") if no_stdout else sys.stdout # noqa
stderr = open(os.devnull, "w") if no_stderr else sys.stderr # noqa
check = options.pop("check", False)
with Gethash(ctx, stdout=stdout, stderr=stderr, **options) as gethash:
gethash(files, check=check)
def gethashcli(command_name: str, display_name: str, **extras: Any) -> Callable[[Callable], Command]:
"""Apply click decorators to the main function."""
suffix = extras.pop("suffix", "." + command_name.replace("-", "_"))
doc = extras.pop("doc", None)
def decorator(func: Callable) -> Command:
if doc is not None:
func.__doc__ = doc
context_settings = {"help_option_names": ["-h", "--help"], "max_content_width": 120}
path_format = MutuallyExclusiveOptionGroup("Path Format")
output_mode = MutuallyExclusiveOptionGroup("Output Mode")
@click.command(command_name, cls=CommandX, context_settings=context_settings, no_args_is_help=True)
@click.argument("files", nargs=-1)
@click.option(
"-c",
"--check",
is_flag=True,
help=f"Read {display_name} from FILES and check them.",
)
@click.option(
"-y",
"--sync",
is_flag=True,
help="Update mtime of hash files to the same as data files.",
)
@click.option(
"-g",
"--glob",
type=click.IntRange(0, 2),
metavar="[0|1|2]",
default=1,
show_default=True,
help="Set glob mode. If ``0``, disable glob pathname pattern; if ``1``, "
"resolve ``*`` and ``?``; if ``2``, resolve ``*``, ``?`` and ``[]``.",
)
@click.option(
"-t",
"--type",
type=click.Choice(["a", "d", "f"]),
default="a",
show_default=True,
help="Set file type. If ``a``, include all types; if ``d``, include "
"directories; if ``f``, include files.",
)
@path_format.option("-i", "--inplace", is_flag=True, help="Use basename in checksum files.")
@path_format.option(
"-z",
"--root",
type=click.Path(exists=True, file_okay=False),
help="The path field in checksum files is relative to the root directory.",
)
@output_mode.option(
"-o",
"--agg",
type=PathWithSuffix(suffix=suffix, dir_okay=False),
help="Set the aggregate output file.",
)
@output_mode.option("-s", "--sep", is_flag=True, help="Separate output files.")
@output_mode.option(
"-n",
"--null",
is_flag=True,
help="Do not output to files. This is the default output mode.",
)
@click.option("--start", type=click.IntRange(min=0), help="The start offset of files.")
@click.option("--stop", type=click.IntRange(min=0), help="The stop offset of files.")
@click.option(
"-d",
"--dir",
is_flag=True,
help="Allow checksum for directories. Just xor each checksum of files in a given directory.",
)
@click.option("--no-stdout", is_flag=True, help="Do not output to stdout.")
@click.option("--no-stderr", is_flag=True, help="Do not output to stderr.")
@click.option("--tqdm-ascii", type=click.BOOL, default=False, show_default=True)
@click.option("--tqdm-disable", type=click.BOOL, default=False, show_default=True)
@click.option("--tqdm-leave", type=click.BOOL, default=False, show_default=True)
@click.version_option(__version__, "-V", "--version", prog_name=command_name)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
kwargs.setdefault("suffix", suffix)
return func(*args, **kwargs)
return wrapper
return decorator
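# A minimal usage sketch of this module (hypothetical wrapper script, not part of this file):
#
#   from hashlib import sha256
#
#   @gethashcli("sha256", "SHA256", doc="Generate or check SHA256.")
#   def main(files, **kwargs):
#       script_main(sha256(), files, **kwargs)
#
#   if __name__ == "__main__":
#       main()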
|
xymy/gethash
|
src/gethash/script.py
|
script.py
|
py
| 11,381 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "core.HashFileWriter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.utime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "core.HashFileWriter",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.utime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "typing.TextIO",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.TextIO",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "hasher.HashContext",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "hasher.Hasher",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "core.generate_hash_line",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "core.HashFileReader",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "core.check_hash_line",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "core.ParseHashLineError",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "core.CheckHashLineError",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "os.utime",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "utils.glob.sorted_path",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "utils.glob.glob_filters",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "click.secho",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "click.secho",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "click.secho",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "hasher.HashContext",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "os.devnull",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "os.devnull",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "click_option_group.MutuallyExclusiveOptionGroup",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "click_option_group.MutuallyExclusiveOptionGroup",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "click.command",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "utils.click.CommandX",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "click.argument",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "click.IntRange",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "click.Choice",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "click.Path",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "utils.click.PathWithSuffix",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "click.IntRange",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "click.IntRange",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "click.BOOL",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "click.option",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "click.BOOL",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "click.option",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "click.BOOL",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "click.version_option",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "click.Command",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "click.Command",
"line_number": 228,
"usage_type": "name"
}
] |
42307014223
|
import os
import sys
import time
from acbbs.drivers.ate.ClimCham import ClimCham
from acbbs.drivers.ate.DCPwr import DCPwr
from acbbs.drivers.ate.PwrMeter import PwrMeter
from acbbs.drivers.ate.RFSigGen import RFSigGen
from acbbs.drivers.ate.RFSigGenV import RFSigGenV
from acbbs.drivers.ate.SpecAn import SpecAn
from acbbs.drivers.ate.Swtch import Swtch
from acbbs.tools.log import get_logger
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError, DuplicateKeyError
import configuration
from .drivers.PwrMeterCal import PowerMeterCal
from .drivers.RFSigGenCal import RFSigGenCal
logger = get_logger('calib')
CHANNELS = configuration.CHANNELS
INPUTS = configuration.INPUTS
OUTPUTS = configuration.OUTPUTS
CONF_PATH = configuration.CONF_PATH
LIST_PATH = configuration.LIST_PATH
class NetworkEquipment(object):
def __init__(self, simu):
logger.info('class Ping init')
self.PwrMeter = PwrMeter(simulate=simu)
self.SpecAn = SpecAn(simulate=simu)
self.RFSigGen = RFSigGen(simulate=simu)
self.RFSigGenV = RFSigGenV(simulate=simu)
self.Swtch = Swtch(simulate=simu)
self.ClimCham = ClimCham(simulate=simu)
self.DCPwr = DCPwr(simulate=simu)
self.PwrMeterCal = PowerMeterCal(simulate=simu)
self.RFSigGenCal = RFSigGenCal(simulate=simu)
self.get_ip()
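    # Collect the instrument IP addresses from each driver's configuration and group them by role (rx/tx/DC/Chamber).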
def get_ip(self):
ip_specAn = self.SpecAn.SpecAnConf['ip']
ip_sigGen = self.RFSigGen.sigGenConf['ip']
ip_pwMeter = self.PwrMeter.PwrMeterConf['ip']
ip_sigGenV = self.RFSigGenV.sigGenConf['ip']
ip_ClimCham = self.ClimCham.dcConf['ip']
ip_dc1 = self.DCPwr.dcConf['powerDevice1-ip']
ip_dc2 = self.DCPwr.dcConf['powerDevice2-ip']
self.listIP = {'rx': {'RFSigGen': ip_sigGen, 'RFSigGenV': ip_sigGenV},
'tx': {'PwrMeter': ip_pwMeter, 'SpecAn': ip_specAn},
'DC': {'DC1': ip_dc1, 'DC2': ip_dc2},
'Chamber': {'climCham': ip_ClimCham},
}
def ping_one(self, IP):
response = os.system("ping -c 1 " + IP)
if response == 0:
logger.info("Network Equipement Active at adresse:{0}".format(IP))
return 0
else:
logger.error('Network Equipement Error : {0}'.format(IP))
return 1
    def check_one_instrument(self, instrum):
        # default to "not reachable" when the instrument name is unknown
        result = 1
        for mode, instrums in self.listIP.items():
            if instrum in instrums.keys():
                result = self.ping_one(self.listIP[mode][instrum])
                break
        return result
def ping_all(self):
list_pingReturn = self.listIP
for mode, instrums in self.listIP.items():
for instrum, ip in instrums.items():
list_pingReturn[mode][instrum] = self.ping_one(ip)
return list_pingReturn
    def check_all_instruments(self):
        listPing = self.ping_all()
        # ping_all returns a nested dict {mode: {instrument: status}}, so flatten it before testing
        if all(status == 0 for statuses in listPing.values() for status in statuses.values()):
            return 0
        else:
            return 1 # return a table indicating which instrument is disconnected
class database(object):
def __init__(self):
self.__openDataBase()
def __openDataBase(self):
# get server, port and database from json configuration file
server = configuration.DATABASE_IP
port = configuration.DATABASE_PORT
database = configuration.DATABASE_NAME_CALIB
maxSevSelDelay = configuration.DATABASE_MAXDELAY
try:
# open MongoDB server
self.client = MongoClient(server, int(port), serverSelectionTimeoutMS=maxSevSelDelay)
# check if connection is well
self.client.server_info()
except ServerSelectionTimeoutError as err:
print("{0}".format(err))
exit(0)
# open MongoDB database
self.db = self.client[database]
def get_available_collection(self):
return self.db.list_collection_names()
def get_collection(self, collection):
if collection not in self.get_available_collection():
print("Error: conf {0} does not exist. You can list available collection with --list".format(collection))
return self.db[collection].find({})
def writeDataBase(self, document, collection):
if collection in self.get_available_collection():
print("Error: conf {0} exist. You can delete it with --delete {0}".format(collection))
self.db_collection = self.db[collection]
try:
self.db_collection.insert_one(document).inserted_id
except DuplicateKeyError as err:
print("{0}".format(err))
def delete_collection(self, collection):
if collection not in self.get_available_collection():
print("Error: conf {0} does not exist. You can list available collection with --list".format(collection))
self.db.drop_collection(collection)
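# MatrixCal persists the measured path-loss matrix in MongoDB, one collection per calibration date.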
class MatrixCal(object):
def __init__(self):
self.calibFile = {"date": "", "loss": {}}
self.db = database()
def get_cal(self, date):
for doc in self.db.get_collection(date):
calibFile = doc
return calibFile
def getlossPath(self, port_in, port_out, date):
cal = self.get_cal(date)
data = cal[port_in][port_out]
return data
def write_cal(self, data):
self.calibFile["loss"] = data
self.calibFile["date"] = time.strftime("%Y-%m-%d %H:%M:%S")
self.db.writeDataBase(self.calibFile["loss"], self.calibFile["date"])
def readPath_loss(self, port_in, port_out):
return self.data["loss"][port_in][port_out]
def del_cal(self, cal_name):
self.db.delete_collection(cal_name)
def history(self):
return self.db.get_available_collection()
class Calibration(object):
def __init__(self, simu):
self.equipement = NetworkEquipment(simu=simu)
self.channels = CHANNELS
self.simu = simu
self.iteration = 0
self.totalProgress = 0
self.paths = LIST_PATH
self.message = ""
self.response = 0
self.matrixCal = MatrixCal()
self.loss = {INPUTS[4]: {}, INPUTS[2]: {}, INPUTS[3]: {}, INPUTS[0]: {}, INPUTS[1]: {}, INPUTS[5]: {}}
self.delta = {}
self.pathlist = list()
for i in self.paths.keys():
self.pathlist.append(i)
def calibrate(self, tab_freq, pwr):
self.tab_freq = tab_freq
self.OUTPUT_POWER_CALIBRATION = int(pwr)
self.totalProgress = (len(INPUTS) - 2 + len(OUTPUTS)) * len(tab_freq)
print('calibration start')
self.SMBCal()
self.SMBVCal()
self.PwrMeterCal()
self.FSWCal()
self.NoiseCal()
self.makeDelta()
self.makeMatrixCal()
self.matrixCal.write_cal(self.loss)
def SMBCal(self):
loss = configuration.PORT_SMB
pathJ4Jx = self.pathlist[1]
# calibration of J4_20dB - J9
print("calibration of SMB, plug the power meter cal to J9")
while self.response == 0:
self.message = " calibration of SMB, plug the power meter cal to J9 "
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ4Jx]["sw3"], sw4=self.paths[pathJ4Jx]["sw4"])
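        # For each frequency: loss = programmed generator power - power measured at the output port.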
for freq in self.tab_freq:
self.equipement.RFSigGen.freq = freq
self.equipement.RFSigGen.power = self.OUTPUT_POWER_CALIBRATION
self.equipement.RFSigGen.status = 1
time.sleep(1)
loss["J4_20dB"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGen.status = 0
self.iteration += 1
self.loss["J4_20dB"]["J9"] = loss["J4_20dB"]
# calibration of J4 - Jx
for channel in self.channels:
print(" plug the power meter cal to J{0}".format(channel + 8))
while self.response == 0:
self.message = " plug the power meter cal to {0}".format(channel + 8)
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
port = pathJ4Jx.replace("Jx", "J" + str(channel + 8))
self.equipement.Swtch.setSwitch(sw1=channel, sw3=self.paths[pathJ4Jx]["sw3"],sw4=self.paths[pathJ4Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.RFSigGen.freq = freq
self.equipement.RFSigGen.power = self.OUTPUT_POWER_CALIBRATION
self.equipement.RFSigGen.status = 1
time.sleep(1)
loss["J4"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGen.status = 0
self.iteration += 1
self.loss["J4"]["J" + str(channel + 8)] = loss["J4"]
def SMBVCal(self):
loss = configuration.PORT_SMBV
pathJ3Jx = self.pathlist[3]
print(" calibration of SMBV, plug the power meter of the cal to J9")
while self.response == 0:
self.message = "plug the power meter cal to J9 "
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J3 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ3Jx]["sw3"], sw4=self.paths[pathJ3Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.RFSigGenV.freq = freq
self.equipement.RFSigGenV.power = self.OUTPUT_POWER_CALIBRATION
# self.equipement.PowerMeterCal = freq
self.equipement.RFSigGenV.status = 1
time.sleep(1)
loss["J3"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGenV.status = 0
self.iteration += 1
self.loss["J3"]["J9"] = loss["J3"]
def PwrMeterCal(self):
loss = configuration.PORT_PowerMeter
pathJ2Jx = self.pathlist[5]
print(" calibration of Power Meter, plug the RF generator cal to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J2 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ2Jx]["sw3"], sw4=self.paths[pathJ2Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.PwrMeter.freq = freq
time.sleep(1)
loss["J2"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeter.power
self.iteration += 1
self.loss["J2"]["J9"] = loss["J2"]
def FSWCal(self):
loss = configuration.PORT_FSW
pathJ2Jx = self.pathlist[4]
print(" calibration of FSW, plug the RF generator cal to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J5 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ2Jx]["sw3"], sw4=self.paths[pathJ2Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.SpecAn.freqSpan = 10000000
pic = self.equipement.SpecAn.markerPeakSearch()
time.sleep(1)
loss["J5"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - pic[1]
self.iteration += 1
self.loss["J5"]["J9"] = loss["J5"]
    ######### NOT IMPLEMENTED ################
def NoiseCal(self):
loss = configuration.PORT_NOISE
pathJ18Jx = self.pathlist[0]
print(" calibration of Noise, plug the RF generator cal to J18 and the power meter to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J18 and the power meter to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J5 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ18Jx]["sw3"], sw4=self.paths[pathJ18Jx]["sw4"])
for freq in self.tab_freq:
loss["J18"][str(freq)] = self.OUTPUT_POWER_CALIBRATION
self.iteration += 1
self.loss["J18"]["J9"] = loss["J18"]
def makeDelta(self):
for channel in self.channels:
Jout = "J" + str(channel + 8)
delta_freq = {}
self.delta[Jout] = {}
for freq in self.tab_freq:
delta_freq[str(freq)] = self.loss["J4"][Jout][str(freq)] - self.loss["J4"]["J9"][str(freq)]
self.delta[Jout] = delta_freq
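    # Estimated loss Jin->Jx = measured loss Jin->J9 + delta[Jx], applied to every remaining channel.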
def makeMatrixCal(self):
for Jin in self.loss.keys():
for channel in self.channels[1:]:
Jout = "J" + str(channel + 8)
self.loss[Jin][Jout] = {}
estimate_loss = {}
for freq in self.tab_freq:
estimate_loss[str(freq)] = self.loss[Jin]["J9"][str(freq)] + self.delta[Jout][str(freq)]
self.loss[Jin][Jout] = estimate_loss
|
Wonters/IHMweb
|
calib/tasks.py
|
tasks.py
|
py
| 13,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "acbbs.tools.log.get_logger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "configuration.CHANNELS",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "configuration.INPUTS",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "configuration.OUTPUTS",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "configuration.CONF_PATH",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "configuration.LIST_PATH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "acbbs.drivers.ate.PwrMeter.PwrMeter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.SpecAn.SpecAn",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.RFSigGen.RFSigGen",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.RFSigGenV.RFSigGenV",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.Swtch.Swtch",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.ClimCham.ClimCham",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "acbbs.drivers.ate.DCPwr.DCPwr",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "drivers.PwrMeterCal.PowerMeterCal",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "drivers.RFSigGenCal.RFSigGenCal",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "configuration.DATABASE_IP",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "configuration.DATABASE_PORT",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "configuration.DATABASE_NAME_CALIB",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "configuration.DATABASE_MAXDELAY",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pymongo.errors.ServerSelectionTimeoutError",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "pymongo.errors.DuplicateKeyError",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "configuration.PORT_SMB",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "configuration.PORT_SMBV",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "configuration.PORT_PowerMeter",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "configuration.PORT_FSW",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "configuration.PORT_NOISE",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 340,
"usage_type": "call"
}
] |
29128123138
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import embed_video.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tracks', '0006_auto_20150604_1856'),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(default=b'untitled', max_length=128, verbose_name='Title')),
('video', embed_video.fields.EmbedVideoField(help_text=b'Link to youtube or vimeo', verbose_name='Video Link')),
('user', models.ForeignKey(related_name='videos', to=settings.AUTH_USER_MODEL)),
],
),
]
|
TimBest/ComposersCouch
|
tracks/migrations/0007_video.py
|
0007_video.py
|
py
| 924 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "embed_video.fields.fields.EmbedVideoField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "embed_video.fields.fields",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "embed_video.fields",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 23,
"usage_type": "name"
}
] |
21396441749
|
import os
from django.conf import settings
from django.db import connection, close_old_connections
from django.db.utils import OperationalError
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from racetrack_client.utils.shell import shell, CommandError
from lifecycle.django.registry.database import db_access
from lifecycle.config import Config
def setup_health_endpoint(api: FastAPI, config: Config):
@api.get("/live", tags=['root'])
async def _live():
"""Report service liveness: whether it has started"""
return {
'service': 'lifecycle',
'live': True,
}
@api.get("/ready", tags=['root'])
async def _ready():
"""Report service readiness: whether it's available for accepting traffic"""
return {
'service': 'lifecycle',
'ready': True,
}
@api.get("/health", tags=['root'])
def _health():
"""Report current application status"""
db_connected = is_database_connected()
status_code = 200 if db_connected else 500
content = {
'service': 'lifecycle',
'live': True,
'ready': db_connected,
'database_connected': db_connected,
'git_version': os.environ.get('GIT_VERSION', 'dev'),
'docker_tag': os.environ.get('DOCKER_TAG', ''),
'auth_required': config.auth_required,
}
return JSONResponse(content=content, status_code=status_code)
@db_access
def is_database_connected() -> bool:
try:
django_db_type = os.environ.get('DJANGO_DB_TYPE', 'sqlite')
if django_db_type == 'postgres':
db_name = settings.DATABASES['default']['NAME']
user = settings.DATABASES['default']['USER']
host = settings.DATABASES['default']['HOST']
port = settings.DATABASES['default']['PORT']
shell(f'pg_isready -h {host} -p {port} -U {user} -d {db_name}', print_stdout=False)
close_old_connections()
with connection.cursor() as cursor:
cursor.execute('select 1')
cursor.fetchone()
cursor.close()
connection.close()
return True
except CommandError:
return False
except OperationalError:
return False
|
TheRacetrack/racetrack
|
lifecycle/lifecycle/endpoints/health.py
|
health.py
|
py
| 2,317 |
python
|
en
|
code
| 27 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "lifecycle.config.Config",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "fastapi.responses.JSONResponse",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "racetrack_client.utils.shell.shell",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.db.close_old_connections",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.db.connection.cursor",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.db.connection",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.db.connection.close",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.db.connection",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "racetrack_client.utils.shell.CommandError",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.db.utils.OperationalError",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "lifecycle.django.registry.database.db_access",
"line_number": 49,
"usage_type": "name"
}
] |
8655705907
|
import errno
import os
import requests
from pathlib import Path
import sly_globals as g
import supervisely as sly
from supervisely.app.v1.widgets.progress_bar import ProgressBar
progress5 = ProgressBar(g.task_id, g.api, "data.progress5", "Download weights", is_size=True, min_report_percent=5)
local_weights_path = None
def get_models_list():
from train import model_list
res = []
for name, data in model_list.items():
res.append({
"model": name,
"description": data["description"]
})
return res
def get_table_columns():
return [
{"key": "model", "title": "Model", "subtitle": None},
{"key": "description", "title": "Description", "subtitle": None},
]
def get_model_info_by_name(name):
models = get_models_list()
for info in models:
if info["model"] == name:
return info
raise KeyError(f"Model {name} not found")
def init(data, state):
models = get_models_list()
data["models"] = models
data["modelColumns"] = get_table_columns()
state["selectedModel"] = models[0]["model"]
state["weightsInitialization"] = "random" # "custom"
state["collapsed5"] = True
state["disabled5"] = True
progress5.init_data(data)
state["weightsPath"] = ""
data["done5"] = False
def restart(data, state):
data["done5"] = False
@g.my_app.callback("download_weights")
@sly.timeit
@g.my_app.ignore_errors_and_show_dialog_window()
def download_weights(api: sly.Api, task_id, context, state, app_logger):
#"https://download.pytorch.org/models/vgg11-8a719046.pth" to /root/.cache/torch/hub/checkpoints/vgg11-8a719046.pth
from train import model_list
global local_weights_path
try:
if state["weightsInitialization"] == "custom":
weights_path_remote = state["weightsPath"]
if not weights_path_remote.endswith(".pth"):
raise ValueError(f"Weights file has unsupported extension {sly.fs.get_file_ext(weights_path_remote)}. "
f"Supported: '.pth'")
# get architecture type from previous UI state
prev_state_path_remote = os.path.join(str(Path(weights_path_remote).parents[1]), "info/ui_state.json")
prev_state_path = os.path.join(g.my_app.data_dir, "ui_state.json")
api.file.download(g.team_id, prev_state_path_remote, prev_state_path)
prev_state = sly.json.load_json_file(prev_state_path)
api.task.set_field(g.task_id, "state.selectedModel", prev_state["selectedModel"])
local_weights_path = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote))
if sly.fs.file_exists(local_weights_path) is False:
file_info = g.api.file.get_info_by_path(g.team_id, weights_path_remote)
if file_info is None:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), weights_path_remote)
progress5.set_total(file_info.sizeb)
g.api.file.download(g.team_id, weights_path_remote, local_weights_path, g.my_app.cache, progress5.increment)
progress5.reset_and_update()
else:
weights_url = model_list[state["selectedModel"]].get("pretrained")
if weights_url is not None:
default_pytorch_dir = "/root/.cache/torch/hub/checkpoints/"
#local_weights_path = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_url))
local_weights_path = os.path.join(default_pytorch_dir, sly.fs.get_file_name_with_ext(weights_url))
if sly.fs.file_exists(local_weights_path) is False:
response = requests.head(weights_url, allow_redirects=True)
sizeb = int(response.headers.get('content-length', 0))
progress5.set_total(sizeb)
os.makedirs(os.path.dirname(local_weights_path), exist_ok=True)
sly.fs.download(weights_url, local_weights_path, g.my_app.cache, progress5.increment)
progress5.reset_and_update()
sly.logger.info("Pretrained weights has been successfully downloaded",
extra={"weights": local_weights_path})
except Exception as e:
progress5.reset_and_update()
raise e
fields = [
{"field": "data.done5", "payload": True},
{"field": "state.collapsed6", "payload": False},
{"field": "state.disabled6", "payload": False},
{"field": "state.activeStep", "payload": 6},
]
g.api.app.set_fields(g.task_id, fields)
def restart(data, state):
data["done5"] = False
|
supervisely-ecosystem/unet
|
supervisely/train/src/ui/step05_models.py
|
step05_models.py
|
py
| 4,736 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "supervisely.app.v1.widgets.progress_bar.ProgressBar",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sly_globals.task_id",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.api",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "train.model_list.items",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "train.model_list",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "supervisely.Api",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.get_file_ext",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.team_id",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "supervisely.json.load_json_file",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "supervisely.json",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.task_id",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.get_file_name_with_ext",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.file_exists",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.api.file.get_info_by_path",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sly_globals.api",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.team_id",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "errno.ENOENT",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.strerror",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sly_globals.api.file.download",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sly_globals.api",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.team_id",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "train.model_list",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.get_file_name_with_ext",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.file_exists",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "requests.head",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "supervisely.fs.download",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "supervisely.fs",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "supervisely.logger.info",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "supervisely.logger",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.api.app.set_fields",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sly_globals.api",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.task_id",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app.callback",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sly_globals.my_app",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "supervisely.timeit",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "sly_globals.my_app.ignore_errors_and_show_dialog_window",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sly_globals.my_app",
"line_number": 62,
"usage_type": "attribute"
}
] |
11353167972
|
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
from astropy.table import Table, Column
from .import_modules import *
##----- ----- ----- ----- ----- ----- ----- ----- ----- -----##
## Miscellaneous utilities
## Contain functions that do not pertain to a particular class.
##----- ----- ----- ----- ----- ----- ----- ----- ----- -----##
def Fit_linear(y, x=None, err=1.0, m=None, b=None, output=None, inline=False):
"""
Fit_linear(y, x=None, err=1.0, m=None, b=None, output=None, inline=False):
return (sol, res, rank, s)
Uses the scipy.linalg.lstsq function to solve the equation y = mx + b
sol -> [b, m]
N.B. Uses the scipy.linalg.lstsq algorithm.
If inline = True, flattens the results.
"""
#x = array([52997., 53210., 53310., 53380.])
#y = array([1.66, 1.54, 1.4, 1.4])
# standard error of the y-variable:
#sy = array([0.05, 0.05, 0.05, 0.05])
if x is None:
x = np.arange(y.shape[0], dtype=float)
if (b is not None) and (m is not None):
sol = [b, m]
res = (((b + m*x - y)/err)**2).sum()
rank = 0.
s = 0.
else:
if b is not None:
A = np.reshape(x/err,(x.shape[0],1))
y1 = y-b
y1 /= err
sol, res, rank, s = scipy.linalg.lstsq(A, y1)
sol = [b,sol[0]]
elif m is not None:
A = np.resize(1/err,(x.shape[0],1))
y1 = y-m*x
y1 /= err
sol, res, rank, s = scipy.linalg.lstsq(A, y1)
sol = [sol[0],m]
else:
A = (np.vstack([np.ones(x.shape[0], dtype=float),x])/err).T
y1 = y/err
sol, res, rank, s = scipy.linalg.lstsq(A, y1)
if output:
b, m = sol
fit_y = b + m*x
print('b -> ' + str(b))
print('m -> ' + str(m))
print('Reduced chi-square: ' + str(res/(len(y)-rank)))
plotxy(y, x, line=None, symbol=2, color=2)
plotxy(fit_y, x)
if res.shape == (0,):
res = np.r_[0.]
if inline:
return np.hstack((sol, res, rank, s))
else:
return (sol, res, rank, s)
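# Illustrative usage (assumes numpy is exposed as np via import_modules):
#   sol, res, rank, s = Fit_linear(np.array([1.0, 2.1, 2.9, 4.2]), err=0.1)
#   b, m = sol  # intercept and slope of the weighted least-squares fit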
def Pprint(arr, show_index=False, max_lines=None):
arr = np.atleast_2d(arr)
if show_index:
cols = np.arange(arr.shape[1]).astype(str)
#rows = np.arange(arr.shape[0]).astype(str)
rows = np.array([r+' |' for r in np.arange(arr.shape[0]).astype(str)])
t = Table(data=arr, names=cols, copy=True)
t.add_column(Column(data=rows, name=' '), index=0)
else:
t = Table(data=arr, copy=True)
t.pprint(show_name=show_index, max_lines=max_lines)
def Sort_list(lst, cols):
"""Sort_list(lst, cols)
Sorts inplace a list by multiple columns.
lst: List to be sorted.
cols: Columns to be sorted, cols[0] first,
cols[1] second, etc.
>>> lst = [(1,2,4),(3,2,1),(2,2,2),(2,1,4),(2,4,1)]
>>> Sort_list(lst, [2,1])
"""
from operator import itemgetter
for keycolumn in reversed(cols):
lst.sort(key=itemgetter(keycolumn))
return
|
bretonr/Icarus
|
Icarus/Utils/Misc.py
|
Misc.py
|
py
| 3,104 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "astropy.table.Table",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "astropy.table.Column",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 93,
"usage_type": "call"
}
] |
20503848569
|
# Parse the page with fresh articles (this one) and pick the articles that contain at least one of the keywords (these words are defined at the top of the script). Search across all the available preview information (i.e. the information accessible directly from the current page). Print the list of matching articles to the console in the format: <date> - <title> - <link>.
# define the list of keywords
KEYWORDS = ['дизайн', 'фото', 'web', 'python']
import requests
from bs4 import BeautifulSoup
# from pprint import pprint
# import string
import re
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,sv;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': '_ym_uid=1661790138398573269; _ym_d=1661790138; habr_web_home_feed=/all/; hl=ru; fl=ru; _ym_isad=1; _ga=GA1.2.1864422457.1661790139; _gid=GA1.2.2059705457.1661790139; _gat_gtag_UA_726094_1=1',
'DNT': '1',
'Host': 'habr.com',
'Referer': 'https://yandex.ru/',
'sec-ch-ua': '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
}
url = 'https://habr.com'
responce = requests.get(url+'/ru/all', headers=headers)
text = responce.text
soup = BeautifulSoup(text, 'html.parser')
articles = soup.find_all(class_='tm-articles-list__item')
for article in articles:
preview = article.find(class_=['article-formatted-body article-formatted-body article-formatted-body_version-2', 'article-formatted-body article-formatted-body article-formatted-body_version-1']).text
    # Variant using set comparison
# for p in string.punctuation:
# if p in preview:
# preview = preview.replace(p, '')
# preview = set(preview.split())
# if preview & set(KEYWORDS):
# data_1 = article.find(class_='tm-article-snippet__datetime-published')
# data_2 = data_1.find('time')
# data = data_2.attrs['title']
# print(f'Дата статьи: {data}')
# title = article.find(class_='tm-article-snippet__title-link').text.strip()
# print(f'Название статьи: {title}')
# link = article.find(class_='tm-article-snippet__title tm-article-snippet__title_h2')
# link = link.find('a')
# href = link.attrs['href']
# print(f'Ссылка на статью: {url + href}')
# print()
    # Variant using a regular expression
for i in KEYWORDS:
if re.search(i, preview):
data = article.find(class_='tm-article-snippet__datetime-published').find('time').attrs['title']
print(f'Дата: {data}')
title = article.find(class_='tm-article-snippet__title-link').text.strip()
print(f'Заголовок: {title}')
link = article.find(class_='tm-article-snippet__title tm-article-snippet__title_h2').find('a').attrs['href']
print(f'Ссылка: {url + link}')
print()
|
Dimasuz/HW_4.3
|
HW_4.3.py
|
HW_4.3.py
|
py
| 3,750 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 69,
"usage_type": "call"
}
] |
70097868029
|
import pygame as pg
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, screen, settings):
super(Ship, self).__init__()
self.screen = screen
self.settings = settings
self.sprite = pg.image.load('./assets/spaceship.png')
self.scale_factor = 10
self.sprite = pg.transform.scale(self.sprite, (self.sprite.get_width() // self.scale_factor , self.sprite.get_height() // self.scale_factor))
self.rect = self.sprite.get_rect()
self.screen_rect = self.screen.get_rect()
self.isMovingRight = False
self.isMovingLeft = False
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom - 5
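    # Move the ship horizontally by space_ship_speed, stopping at the screen edges.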
def update(self):
if self.isMovingRight and (self.rect.right < self.screen_rect.right):
self.rect.centerx += self.settings.space_ship_speed
if self.isMovingLeft and (self.rect.left > self.screen_rect.left):
self.rect.centerx -= self.settings.space_ship_speed
def draw(self):
self.screen.blit(self.sprite, self.rect)
def center_ship(self):
self.rect.centerx = self.screen_rect.centerx
|
hoangdesu/Alien-Invasion-Pygame
|
ship.py
|
ship.py
|
py
| 1,239 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pygame.sprite.Sprite",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 12,
"usage_type": "attribute"
}
] |
33198762995
|
import ConfigParser
import io
import sys
import os
import numpy as np
from scipy.stats import cumfreq
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.basemap import Basemap
from matplotlib.backends.backend_pdf import PdfPages
import pickle
configFile = sys.argv[1]
def readConfigFile(configFileName):
global config
with open(configFileName) as f:
sample_config = f.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
return config
def stackedPlotHistogram(metric, catchmentSize, title, legendLoc = 2, ymax=3500):
plotData = []
lims = [0,10**4,25000,50000,10**5,25*10**4,25*10**10]
for lim in range(1,len(lims)):
sel1 = catchmentSize/10**6 < lims[lim]
sel2 = catchmentSize/10**6 > lims[lim-1]
sel = [x and y for x, y in zip(sel1, sel2)]
plotData.append(metric[sel])
ax1 = plt.hist(plotData, bins=np.arange(-1,1.01,0.1), width = 0.1, stacked=True, color=plt.get_cmap("Blues")(np.linspace(0, 1, 6)), label = ["$<10*10^3$","$<25*10^3$","$<50*10^3$","$<100*10^3$","$<250*10^3$","$\geq250*10^3$"], edgecolor = "none")
ax1 = plt.legend(prop={'size': 10}, title="Catchment size ($km^2$)", loc = legendLoc)
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("Frequency")
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(0, ymax)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotHistogram(metric, title):
ax1 = plt.hist(metric, bins=np.arange(-1,1.01,0.1))
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("Frequency")
ax1 = plt.xlim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotCDF(forecast, validation, title, xlims = [-1,1]):
forecast[forecast < -1.01] = -1.01
vals, x1, x2, x3 = cumfreq(forecast, len(forecast))
ax1 = plt.plot(np.linspace(np.min(forecast), np.max(forecast), len(forecast)), vals/len(forecast), label=str(config.get('Main options', 'RunName')))
validation[validation < -1.01] = -1.01
vals, x1, x2, x3 = cumfreq(validation, len(validation))
ax2 = plt.plot(np.linspace(np.min(validation), np.max(validation), len(validation)), vals/len(validation), label=str(config.get('Reference options', 'RunName')))
ax2 = plt.legend(prop={'size': 10}, loc=2)
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("ECDF")
ax1 = plt.xlim(xlims[0], xlims[1])
ax1 = plt.ylim(0, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotScatter(forecast, validation, title):
ax1 = plt.plot(validation, forecast, "ro", markersize=8)
ax1 = plt.plot([-100,100], [-100,100])
ax1 = plt.title(title)
ax1 = plt.xlabel(str(config.get('Reference options', 'RunName')))
ax1 = plt.ylabel(str(config.get('Main options', 'RunName')))
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotHexBin(forecast, validation, title):
forecast[forecast < -1.1] = -1.1
validation[validation < -1.1] = -1.1
ax1 = plt.hexbin(validation, forecast, gridsize=20, vmin=1, vmax=20, cmap="OrRd")
ax1 = plt.plot([-100,100], [-100,100])
ax1 = plt.title(title)
ax1 = plt.xlabel(str(config.get('Reference options', 'RunName')))
ax1 = plt.ylabel(str(config.get('Main options', 'RunName')))
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotWorldMap(data, lons, lats, title, vmin = -1., vmax = 1., s=5):
plt.figure(figsize=(8, 4))
m = Basemap(projection='mill',lon_0=0, llcrnrlon=-20., llcrnrlat=20.,
urcrnrlon=50., urcrnrlat=75.)
x,y = m(lons, lats)
m.drawcountries(zorder=0, color="white")
#m.drawcoastlines(zorder=0, color="black")
m.fillcontinents(color = 'black',zorder=-1)
m.scatter(x,y, c=data, cmap='RdBu', vmin=vmin, vmax=vmax, s=s, edgecolors='none')
m.colorbar()
plt.title(title)
plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
plt.figure(figsize=(8, 6))
config = readConfigFile(configFile)
runName = str(config.get('Main options', 'RunName'))
refName = str(config.get('Reference options', 'RunName'))
output, output2 = pickle.load(open('validationResultsPool_%s_%s.obj' %(runName, refName), 'rb') )
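# Keep only locations whose metrics are finite in both runs and not all zero.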
sel1 = (np.isnan(output[:,3]+output[:,2]+output[:,4]+output2[:,2]+output2[:,3]+output2[:,4]) == False)
sel2 = np.sum(output[:,3:], axis=1) != 0.0
sel3 = np.sum(output2[:,3:], axis=1) != 0.0
sel = [x and y and z for x, y, z in zip(sel1, sel2, sel3)]
sel5Min = sel
pdf = PdfPages(str(config.get('Output options', 'outputFile')))
matplotlib.rcParams.update({'font.size': 12})
plotWorldMap(output[sel5Min,3], output[sel5Min,0], output[sel5Min,1], 'Correlation with observations (%s)' %(str(config.get('Main options', 'RunName'))))
plotWorldMap(output2[sel,3], output2[sel,0], output2[sel,1], 'Correlation with observations (%s)' %(str(config.get('Reference options', 'RunName'))))
plotWorldMap(output[sel,3]-output2[sel,3], output[sel,0], output[sel,1], 'Correlation difference 5min - 30min', vmin=-0.5, vmax=0.5)
plotWorldMap(output[sel5Min,4], output[sel5Min,0], output[sel5Min,1], 'Anomaly Correlation (%s)' %(str(config.get('Main options', 'RunName'))))
plotWorldMap(output2[sel,4], output2[sel,0], output2[sel,1], 'Anomaly Correlation (%s)' %(str(config.get('Reference options', 'RunName'))))
plotWorldMap(output[sel,4]-output2[sel,4], output[sel,0], output[sel,1], 'Anomaly Correlation difference', vmin=-0.5, vmax=0.5)
plotWorldMap(output[sel5Min,4]-output[sel5Min,3], output[sel5Min,0], output[sel5Min,1], 'Anomaly Correlation - Correlation (%s)' %(str(config.get('Main options', 'RunName'))))
stackedPlotHistogram(output[sel5Min,3], output[sel5Min,2], "Correlation with observations (%s)" %(str(config.get('Main options', 'RunName'))), ymax=750)
stackedPlotHistogram(output2[sel,3], output2[sel,2], "Correlation with observations (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=750)
stackedPlotHistogram(output[sel5Min,4], output[sel5Min,2], "Anomaly Correlation with observations (%s)" %(str(config.get('Main options', 'RunName'))), ymax=750)
stackedPlotHistogram(output2[sel,4], output2[sel,2], "Anomaly Correlation with observations (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=750)
stackedPlotHistogram(output[sel5Min,5], output[sel5Min,2], "Kling-Gupta Efficiency (%s)" %(str(config.get('Main options', 'RunName'))), ymax=500)
stackedPlotHistogram(output2[sel,5], output2[sel,2], "Kling-Gupta Efficiency (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=500)
stackedPlotHistogram(output[sel5Min,4]-output[sel5Min,3], output[sel5Min,2], "AC - R (%s)" %(str(config.get('Main options', 'RunName'))), ymax=550)
plotCDF(output[sel,3], output2[sel,3], "R")
plotCDF(output[sel,4], output2[sel,4], "AC")
plotCDF(output[sel,5], output2[sel,5], "KGE")
plotHexBin(output[sel,3], output2[sel,3], "R")
plotHexBin(output[sel,4], output2[sel,4], "AC")
plotHexBin(output[sel,5], output2[sel,5], "KGE")
pdf.close()
|
edwinkost/PCR-GLOBWB_validation
|
niko_validation_scripts/standAlone/plotValidation.py
|
plotValidation.py
|
py
| 7,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ConfigParser.RawConfigParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "scipy.stats.cumfreq",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "scipy.stats.cumfreq",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hexbin",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.basemap.Basemap",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_pdf.PdfPages",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams.update",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 132,
"usage_type": "attribute"
}
] |
30569513843
|
from flask import Flask, render_template, flash, redirect, url_for, session, logging, request
from wtforms import Form, StringField, validators
import Project
import re
app = Flask(__name__)
@app.route("/search")
def search():
return render_template('search.html')
class WordPredictionForm(Form):
word = StringField('', [validators.Length(min=1, max=1000)])
# PROJECT NLP
@app.route('/', methods=['GET', 'POST'])
def index():
form = WordPredictionForm(request.form)
if request.method == 'POST' and form.validate():
word = form.word.data
print(word)
#Predict the Model
project = Project
word = re.sub(r'([^\s\w]|_)+', '', word)
seq = word[:40].lower()
# print(seq)
list = project.predict_completions(seq, 5)
chosen = list[0]
print(list)
flash("loading...")
# redirect(url_for('index', list=list))
return render_template('index.html', form=form, list=list, seq=seq, chosen=chosen, scroll='result')
return render_template('index.html', form=form)
if __name__ == "__main__":
app.secret_key = "secret123"
app.run(debug=True)
|
jmgang/wordpredictor
|
app.py
|
app.py
|
py
| 1,218 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "wtforms.Form",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Length",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 43,
"usage_type": "call"
}
] |
32742347893
|
import requests,time
from bs4 import BeautifulSoup
import p_mysql,json
class jxy_all():
def xunhuan(self,gol_cookies):
wrong = 0
first_run = 0
jishu = 0
toufayu = False
multiple = [1, 3, 7, 15, 31, 63, 127, 34, 55, 89, 144, 1, 1]
maxwrong = 6
global moni
firstflag_vote = ''
current_period = ''
vote_retime = 0
endf = 1
wrongflag = False
vote_list = []
self.header = {"Accept": "text/html, application/xhtml+xml, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN",
"Connection": "Keep-Alive",
"Host": "www.juxiangyou.com",
"Referer": "http://www.juxiangyou.com/",
"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64;Trident/5.0)"}
post_head = {"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-cn",
"Cache-Control": "no-cache",
"Connection": "Keep-Alive",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Host": "www.juxiangyou.com",
"Referer": "http://www.juxiangyou.com/fun/play/crazy28/index",
"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"X-Requested-With": "XMLHttpRequest"}
self.url = 'http://www.juxiangyou.com/fun/play/crazy28/index'
yinshu = 1
list_v = []
czlst = []
c_time = time.strftime('%m-%d %H:%M', time.localtime(time.time()))
try:
req = requests.get(self.url, cookies=gol_cookies, headers=self.header)
soup = BeautifulSoup(req.text, 'lxml')
# 查询当前投注信息
vote_info = soup.find('p', attrs={'class': 'time-static1'})
# 第一步 找到当前期 这里必然找出当前期,目的是为了投注。
if vote_info != None:
if (vote_info.text).find('正在开奖') > 0:
print('正在开奖,等待5秒')
time.sleep(5)
else:
# 如果没有开奖,则查询当前投注期
try:
vote_current = vote_info.find_all('span')
# 结束标识,查询
end_flag = (vote_info.text).find('截止投注')
if end_flag > 0:
# 即使投注了,当前期也需要展示出来,为投注判断
print(vote_current[0].string + '期已经截止投注')
current_period = vote_current[0].string
else:
print('当前期' + vote_current[0].string + '剩余' + vote_current[1].string + '秒投注')
vote_retime = int(vote_current[1].string)
current_period = vote_current[0].string
except Exception as e:
print('搜索资料出错,列表错误')
print('traceback.format_exc():%s' % traceback.format_exc())
if current_period != '':
# 添加保存第一次金币部分
try:
current_jinbi = (soup.find('span', attrs={'class': 'J_udou'}).string).replace(',', '')
except Exception as e:
print(repr(e))
if firstflag_vote == '':
firstflag_vote = current_period
firstflag_jinbi = current_jinbi
config = configparser.ConfigParser()
config.read("Config_jxyfk28.ini")
config_title = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
try:
config.add_section(config_title)
config.set(config_title, "starttime:", config_title)
config.set(config_title, "firstvote:", firstflag_vote)
config.set(config_title, "firstjinbi", firstflag_jinbi)
config.write(open("Config_jxyfk28.ini", "w"))
tempa = config.sections()
newa = []
findtime = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# print(findtime)
for x in tempa:
# print(x.find(findtime))
if x.find(findtime) >= 0:
newa.append(x)
todayfirstjinbi = int(config.get(newa[0], 'firstjinbi'))
except configparser.DuplicateSectionError:
print("Section already exists")
# 循环采集部分
mydb = p_mysql.MySQL()
# 查询数据库最后一期,然后显示出来
sql_text = "select period from jx_fk28 ORDER BY period DESC limit 1"
sql_re = mydb.query(sql_text)
if len(sql_re) <= 0:
endf = 44
else:
endf = int((int(current_period) - int(sql_re[0][0])) / 25) + 1
if endf >= 44:
endf = 44
self.up_dt_info.emit("需采集" + str(endf) + "页数")
w = 1
while w <= endf:
self.up_dt_info.emit("开始采集,第" + str(w) + "页---")
try:
base_time = int(time.time()) * 1000
x_sign = baseN(base_time, 36)
# 为header字典添加一个X-sign标识,毫秒级时间戳36进制
post_head['X-Sign'] = x_sign
# 服务器接受str格式,把字典格式json格式转化
a = json.dumps(
{"c": "quiz", "fun": "getEachList", "items": "crazy28", "pageSize": 23, "pageIndex": w})
b = json.dumps({"items": "crazy28"})
# 毫秒级时间戳,同时作为postdatspeed16a数据发现服务器
pst_data = {'jxy_parameter': a, 'timestamp': base_time, 'params': b,
'xtpl': 'fun/private/jc-index-tbl'}
url = 'http://www.juxiangyou.com/fun/play/interaction'
# Post数据服务器,cookies使用登录页面与验证码 合并cookies提交
req_one = requests.post(url, data=pst_data, cookies=gol_cookies, headers=post_head,
allow_redirects=False)
vote_data = json.loads(req_one.text)
if vote_data['code'] == 10000:
for x in vote_data['itemList']:
period = x['num']
vote_time = x['date']
jcjg = x['jcjg2']
state = x['state']
if state == 1:
sql = "insert into jx_fk28 values ('" + period + "','" + vote_time + "','" + str(
jcjg) + "')"
mydb.query(sql)
w = w + 1
except Exception as e:
self.up_dt_info.emit("采集过程中,页面信息问题,重新采集该页")
print("错误:%s" % traceback.format_exc())
w = w - 1
if w <= 0:
w = 1
self.up_dt_info.emit("采集完成")
self.up_table_info.emit(req.text)
# if moni == 1 and first_run == 0:
# wrong = firstwrong
# print('当我更新wrong时,我的值还是',firstwrong)
if first_run == 0:
self.up_dt_info.emit('先搜索最近的一次错6')
remax = self.remaxwrong()
if int(current_period) - int(remax) <= 30:
moni = 0
first_run = 1
self.up_statusinfo.emit(
'第一次查询错六为: ' + str(remax) + " ,间隔期 : " + str(int(current_period) - int(remax)))
self.up_dt_info.emit('搜索结束')
# 每一次,必须采集完成后,才开始从数据库中拿数据判断
if vote_list: # 如果不为空,说明上一次投注了,判断是否正确。
try:
vote_period = str(vote_list[-1]).strip()
sql = "select * from jx_fk28 where period='" + vote_period + "' limit 1"
redata = mydb.query(sql)
last_vote = redata[0][2]
# print('返回列表', vote_list, '查找返回投注期的结果', last_vote[0])
self.up_dt_info.emit('上期投注列表' + str(vote_list))
if int(last_vote) in vote_list:
print('投注正确,倍率清空')
self.up_lastinfo.emit((vote_period, '', '', last_vote, '正确', ''))
wrong = 0
if wrongflag == True and moni == 1:
wrongflag = False
toufayu = True
jishu = 0
moni = 0
else:
self.up_lastinfo.emit((vote_period, '', '', last_vote, '错误', ''))
if int(last_vote) > 0:
# print('投注错误,次数加 1 ,错误次数:', wrong)
wrong = wrong + 1
if wrong >= maxwrong:
wrongflag = True
moni = 1
except Exception as e:
self.up_dt_info.emit("查询已投注的结果错误:%s" % traceback.format_exc())
# ---------------------------------------------------
s1 = int(current_period) - 1
s2 = str(int(current_period) - 2)
s3 = str(int(current_period) - 3)
s4 = str(int(current_period) - 4)
# sql = "select * from jx_fk28 where period='" + s1 + "' or period='" + s2 + "' or period='" + s3 + "' or period='" + s4 + "' order by period DESC"
sql = "select * from jx_fk28 where period <= %s order by period DESC LIMIT 20" % (s1)
# print(sql)
redata_1 = mydb.query(sql)
# print(redata_1)
last_1 = redata_1[0][2]
last_2 = redata_1[1][2]
last_3 = redata_1[2][2]
last_4 = redata_1[3][2]
print(last_1, last_2, last_3, last_4)
for x in redata_1:
czlst.append(int(x[2]))
print(czlst)
if vote_retime > 9:
if moni == 0:
if jishu >= 6 and wrong == 0:
toufayu = False
if toufayu == True:
yinshu = 20
jishu = jishu + 1
if jishu >= 250 and wrong <= 2:
moni = 1
jishu = 0
# print('lezhuan,最大错:', maxwrong, '当前错误', wrong, "金币:", '倍数', yinshu, '模拟', moni, '投注次数', jishu,
# '错标', wrongflag, '偷发育', toufayu)
# list_v = daxiao_1(last_1, last_2, last_3, last_4, multiple[wrong], yinshu)
list_v = daxiao_2(last_1, last_2, last_3, last_4, multiple[wrong], yinshu, czlst)
if list_v:
vote_list = vote_thing(current_period, list_v)
if int(vote_list[0]) < 10:
dd = '小'
else:
dd = '大'
self.up_curinfo.emit((current_period, multiple[wrong] * yinshu * 500, jishu, wrong,
int(current_jinbi) - todayfirstjinbi, moni, dd))
else:
vote_list = []
self.up_curinfo.emit((current_period, '', '', '', '', moni, ''))
del mydb
dealy_time = vote_retime + 28
self.up_dt_info.emit('延时%s刷新' % dealy_time)
for m in range(dealy_time, -1, -1):
self.up_lcd_num.emit(m)
time.sleep(1)
else:
self.up_dt_info.emit("当前期都没找到,继续延时30秒查找")
time.sleep(5)
except Exception as e:
print('traceback.format_exc():%s' % traceback.format_exc())
self.up_dt_info.emit("访问网站出错,等待10秒,重新访问" + repr(e))
time.sleep(5)
|
ssolsu/newproject
|
server_jxy.py
|
server_jxy.py
|
py
| 13,611 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.strftime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "p_mysql.MySQL",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 244,
"usage_type": "call"
}
] |
18015910724
|
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Import PyWavelets library
import pywt
import pywt.data
# Load an example image
path = os.path.dirname(__file__)
image_path = "image.jpg"
original_image = cv2.imread(os.path.join(path, image_path), cv2.IMREAD_GRAYSCALE)
# Perform 2D wavelet transform (MRA) on the original image
''' The output is a tuple with 4 elements: LL, (LH, HL, HH)
LL = Approximation, LH = Horizontal detail, HL = Vertical detail, HH = Diagonal detail
"haar" is the name of the wavelet used '''
coeffs2 = pywt.dwt2(original_image, 'haar')
LL, (LH, HL, HH) = coeffs2
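# Note: with the 'haar' wavelet, each subband (LL, LH, HL, HH) is roughly half the size
# of the input along each axis (ceil(H/2) x ceil(W/2)), so LL is a quarter-resolution
# approximation of the original image.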
# Define meta information (for example, a watermark)
'''Random meta-information is generated using NumPy's
np.random.randint function. The meta_info variable
contains random integer values between 0 and 127.
The goal is to embed this meta-information into the
approximation component (LL) of the wavelet-transformed image.'''
meta_info = np.random.randint(0, 128, size=LL.shape) # Ensure meta_info has the same dimensions as LL
# Resize meta_info to match the shape of LL
meta_info_resized = cv2.resize(meta_info, (LL.shape[1], LL.shape[0]))
# Exchange the LL (approximation) coefficients with meta information
LL_with_meta_info = LL + meta_info_resized
# Reconstruct the image using the modified coefficients
'''The modified coefficients, including LL_with_meta_info,
LH, HL, and HH, are used to reconstruct the modified image
using the inverse wavelet transform with the 'haar' wavelet.
The reconstructed image is stored in the modified_image variable.'''
modified_image = pywt.idwt2((LL_with_meta_info, (LH, HL, HH)), 'haar')
# Plot the original and modified images
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.imshow(original_image, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(modified_image, cmap='gray')
plt.title('Modified Image with Meta Information')
plt.axis('off')
plt.tight_layout()
plt.show()
|
kio7/smart_tech
|
Submission 2/Task_4/wavelet_transform.py
|
wavelet_transform.py
|
py
| 1,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pywt.dwt2",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pywt.idwt2",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
}
] |
17661433287
|
from itertools import islice
from collections import defaultdict
def distance(point):
return abs(point[0]) + abs(point[1])
def neighbours(point):
x, y = point
return ((x+1, y), (x-1, y), (x, y+1), (x, y-1),
(x+1, y+1), (x-1, y-1), (x+1, y-1), (x-1, y+1))
def spiral_seq():
yield 0, 0
x, y = 1, 0
inc_x, inc_y = 0, 1
while True:
yield x, y
if abs(x) == abs(y):
if x <= 0 and y <= 0:
inc_x, inc_y = 1, 0
elif x > 0 and y <= 0:
x += 1
y -= 1
inc_x, inc_y = 0, 1
elif x <= 0 and y > 0:
inc_x, inc_y = 0, -1
else:
inc_x, inc_y = -1, 0
x += inc_x
y += inc_y
def sequential_spiral(nth):
return next(islice(spiral_seq(), nth - 1, nth))
def neighbour_spiral(limit):
matrix = defaultdict(int)
matrix[(0, 0)] = 1
for point in islice(spiral_seq(), 1, None):
value = sum(matrix[neighbour] for neighbour in neighbours(point))
if value > limit:
return value
else:
matrix[point] = value
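# Illustrative checks: sequential_spiral(1) == (0, 0) and sequential_spiral(2) == (1, 0);
# neighbour_spiral(5) walks the spiral summing already-filled neighbours and returns 10,
# the first stored value that exceeds 5.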
print(distance(sequential_spiral(368078)))
print(neighbour_spiral(368078))
|
pdhborges/advent-of-code
|
2017/3.py
|
3.py
|
py
| 1,231 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.islice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 38,
"usage_type": "call"
}
] |
7354238248
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from kouzi_crawler.items import KouziCrawlerItem
class QzkeySpider(CrawlSpider):
name = 'qzkey'
allowed_domains = ['qzkey.com']
start_urls = ['http://mimi1688.aly611.qzkey.com/']
rules = (
Rule(LinkExtractor(allow=r'Product.aspx\?typeid=\d+'), callback='parse_item', follow=True),
)
def parse_item(self, response):
app_list = response.xpath('//dl[@class="cpDl2"]/dd/ul//li')
kouzi_name = '有鱼汇'
kouzi_link = response.url
kouzi_type = 'web'
for item in app_list:
app_item = KouziCrawlerItem()
app_item['app_name'] = item.xpath('./a//dd//h3/text()').extract_first().strip()
app_item['app_link'] = item.xpath('./a/@href').extract_first()
app_item['kouzi_type'] = kouzi_type
app_item['kouzi_name'] = kouzi_name
app_item['kouzi_link'] = kouzi_link
yield app_item
|
largerbigsuper/kouzi_crawler
|
kouzi_crawler/spiders/qzkey.py
|
qzkey.py
|
py
| 1,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "kouzi_crawler.items.KouziCrawlerItem",
"line_number": 22,
"usage_type": "call"
}
] |
28857307321
|
import torch
import numpy as np
from six import string_types
from torch import optim
import inspect
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torch.nn.functional as F
from tqdm import tqdm
import copy
def get_function_args( fn ):
"""returns a list of all argumnts, dict of all the defualts , and list of all non default arguments
Args:
fn (function): [description]
Returns:
[type]: [description]
"""
args = inspect.getargspec( fn ).args
if inspect.getargspec( fn ).defaults is None:
n_defaults = 0
def_args = []
else:
n_defaults = len(inspect.getargspec( fn ).defaults )
def_args = list(inspect.getargspec( fn ).defaults )
if n_defaults > 0:
default_args = args[ -1*n_defaults : ]
else:
default_args = []
defaults = { a[0]:a[1] for a in zip(default_args , def_args ) }
non_defaults = args[: len( args) - n_defaults ]
return args , defaults , non_defaults
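# Example (hypothetical function): for `def f(a, b=1): ...`,
# get_function_args(f) returns (['a', 'b'], {'b': 1}, ['a']).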
# given a dictionary kwargs, this returns the subset of entries that can be passed to the function fn_name
def filter_functions_kwargs(fn_name , kwargs ):
fn_args = inspect.getargspec( fn_name ).args
ret = {}
for k in kwargs:
if k in fn_args:
ret[ k ] = kwargs[k]
return ret
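# Example (hypothetical function): with `def f(a, b=1): ...`,
# filter_functions_kwargs(f, {'a': 1, 'z': 2}) returns {'a': 1}, since 'z' is not an argument of f.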
def str_to_auto_type(var):
#first test bools
if var == 'True' or var=='true':
return True
elif var == 'False' or var=='false':
return False
else:
#int
try:
return int(var)
except ValueError:
pass
#float
try:
return float(var)
except ValueError:
pass
    # homogeneous list
# todo
#string
try:
return str(var)
except ValueError:
raise NameError('Something Messed Up Autocasting var %s (%s)'
% (var, type(var)))
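# Examples: str_to_auto_type('42') -> 42, str_to_auto_type('3.14') -> 3.14,
# str_to_auto_type('true') -> True; anything else falls through to str.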
# returns a dictionary of named args from the cli
def get_cli_opts(argv):
opts = {} # Empty dictionary to store key-value pairs.
argv= copy.deepcopy(argv)
while argv: # While there are arguments left to parse...
if argv[0][0] == '-' and argv[0][1] == '-': # Found a "--name value" pair.
argv[0] = argv[0][2:] # remove '--'
assert argv[0] != '' , "There is some issue with the cli args becasue a key cannot be empty"
assert not argv[0] in opts , "Repeated argument: "+argv[0]
opts[argv[0]] = str_to_auto_type( argv[1] ) # Add key and value to the dictionary.
argv = argv[1:] # Reduce the argument list by copying it starting from index 1.
return opts
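# Example (hypothetical argv): get_cli_opts(['--lr', '0.1', '--cuda', 'true'])
# returns {'lr': 0.1, 'cuda': True}; values are auto-cast via str_to_auto_type.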
def get_vars( data , cuda=False , numpy=False ):
# list( map( lambda x :Variable(torch.FloatTensor(x.float() )).cuda() , imgs ))
if type( data ) is tuple:
return tuple([ get_vars(d , cuda=cuda , numpy=numpy) for d in data ])
elif type( data ) is list:
return list([ get_vars(d , cuda=cuda , numpy=numpy) for d in data ])
elif type( data ) is dict:
return { k:get_vars(data[k] , cuda=cuda , numpy=numpy) for k in data }
else:
if numpy:
data = torch.from_numpy(data)
r = Variable( data )
if cuda:
r = r.cuda()
return r
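# Example (illustrative): get_vars({'x': some_numpy_array}, cuda=False, numpy=True)
# returns {'x': Variable(torch.from_numpy(some_numpy_array))}, recursing through
# nested tuples, lists and dicts.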
def get_np_arrs( data ):
if type( data ) is tuple:
return tuple([ get_np_arrs(d ) for d in data ])
elif type( data ) is list:
return list([ get_np_arrs(d ) for d in data ])
elif type( data ) is dict:
return { k:get_np_arrs(data[k] ) for k in data }
else:
return data.cpu().detach().numpy()
class ProgressBar(tqdm):
def __init__( self , iterator ):
super(ProgressBar, self).__init__(iterator)
self.vals_history_dict = {}
def add( self , vals_dict ):
for k in vals_dict:
if not k in self.vals_history_dict:
self.vals_history_dict[k] = []
self.vals_history_dict[k].append( vals_dict[k])
self.bar_str = ""
for k in self.vals_history_dict:
self.bar_str += k+":"+ "%.3f"%(np.mean(self.vals_history_dict[k])) + " "
self.set_description(self.bar_str )
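# Illustrative usage: wrap any iterable and log running means per key, e.g.
#   pbar = ProgressBar(data_loader)
#   for batch in pbar:
#       pbar.add({'loss': 0.42})
# The bar description then shows the mean of every value logged so far.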
|
divamgupta/pytorch-propane
|
pytorch_propane/utils.py
|
utils.py
|
py
| 4,467 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "inspect.getargspec",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 153,
"usage_type": "call"
}
] |
74280993467
|
import os
import sys
import threading
import asyncio
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import discord
client = None
channel = None
ready = False
def init():
global client
global channel
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
# discord.utils.get(channels.guild.channels, name="")
@client.event
async def on_ready():
global ready
ready = True
print(f"We have logged in as {client.user}")
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('$hello'):
await message.channel.send('Hello!')
def start(token):
threading.Thread(target=client.run, args=(token,)).start()
def send_message(channel_id, text, files=[]):
channel = client.get_channel(channel_id)
if channel == None:
print("no such channel")
return
client.loop.create_task(channel.send(text, files=[discord.File(p) for p in files]))
def stop():
client.loop.create_task(client.close())
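# Illustrative usage (token and channel id are placeholders):
#   init()
#   start("BOT_TOKEN")
#   send_message(123456789012345678, "status update", files=["snapshot.jpg"])
#   stop()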
|
mojyack/rpi-cat-monitor
|
remote.py
|
remote.py
|
py
| 1,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "discord.Client",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "discord.File",
"line_number": 46,
"usage_type": "call"
}
] |
21071659263
|
from enum import Enum
import ffmpeg
import numpy as np
import pandas as pd
import torch
from data_processing.custom_segmentation import CustomSegmentationStrategy
from data_processing.simple_segmentation import SimpleSegmentation
from data_processing.voice_activity_detection import VADSilero
class Method(Enum):
CUSTOM = "CUSTOM"
SILERO = "SILERO"
SIMPLE = "SIMPLE"
class AudioConvert:
def __init__(self, method: Method = Method.CUSTOM, use_gpu: bool = False):
self.method = method
if method == method.SILERO:
self.custom_speaker_activity_detection = VADSilero(use_gpu=use_gpu)
self.custom_segmentation = None
self.simple_segmentation = None
elif method == method.CUSTOM:
self.custom_segmentation = CustomSegmentationStrategy()
self.custom_speaker_activity_detection = None
self.simple_segmentation = None
elif method == method.SIMPLE:
self.custom_segmentation = None
self.custom_speaker_activity_detection = None
self.simple_segmentation = SimpleSegmentation()
@staticmethod
def read_file_to_np(audiofile_path: str):
out, err = (
ffmpeg
.input(audiofile_path)
.output('pipe:', format="wav", acodec="pcm_s16le", ar=16000, ac=1)
.run(capture_stdout=True)
)
numpy_array = np.frombuffer(out, dtype=np.int16)
return numpy_array
def convert_file_to_segments(self, audiofile_path: str):
audio = self.read_file_to_np(audiofile_path)
audio_tensor = torch.Tensor(audio)
if self.method == Method.CUSTOM:
vad_matrix = self.custom_speaker_activity_detection.get_VAD_matrix(audio_tensor)
self.custom_segmentation.plot_VAD(vad_matrix)
segments = self.custom_segmentation.segment(vad_matrix.numpy())
audio_segments = self.custom_speaker_activity_detection.audio_to_segments_from_stamps(audio, segments)
elif self.method == Method.SILERO:
timestamps = self.custom_speaker_activity_detection._get_speech_ts_adaptive(audio_tensor)
audio_segments = self.custom_speaker_activity_detection.audio_to_segments(audio, timestamps)
elif self.method == Method.SIMPLE:
audio_segments = self.simple_segmentation.segment(audio_tensor)
else:
raise RuntimeError()
return audio_segments
if __name__ == '__main__':
method = Method.SILERO
converter = AudioConvert(method=method, use_gpu=False)
audio_files = [
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/foelg_pengende/Foelg-pengene--Hvem-sk_5e5eee8c464747fdaab37a30a626df9b_192.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/24_spørgsmål_til_professoren/Historier_fra_de_varme_lande.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/danske_statsministre/Bang_Andr_f_rdigproduceret_med_intro_og_outro_online-audio-converter_com_.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/den_agile_podcast/Podcast#3 - Agile kontra vandfald.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/supertanker/Supertanker--USA-paa-r_2c271306def14480840af87150e5d636_192.mp3",
"/home/rafje/Downloads/Foelg-pengene--Apple--_823566a09c664d17aad77862d288473a_192.mp3"
]
    audio_lengths = []
for audio_file in audio_files:
lengths = map(lambda x: len(x[2]) / 16000, converter.convert_file_to_segments(audio_file))
        audio_lengths.append(lengths)
import matplotlib.pyplot as plt
all_lengths = []
lower_seconds = 4
upper_seconds = 15
under_seconds = []
between = []
over_seconds = []
    for i in range(len(audio_lengths)):
        current_lengths = list(audio_lengths[i])
all_lengths += current_lengths
df = pd.DataFrame(current_lengths, columns=['one'])
ax = df.plot.hist(bins=20, alpha=0.5)
plt.show()
for audio_length in current_lengths:
if audio_length < lower_seconds:
under_seconds.append(audio_length)
if audio_length > upper_seconds:
over_seconds.append(audio_length)
else:
between.append(audio_length)
df = pd.DataFrame(all_lengths, columns=['Audio lengths'])
ax = df.plot.hist(bins=20, alpha=0.5)
plt.show()
print(f"Length under: {len(under_seconds)}")
print(f"Length over: {len(over_seconds)}")
print(f"Length between: {len(between)}")
print(f"total length: {len(under_seconds) + len(over_seconds) + len(between)}")
print(f"Length under seconds: {sum(under_seconds)}")
print(f"Length over seconds: {sum(over_seconds)}")
print(f"Length between seconds: {sum(between)}")
print(f"total length seconds: {sum(under_seconds) + sum(over_seconds) + sum(between)}")
|
centre-for-humanities-computing/Gjallarhorn
|
data_processing/convert_audiofile_to_segments.py
|
convert_audiofile_to_segments.py
|
py
| 4,941 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "enum.Enum",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "data_processing.voice_activity_detection.VADSilero",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "data_processing.custom_segmentation.CustomSegmentationStrategy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "data_processing.simple_segmentation.SimpleSegmentation",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ffmpeg.input",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
}
] |
18602034777
|
from django import forms
from bankapp.models import Person, City
GENDER_CHOICES = [
('Male', 'Male'),
('Female', 'Female')
]
MATERIALS_PROVIDE_CHOICE = [
('Debit Card', 'Debit Card'),
('Credit Card', 'Credit Card'),
('Check Book', 'Check Book'),
]
class PersonCreationForm(forms.ModelForm):
gender = forms.ChoiceField(choices=GENDER_CHOICES, widget=forms.RadioSelect)
materials = forms.MultipleChoiceField(label='Materials Provide', choices=MATERIALS_PROVIDE_CHOICE,
widget=forms.CheckboxSelectMultiple)
class Meta:
model = Person
fields = '__all__'
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Your Name'}),
'email': forms.EmailInput(attrs={'class': 'form-control','placeholder':'Enter Your Email-ID'}),
'address': forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Your Address'}),
'age': forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Your Age'}),
'dob': forms.DateInput(attrs={'class': 'form-control','type':'date'}),
'account': forms.Select(attrs={'class': 'form-control'}),
'district': forms.Select(attrs={'class': 'form-control'}),
'city': forms.Select(attrs={'class': 'form-control'}),
'mob': forms.NumberInput(attrs={'class': 'form-control','placeholder':'Enter Your Mobile Number'}),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].queryset = City.objects.none()
if 'district' in self.data:
try:
district_id = int(self.data.get('district'))
self.fields['city'].queryset = City.objects.filter(district_id=district_id).order_by('name')
except (ValueError, TypeError):
pass # invalid input from the client; ignore and fallback to empty City queryset
elif self.instance.pk:
self.fields['city'].queryset = self.instance.district.city_set.order_by('name')
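# Illustrative flow: when the form is re-bound after a front-end change to "district"
# (for example a request carrying the selected district id), self.data contains that id
# and the city queryset above is narrowed to cities in that district.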
|
Manjith123/Easybankproject
|
bankapp/forms.py
|
forms.py
|
py
| 2,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.forms.RadioSelect",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.forms.MultipleChoiceField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.CheckboxSelectMultiple",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "bankapp.models.Person",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailInput",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.forms.DateInput",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.forms.Select",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.forms.Select",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.forms.Select",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.forms.NumberInput",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "bankapp.models.City.objects.none",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bankapp.models.City.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "bankapp.models.City",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "bankapp.models.City.objects.filter",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "bankapp.models.City.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "bankapp.models.City",
"line_number": 42,
"usage_type": "name"
}
] |
10719490049
|
import sys
import pathlib
import generator_func
import generator_logging
from datetime import datetime
from PyQt6.QtCore import QRunnable, QThreadPool, QDateTime, QSettings
from PyQt6.QtWidgets import (QApplication,
QDateTimeEdit,
QLabel,
QMainWindow,
QPushButton,
QWidget,
QFileDialog,
QGridLayout,
QLineEdit,
QComboBox,
QProgressBar,
QStatusBar,
QSpinBox,
QTableWidget,
QTableWidgetItem,
QMessageBox)
from PyQt6.QtGui import QIcon
MAXIMUM_IK_QUANTITY = 9999
class Worker(QRunnable): # класс для мультипоточности???
def run(self): # мой код
date_1 = win.date_1
ed_date = date_1.toString('yyyy-MM-dd')
req_date_time = date_1.toString('yyyy-MM-ddThh:mm:ssZ')
path_for_ik = win.directory_path.currentText() # в качестве пути для ИК берётся значение, указанное в ComboBox
win.progressbar.setMaximum(win.ik_quantity.value())
win.btn_create_IK.setEnabled(False)
start = datetime.now()
# aaa = generator_func.check_dir_emptiness(path_for_ik) # проверка каталога сохранения ИК на наличие файлов
for i in range(win.ik_quantity.value()):
generator_func.create_ik(path_for_ik, ed_date, req_date_time)
win.progressbar.setValue(i + 1)
win.status_bar.showMessage(f'Создано конвертов: {i + 1}')
end = datetime.now()
win.status_bar.showMessage(f'Создано конвертов: {win.ik_quantity.value()}. Затраченное время: {end - start}')
generator_logging.log_event(f'Создано конвертов: {win.ik_quantity.value()}. Каталог: {path_for_ik}. '
f'Затраченное время: {end - start}')
win.btn_create_IK.setEnabled(True)
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
# Добавляем файл с настройками
self.settings = QSettings('settings.ini', QSettings.Format.IniFormat)
self.path_history = set()
self.date_1 = ''
self.setWindowTitle("Генератор ИК") # заголовок главного окна
self.setMinimumSize(500, 150) # минимальные размеры главного окна
self.get_directory_path = QPushButton('Выбрать каталог', self)
self.get_directory_path.setFixedWidth(150) # установка ширины кнопки
# Определяем элементы интерфейса
self.btn_create_IK = QPushButton('Создать конверты', self)
self.ik_quantity_label = QLabel()
self.calendar_label = QLabel()
self.line_edit_for_combo = QLineEdit()
self.directory_path = QComboBox()
self.directory_path.setLineEdit(self.line_edit_for_combo)
self.ik_quantity = QSpinBox()
self.calendar = QDateTimeEdit()
self.progressbar = QProgressBar()
self.status_bar = QStatusBar()
self.start_date = QDateTime.currentDateTime()
self.calendar.setDisplayFormat('dd.MM.yyyy')
self.ik_quantity.setMaximum(MAXIMUM_IK_QUANTITY)
self.setMaximumWidth(1800)
self.get_directory_path.clicked.connect(self.get_directory)
self.btn_create_IK.clicked.connect(self.create_ik_func)
self.ik_quantity.textChanged.connect(self.ik_quantity_signal)
self.calendar.dateTimeChanged.connect(self.calendar_changed)
self.calendar.setCalendarPopup(True)
self.calendar.setDateTime(self.start_date)
self.date_1 = self.calendar.dateTime()
self.table = QTableWidget()
self.table_widget_item = QTableWidgetItem()
# размещение элементов
grid_layout = QGridLayout()
grid_layout.addWidget(self.get_directory_path, 0, 0)
grid_layout.addWidget(self.directory_path, 0, 1)
grid_layout.addWidget(self.ik_quantity_label, 1, 0)
grid_layout.addWidget(self.ik_quantity, 1, 1)
grid_layout.addWidget(self.calendar_label, 2, 0)
grid_layout.addWidget(self.calendar, 2, 1)
grid_layout.addWidget(self.btn_create_IK, 3, 0, 1, 2)
# grid_layout.addWidget(self.progressbar, 5, 0, 1, 2)
grid_layout.addWidget(self.status_bar, 4, 0, 1, 2)
widget = QWidget()
widget.setLayout(grid_layout)
self.setCentralWidget(widget)
self.ik_quantity_label.setText('Количество конвертов')
self.calendar_label.setText('Дата ИК')
self.btn_create_IK.setEnabled(False)
# создание всплывающих подсказок для элементов интерфейса
self.get_directory_path.setToolTip('Выберите каталог для сохранения ИК')
self.directory_path.setToolTip('Можно вставить путь или выбрать с помощью кнопки')
self.ik_quantity.setToolTip('Количество создаваемых конвертов')
self.btn_create_IK.setToolTip('Введите количество создаваемых конвертов')
self.calendar.setToolTip('Дата интеграционного конверта, дата заявки, дата выдачи кредита')
self.status_bar.showMessage('')
# self.table.cellClicked(0,0)
# Что-то про многопоточность
self.threadpool = QThreadPool()
self.ik_quantity_label = ''
self.iteration_count = ''
# определение переменных для пути к каталогам и файлам
self.start_path = pathlib.Path.cwd()
self.envelope_path = self.start_path.joinpath('sample/envelope.xml')
self.routeinfo_path = self.start_path.joinpath('sample/RouteInfo.xml')
self.ed421_path = self.start_path.joinpath('sample/ED421.xml')
self.line_edit_for_combo.setText(str(self.start_path))
self.path_for_ik = self.start_path
self.path_for_ik_str = str(self.path_for_ik)
# подгонка ширины под длину пути к каталогу
self.setMinimumWidth(int(len(str(self.start_path)) * 8.5))
# импорт сохраненных настроек
if self.settings.value('OD'):
self.calendar.setDateTime(self.settings.value('OD'))
else:
self.date_1 = self.calendar.date()
if self.settings.value('Path'):
self.directory_path.addItems(self.settings.value('Path'))
self.path_history = self.settings.value('Path')
else:
self.path_history = set()
def get_directory(self):
"""
Вызов диалогового окна для выбора каталога сохранения создаваемых конвертов
:return:
"""
self.path_for_ik = QFileDialog.getExistingDirectory(self, caption='Выбрать каталог сохранения',
directory=str(pathlib.Path.cwd()))
self.path_for_ik_str = str(self.path_for_ik)
self.line_edit_for_combo.setText(self.path_for_ik_str)
self.setMinimumWidth(len(self.path_for_ik_str * 10))
def create_ik_func(self):
"""
Создание конвертов
:return:
"""
worker = Worker() # делаем переменную на созданный класс FirstThread
self.threadpool.start(worker) # обращаемся к созданному классу FirstThread
# добавление пути для ИК в выпадающий список
if self.path_for_ik_str in self.path_history:
pass
elif self.path_for_ik_str not in self.path_history:
self.path_history.add(self.path_for_ik_str)
self.directory_path.addItem(self.path_for_ik_str)
def ik_quantity_signal(self, value):
"""
Определяет заполнено поле с количеством конвертов или нет и блокирует кнопку создания ИК
:param value:
:return:
"""
if self.ik_quantity.value() == 0:
self.btn_create_IK.setEnabled(False)
self.btn_create_IK.setToolTip('Введите количество создаваемых конвертов')
else:
self.btn_create_IK.setEnabled(True)
self.btn_create_IK.setToolTip('Создать конверты')
def calendar_changed(self):
self.date_1 = self.calendar.dateTime()
def closeEvent(self, event): # переопределение события закрытия окна
self.settings.setValue('Path', self.path_history) # Сохранить переменную с историей в файле с настройками
self.settings.setValue('OD', self.date_1) # Сохранить переменную с датой в файле с настройками
if __name__ == '__main__':
app = QApplication(sys.argv)
style = """
QMainWindow {
/*background-color: #fff;*/
}
QProgressBar {
border: 1px solid grey;
border-radius: 5px;
text-align: center;
}
QProgressBar::chunk {
background-color: #05B8CC;
width: 10px;
/*margin: 0.5px;*/
}
"""
app.setStyleSheet(style)
win = Window()
app.setWindowIcon(QIcon(str(win.start_path.joinpath('other/hedgehog_deep_red.png'))))
win.show()
sys.exit(app.exec())
|
Steelglowhawk/updateTool
|
generator_gui.py
|
generator_gui.py
|
py
| 10,348 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "PyQt6.QtCore.QRunnable",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "generator_func.create_ik",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "generator_logging.log_event",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QMainWindow",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtCore.QSettings",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtCore.QSettings.Format",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtWidgets.QPushButton",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QPushButton",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QLabel",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QLabel",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QLineEdit",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QComboBox",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QSpinBox",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QDateTimeEdit",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QProgressBar",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QStatusBar",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtCore.QDateTime.currentDateTime",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtCore.QDateTime",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtWidgets.QTableWidget",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QTableWidgetItem",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QGridLayout",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QWidget",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtCore.QThreadPool",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtWidgets.QFileDialog.getExistingDirectory",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QFileDialog",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtWidgets.QApplication",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtGui.QIcon",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 202,
"usage_type": "call"
}
] |
73817284346
|
"""Basic status commands to check the health of the bot."""
import datetime
import discord
from discord.ext import commands
from metricity.config import BotConfig
DESCRIPTIONS = (
"Command processing time",
"Last event received",
"Discord API latency",
)
ROUND_LATENCY = 3
INTRO_MESSAGE = "Hello, I'm {name}. I insert all your data into a GDPR-compliant database."
class Status(commands.Cog):
"""Get the latency between the bot and Discord."""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_socket_event_type(self, _: str) -> None:
"""Store the last event received as an int."""
self.last_event_received = int(datetime.datetime.now(datetime.UTC).timestamp())
@commands.command()
@commands.has_any_role(BotConfig.staff_role_id)
@commands.guild_only()
async def status(self, ctx: commands.Context) -> None:
"""Respond with an embed with useful status info for debugging."""
if ctx.guild.id != BotConfig.guild_id:
return
bot_ping = (datetime.datetime.now(datetime.UTC) - ctx.message.created_at).total_seconds() * 1000
if bot_ping <= 0:
bot_ping = "Your clock is out of sync, could not calculate ping."
else:
bot_ping = f"{bot_ping:.{ROUND_LATENCY}f} ms"
discord_ping = f"{self.bot.latency * 1000:.{ROUND_LATENCY}f} ms"
last_event = f"<t:{self.last_event_received}>"
embed = discord.Embed(
title="Status",
description=INTRO_MESSAGE.format(name=ctx.guild.me.display_name),
)
for desc, latency in zip(DESCRIPTIONS, (bot_ping, last_event, discord_ping), strict=True):
embed.add_field(name=desc, value=latency, inline=False)
await ctx.send(embed=embed)
async def setup(bot: commands.Bot) -> None:
"""Load the status extension."""
await bot.add_cog(Status(bot))
|
python-discord/metricity
|
metricity/exts/status.py
|
status.py
|
py
| 1,958 |
python
|
en
|
code
| 39 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "datetime.UTC",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Context",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "metricity.config.BotConfig.guild_id",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "metricity.config.BotConfig",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "datetime.UTC",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_any_role",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "metricity.config.BotConfig.staff_role_id",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "metricity.config.BotConfig",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.guild_only",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 58,
"usage_type": "name"
}
] |
44075659516
|
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import pickle
import os
#put url here
#example_url= "https://archive.thehated3.workers.dev/0:/Station%20X%20-%20The%20Complete%20Cyber%20Security%20Course!/"
durl= "https://archive.thehated3.workers.dev/0:/Station%20X%20-%20The%20Complete%20Cyber%20Security%20Course!/"
#put local path to download here, leave '.' to download in current directory
#example_path="./Station_X_The_Complete_Cyber_Security_Course"
dpath="."
count=0
rcount=0
failed_links=[]
failed_paths=[]
def download(url,path):
global count, failed_links, failed_paths
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.add_argument("--headless")
# brower = webdriver.Firefox(firefox_options=fireFoxOptions)
driver = webdriver.Firefox(executable_path="./geckodriver.exe",options=fireFoxOptions)
driver.get(url)
time.sleep(3)
previous_height=driver.execute_script('return document.body.scrollHeight')
while True:
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
time.sleep(3)
new_height=driver.execute_script('return document.body.scrollHeight')
if new_height==previous_height:
break
previous_height=new_height
try:
element = WebDriverWait(driver,100).until(EC.presence_of_element_located((By.CLASS_NAME, "list-group-item")))
except:
count+=1
print(f"FILE NOT DOWNLOADED:\npath: {path}\n count:{count}")
print("TIMEOUT not LOADING ELEMENTS BY CLASS NAME list-grout-items EXCEPTION")
return
tuna=driver.find_elements_by_class_name("list-group-item")
dlinks=[]
for i in tuna:
folder=i.get_attribute('href')
if folder==None:
target_urls=i.find_elements_by_css_selector('a')
furl=target_urls[1].get_attribute('href')
dlinks.append(furl)
else:
fname=i.text
formated_folder_name=fname.replace(" ","-")
new_path=path+"/"+formated_folder_name
download(folder,new_path)
for x in dlinks:
# print(x)
# cmd=f'wget -c -P '+'"'+f'{path}'+'" '+'"'+ f'{x}'+'"'
print(f"****DOWNLOADING IN PATH****: {path}\nfiles_skipped_till_now={count} \n\n")
failure=os.system(f"""wget -c -P "{path}" "{x}" """)
if failure != 0:
count+=1
failed_links.append(x)
failed_paths.append(path)
print(f"FILE NOT DOWNLOADED:\npath: {path}\nfile: {x}\n count:{count}")
driver.close()
def direct_download(dd_url,dd_path):
rfc=os.system(f"""wget -c -P "{dd_path}" "{dd_url}" """)
return rfc
def retry():
global rcount
new_links=[]
new_paths=[]
rcount=0
try:
failed_file_open=open("failed_links_info.pickle","rb")
except:
print('failed_links_info NOT Available, ABORTING...')
return
get_failed=pickle.load(failed_file_open)
fetch_links=get_failed[0]
fetch_paths=get_failed[1]
failed_file_open.close()
link_size=len(fetch_links)
for k in range(link_size):
l=fetch_links[k]
p=fetch_paths[k]
status=direct_download(l,p)
if status!=0:
rcount+=1
new_links.append(l)
new_paths.append(p)
print(f"FILE NOT DOWNLOADED:\npath: {p}\nfile: {l}\n count:{rcount}")
print(f"Number of files not downloaded: {rcount}")
nf=len(new_paths)
o_again=open("failed_links_info.pickle","wb")
m_list=[new_links,new_paths]
pickle.dump(m_list,o_again)
o_again.close()
for e in range(nf):
ww=new_paths[e]
tt=new_links[e]
print(f"{ww}\n{tt}\n\n")
if __name__=='__main__':
ui=input("Choose:\n1.Retry failed downloads\n2.Download from new link provided\nChoose either '1' or ('2') :")
if ui==1 or ui=='1':
retry()
else:
download(durl,dpath)
print(f"Number of files not downloaded: {count}")
number_failed=len(failed_paths)
fo=open("failed_links_info.pickle","wb")
combined_list=[failed_links,failed_paths]
pickle.dump(combined_list,fo)
fo.close()
for i in range(number_failed):
a=failed_paths[i]
b=failed_links[i]
print(f"{a}\n{b}\n\n")
    user_input = input(f"Do you want to retry {count} failed downloads? (Y)/N : ")
if user_input.lower()=='n':
pass
else:
retry()
# print(turl)
|
aniket328/workers-dev-download-folders
|
fx.py
|
fx.py
|
py
| 4,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.FirefoxOptions",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 141,
"usage_type": "call"
}
] |
24013809061
|
from QLearning import Game
from collections import Counter
import pandas as pd
import matplotlib.pyplot as plt
gamma = 0.1
def Menu():
usr_op = None
while usr_op != 0:
print('//-//-//-// Card-Jitsu Menu //-//-//-//')
print('\nSelect an option to continue: ')
print('1. Play game vs AI.')
print('2. Get Strategy Metrics.')
print('3. Get Random Metrics.')
print('4. Train Ai Manual.')
print('5. Train Ai Random.')
print('0. Exit.')
usr_op = int(input('\n Option selected: '))
if usr_op == 1:
Game(gamma)
elif usr_op == 2:
get_metrics(is_random = False, train = False, show_game = True)
elif usr_op == 3:
get_metrics(is_random = True, train = False, show_game = False)
elif usr_op == 4:
get_metrics(is_random = False, train = True, show_game = True)
elif usr_op == 5:
get_metrics(is_random = True, train = True, show_game = False, show_metrics = False)
print('\n\n')
def get_metrics(is_random, train, show_game, show_metrics = True):
history = {
'Game': [],
'Round': [],
'AI': [],
'Player': [],
'Winner': [],
'Game Winner': []
}
game = 0
g = int(input('Numero de juegos a realizar: '))
while game < g:
winrecord , winner = Game(gamma, is_random, train, show_game)
for round in range(len(winrecord)):
history['Game'].append(game)
history['Game Winner'].append(winner)
history['Round'].append(round)
history['AI'].append(winrecord[round]['AI'])
history['Player'].append(winrecord[round]['Player'])
history['Winner'].append(winrecord[round]['Winner'])
game += 1
if not show_metrics: return 0
history = pd.DataFrame.from_dict(history)
# Histograma de Rondas y juegos
game_winrate = Counter(list(history['Game Winner']))
game_winrate = pd.DataFrame.from_dict(game_winrate, orient='index', columns=['Games Won'])
game_winrate.plot(kind='pie', y='Games Won', autopct='%1.0f%%', explode=(0.01, 0.01), startangle=20)
    plt.title('Frequency of Games Won')
plt.ylabel('')
plt.show()
# Diagrama de Pie de rondas ganadas
round_winrate = Counter(list(history['Winner']))
round_winrate = pd.DataFrame.from_dict(round_winrate, orient='index', columns=['Rounds Won'])
round_winrate.plot(kind='pie', y='Rounds Won', autopct='%1.0f%%', explode=(0.01, 0.01, 0.01), startangle=60)
    plt.title('Frequency of Rounds Won and Tied')
plt.ylabel('')
plt.show()
# Histograma de cartas
ai_cardrate = Counter(list(history['AI']))
ai_cardrate = pd.DataFrame.from_dict(ai_cardrate, orient='index', columns=['AI Cards'])
player_cardrate = Counter(list(history['Player']))
player_cardrate = pd.DataFrame.from_dict(player_cardrate, orient='index', columns=['Player Cards'])
hist_cardrate = ai_cardrate.merge(player_cardrate, how='outer', left_index=True, right_index=True).fillna(0)
hist_cardrate.plot(kind = 'bar')
    plt.title('Frequency of Cards Used')
plt.show()
Menu()
|
Marinovsky/Card-Jitsu
|
metrics_modifications/game.py
|
game.py
|
py
| 3,225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "QLearning.Game",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "QLearning.Game",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
}
] |
19637375362
|
import serial
import datetime as dt
import sys
class gps:
def __init__(self, port = "/dev/serial0"):
# Initializes serial connection for gps communication
try:
self.__ser = serial.Serial(port)
except Exception as e:
sys.exit("Can not connect with GPS using uart: " + str(e))
def get_record(self):
        # Tries up to 50 times to read a GPRMC record (as a string) from the GPS
got_record = False
for _ in range(50):
gps_record = self.__ser.readline().decode('UTF-8')
if gps_record[0:6] == "$GPRMC":
got_record = True
break
if got_record == True:
data = gps_record.split(",")
if data[2] == 'A':
self._status = "Correct"
# GMT time
if is_number(data[1][0:2]) and is_number(data[1][2:4]) and is_number(data[1][4:6]):
self._time = data[1][0:2] + ":" + data[1][2:4] + ":" + data[1][4:6]
else:
self._time = dt.datetime.now().strftime('[%H:%M:%S]')
self._status = "Corrupted data"
# Latitude
if (is_number(data[3])):
self._latitude = data[3]
else:
self._status = "Corrupted data"
# Latitude direction N/S
self._hemisphere_NS = data[4]
# Longitude
if (is_number(data[5])):
self._longitude = data[5]
else:
self._status = "Corrupted data"
# Longitude direction W/E
self._hemisphere_WE = data[6]
# Velocity in knots
if (is_number(data[7])):
self._velocity = data[7]
else:
self._status = "Corrupted data"
# True course
if (is_number(data[8])):
self._course = data[8]
elif data[8] == '':
                    self._course = 0
else:
self._status = "Corrupted data"
# Date
if is_number(data[9][4:6]) and is_number(data[9][2:4]) and is_number(data[9][0:2]):
self._date = data[9][4:6] + "-" + data[9][2:4] + "-" + data[9][0:2]
else:
self._status = "Corrupted data"
if self._status == "Correct":
return 0
else:
return 1
else:
self._status = "Signal lost"
self._time = dt.datetime.now().strftime('%H:%M:%S')
self._date = dt.datetime.now().strftime('%Y-%m-%d')
return 1
else:
self._status = "Connection error"
self._time = dt.datetime.now().strftime('%H:%M:%S')
self._date = dt.datetime.now().strftime('%Y-%m-%d')
return 1
def _decode(self, coord):
#Converts DDDMM.MMMMM to DD deg MM.MMMMM min
tmp = coord.split(".")
deg = tmp[0][0:-2]
mins = tmp[0][-2:]
return deg + " deg " + mins + "." + tmp[1] + " min"
def get_gps_time(self):
        # Returns date and time, or 1 if it fails to obtain them
if (self.get_record()):
return 1
else:
return self._date + " " + self._time
def get_decimal_degrees_record(self):
# Read from GPS and get current location parameters dictionary in decimal_degrees
if (self.get_record() == 0):
hemi_NE_sign = "+" if self._hemisphere_NS == "N" else "-"
hemi_WE_sign = "+" if self._hemisphere_WE == "E" else "-"
pos = self._latitude.find('.')
lat_deg = self._latitude[:pos-2]
lat_mins = self._latitude[pos-2:pos] + self._latitude[pos+1:]
lat_mins = str(round(float(lat_mins) / 60.0))
pos = self._longitude.find('.')
lng_deg = self._longitude[:pos-2]
lng_mins = self._longitude[pos-2:pos] + self._longitude[pos+1:]
lng_mins = str(round(float(lng_mins) / 60.0))
return {
'timestamp' : self.get_gps_time(),
'status' : self._status,
'latitude' : float(hemi_NE_sign + lat_deg + "." + lat_mins),
'longitude' : float(hemi_WE_sign + lng_deg + "." + lng_mins),
'velocity' : float(self._velocity),
'course' : float(self._course) }
else:
return {
'timestamp' : self._date + " " + self._time,
'status' : self._status,
'latitude' : 0,
'longitude' : 0,
'velocity' : 0,
'course' : 0 }
def get_location_message(self):
        # Read from GPS and get the current location as an easily readable string
self.get_record()
time_stamp = dt.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
return "%s latitude: %s(%s), longitude: %s(%s), velocity: %s, True Course: %s" % (
time_stamp,
self._decode(self._latitude),
self._hemisphere_NS,
self._decode(self._longitude),
            self._hemisphere_WE,
self._velocity,
self._course)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
|
maciejzj/pi-observer
|
scripts/gps.py
|
gps.py
|
py
| 5,664 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 138,
"usage_type": "attribute"
}
] |
9357540363
|
#!/usr/bin/env python
__author__ = '[email protected]'
import commands
from d3r.celppade.custom_protein_prep import ProteinPrep
class chimera_dockprep(ProteinPrep):
"""Abstract class defining methods for a custom docking solution
for CELPP
"""
ProteinPrep.OUTPUT_PROTEIN_SUFFIX = '.mol2'
def receptor_scientific_prep(self,
protein_file,
prepared_protein_file,
targ_info_dict={}):
"""
Protein 'scientific preparation' is the process of generating
a dockable representation of the candidate protein from a
single-chain PDB file.
:param protein_file: PDB file containing candidate protein.
:param prepared_protein_file: The result of preparation should have this file name.
:param targ_info_dict: A dictionary of information about this target and the candidates chosen for docking.
:returns: True if preparation was successful. False otherwise.
"""
#####################################################################
### $ python clean_receptor.py receptor.pdb clean_receptor.pdb ###
#####################################################################
# Implements the logic that was formerly in clean_receptor.py
orig_pdb = open(protein_file).readlines()
with open('clean_receptor.pdb','wb') as of:
for line in orig_pdb:
if len(line) > 4:
if line[:4] == 'ATOM':
of.write(line)
#####################################################################
### $ chimera --nogui --script "chimeraPrep.py clean_receptor.pdb prepared_receptor.mol2"
#####################################################################
# Write the chimera-interpreted code to a script file
chimera_prep_text = '''import chimera
import sys
opened = chimera.openModels.open(sys.argv[1])
mol = opened[0]
import DockPrep
DockPrep.prep([mol])
from WriteMol2 import writeMol2
with open(sys.argv[2],'wb') as of:
writeMol2([mol], of)
'''
with open('chimera_prep.py','wb') as of:
of.write(chimera_prep_text)
# Run chimera with the script as an input
prep_cmd = 'chimera --nogui --script "chimera_prep.py clean_receptor.pdb ' + prepared_protein_file + ' " 1> prep.stdout 2> prep.stderr'
commands.getoutput(prep_cmd)
return True
if ("__main__") == (__name__):
import logging
import os
import shutil
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-p", "--pdbdb", metavar = "PATH", help = "PDB DATABANK which we will dock into")
parser.add_argument("-c", "--challengedata", metavar="PATH", help = "PATH to the unpacked challenge data package")
parser.add_argument("-o", "--prepdir", metavar = "PATH", help = "PATH to the output directory")
logger = logging.getLogger()
logging.basicConfig( format = '%(asctime)s: %(message)s', datefmt = '%m/%d/%y %I:%M:%S', filename = 'final.log', filemode = 'w', level = logging.INFO )
opt = parser.parse_args()
pdb_location = opt.pdbdb
challenge_data_path = opt.challengedata
prep_result_path = opt.prepdir
#running under this dir
abs_running_dir = os.getcwd()
log_file_path = os.path.join(abs_running_dir, 'final.log')
log_file_dest = os.path.join(os.path.abspath(prep_result_path), 'final.log')
prot_prepper = chimera_dockprep()
prot_prepper.run_scientific_protein_prep(challenge_data_path, pdb_location, prep_result_path)
#move the final log file to the result dir
shutil.move(log_file_path, log_file_dest)
|
drugdata/tutorial_rdock_implementation
|
tutorial_rdock_implementation/tutorial_rdock_implementation_protein_prep.py
|
tutorial_rdock_implementation_protein_prep.py
|
py
| 3,827 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "d3r.celppade.custom_protein_prep.ProteinPrep",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "d3r.celppade.custom_protein_prep.ProteinPrep.OUTPUT_PROTEIN_SUFFIX",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "d3r.celppade.custom_protein_prep.ProteinPrep",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "commands.getoutput",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 99,
"usage_type": "call"
}
] |
75316082746
|
import os
import wx
from wx.lib.colourchooser.canvas import Canvas
class ImageCanvas(wx.Panel):
"""
Image Panel
"""
def __init__(self, parent, image_path=None, *args, **kwargs):
"""
Constructor
:param parent:
"""
wx.Panel.__init__(self, parent=parent, *args, **kwargs)
self.image_path = image_path
if self.image_path:
bmp = wx.Bitmap(self.image_path)
padding = 10
self.SetMinClientSize((bmp.GetWidth() + padding,
bmp.GetHeight() + padding))
self.glyphs = []
# self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.frame = parent
# img = wx.EmptyImage(240, 240)
self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.main_sizer.Add((1, 1), 0, wx.EXPAND, 75)
# self.main_sizer.Add(img, 0, wx.EXPAND)
self.main_sizer.Add((1,1), 0, wx.ALL, 75)
self.SetSizer(self.main_sizer)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)
self.Bind(wx.EVT_SIZE, self.on_size)
def set_sizer(self):
"""
:param sizer:
:return:
"""
sizer = wx.BoxSizer(wx.VERTICAL)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
for num in range(4):
label = "Button %s" % num
btn = wx.Button(self, label=label)
sizer.Add(btn, 0, wx.ALL, 5)
hSizer.Add((1,1), 1, wx.EXPAND)
hSizer.Add(sizer, 0, wx.TOP, 100)
hSizer.Add((1,1), 0, wx.ALL, 75)
self.SetSizer(hSizer)
def on_size(self, event):
"""
:param event:
"""
event.Skip()
self.Refresh()
def scale_image(self, image, max_width=None, max_height=None):
"""
:param image:
:param max_width:
:param max_height:
:return:
"""
width = image.GetWidth()
height = image.GetHeight()
ratio = min(max_width / width, max_height / height)
        image = image.Scale(int(ratio * width), int(ratio * height), wx.IMAGE_QUALITY_HIGH)
result = wx.BitmapFromImage(image)
return result
def on_erase_background(self, event):
"""
Add a picture to the background
:param event:
"""
# self.Freeze()
dc = event.GetDC()
w, h = self.GetClientSize()
if not dc:
dc = wx.ClientDC(self)
rect = self.GetUpdateRegion().GetBox()
dc.SetClippingRect(rect)
dc.Clear()
if self.image_path:
bmp = wx.Bitmap(self.image_path)
# bmp = self.scale_image(bmp, 100, 200)
size = bmp.GetSize()
x = int(w/2.0 - size.x/2.0)
y = int(h/2.0 - size.y/2.0)
dc.DrawBitmap(bmp, x, y)
self.draw_model(dc)
# self.Thaw()
def draw_model(self, dc):
"""
Draw glyps
:param dc:
:return:
"""
for glyph in self.glyphs:
glyph.draw(dc)
class Glyph(object):
def __init__(self, *args, **kwargs):
self.pen_color = kwargs.get('pen_color', wx.BLACK)
self.pen_width = kwargs.get('pen_width', 5)
self.coordinates = kwargs.get('coordinates', [])
def set_pen(self, dc):
        dc.SetPen(wx.Pen(self.pen_color, self.pen_width))
def pre_draw(self, dc):
        self.set_pen(dc)
def post_draw(self, dc):
pass
def _draw_(self, dc):
pass
def draw(self, dc):
self.pre_draw(dc)
self._draw_(dc)
self.post_draw(dc)
class Arc(Glyph):
"""
"""
def _draw_(self, dc):
pass
class Line(Glyph):
"""
"""
def _draw_(self, dc):
xy1 = self.coordinates[0]
xy2 = self.coordinates[1]
dc.DrawLine(xy1[0], xy1[1], xy2[0], xy2[1])
class Circle(Glyph):
"""
"""
def _draw_(self, dc):
xy = self.coordinates[0]
dc.DrawCircle(xy[0], xy[1], 100)
class Rectangle(Glyph):
"""
"""
def _draw_(self, dc):
pass
|
JoenyBui/boa-gui
|
boaui/panel/image.py
|
image.py
|
py
| 4,085 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "wx.Panel",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "wx.Bitmap",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "wx.BoxSizer",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "wx.ALL",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_ERASE_BACKGROUND",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_SIZE",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "wx.Button",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "wx.ALL",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "wx.ALL",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "wx.IMAGE_QUALITY_HIGH",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapFromImage",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "wx.ClientDC",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "wx.BLACK",
"line_number": 136,
"usage_type": "attribute"
}
] |
856153134
|
from setuptools import setup
import sys
VERSION = '1.2.1263'
plist = dict(
CFBundleName='VisTrails',
CFBundleShortVersionString=VERSION,
CFBundleGetInfoString=' '.join(['VisTrails', VERSION]),
CFBundleExecutable='vistrails',
CFBundleIdentifier='edu.utah.sci.vistrails',
)
sys.path.append('../..')
APP = ['../../vistrails/run.py']
#comma-separated list of additional data files and
#folders to include (not for code!)
#DATA_FILES = ['/usr/local/graphviz-2.12/bin/dot',]
OPTIONS = {'argv_emulation': True,
'iconfile': 'vistrails/resources/vistrails_icon.icns',
'includes': 'sip,pylab,xml,netCDF3,netCDF4_utils,netcdftime,\
libxml2,libxslt, Cookie, BaseHTTPServer, multifile, shelve,itk, itkBase, itkConfig, itkLazy, itkTypes, itkExtras',
'packages': 'PyQt4,vtk,MySQLdb,matplotlib,vistrails,numpy,ZSI,api',
'plist': plist,
}
setup(
app=APP,
# data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
VisTrails/VisTrails
|
scripts/dist/mac/setup_itk.py
|
setup_itk.py
|
py
| 1,027 |
python
|
en
|
code
| 100 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 27,
"usage_type": "call"
}
] |
16325116494
|
from functools import wraps
from typing import Callable
from util.threading import Thread, TimeoutException
from util.typing import P
from .AbstractHandler import PAYLOAD_TYPE, RESPONSE_TYPE, CONTEXT_TYPE, AbstractHandler
class AbstractTimeoutHandler(AbstractHandler[PAYLOAD_TYPE, RESPONSE_TYPE, CONTEXT_TYPE]):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.handle_request = cls._wrap_timeout(cls.handle_request)
def __init__(self, timeout: float = None, default: PAYLOAD_TYPE = None, **kwargs):
super().__init__(**kwargs)
self.timeout = timeout
self.default = default
@staticmethod
def _wrap_timeout(
handle_request: Callable[P, RESPONSE_TYPE]
) -> Callable[P, RESPONSE_TYPE]:
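        # Runs handle_request in a worker thread; if it does not finish within self.timeout, self.default is returned instead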
if (
hasattr(handle_request, "_AbstractTimeoutHandler_wrapped")
and handle_request._AbstractTimeoutHandler_wrapped == True
):
return handle_request
@wraps(handle_request)
def execute_with_timeout(self: "AbstractTimeoutHandler") -> RESPONSE_TYPE:
result = None
completed = False
def run_execute_and_store_result():
nonlocal result
nonlocal completed
try:
                    result = handle_request(self)
completed = True
except TimeoutException:
pass
thread = Thread(target=run_execute_and_store_result, daemon=True)
thread.start()
thread.join(self.timeout)
if not completed:
result = self.default
return result # type: ignore
execute_with_timeout._AbstractTimeoutHandler_wrapped = True
return execute_with_timeout
|
MysteriousChallenger/nat-holepunch
|
protocol/interface/request_handler/AbstractTimeoutHandler.py
|
AbstractTimeoutHandler.py
|
py
| 1,804 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "AbstractHandler.AbstractHandler",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.PAYLOAD_TYPE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.RESPONSE_TYPE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.CONTEXT_TYPE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.PAYLOAD_TYPE",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "util.typing.P",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.RESPONSE_TYPE",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "util.threading.TimeoutException",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "util.threading.Thread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "AbstractHandler.RESPONSE_TYPE",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "util.typing.P",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "AbstractHandler.RESPONSE_TYPE",
"line_number": 23,
"usage_type": "name"
}
] |
21228252116
|
from django.urls import path
from widgets.views import HomePageView, UserProfilePageView, SharedWidgetsPageView, \
PrivateWidgetsPageView, MemoryWidgetsView
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('home/shared-widgets/', SharedWidgetsPageView.as_view(), name='shared-widgets'),
path('user-profile/<slug:slug>/', UserProfilePageView.as_view(), name='user-profile'),
path('user-profile/<slug:slug>/widgets/', PrivateWidgetsPageView.as_view(), name='private-widgets'),
path('memory-widgets/<int:pk>', MemoryWidgetsView.as_view(), name='memory-widgets'),
]
|
alex-polo/homepage
|
widgets/urls.py
|
urls.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "widgets.views.HomePageView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "widgets.views.HomePageView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "widgets.views.SharedWidgetsPageView.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "widgets.views.SharedWidgetsPageView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "widgets.views.UserProfilePageView.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "widgets.views.UserProfilePageView",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "widgets.views.PrivateWidgetsPageView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "widgets.views.PrivateWidgetsPageView",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "widgets.views.MemoryWidgetsView.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "widgets.views.MemoryWidgetsView",
"line_number": 11,
"usage_type": "name"
}
] |
72143866108
|
from typing import Any, Dict
def play_game() -> None:
print('playing game')
def update_state(current_state: Dict) -> Dict:
print('here we change things')
possible_actions = {
        'mod status': lambda : print('modifying status'),
'remove status': lambda : print('removing status'),
'go back': lambda : print('saving updates')
}
show_commands('update status menu', possible_actions)
return current_state
def quit():
print('good bye m8')
def show_commands(title: str, commands: Dict) -> Any:
print(title.upper())
idxs = {}
for idx, op in enumerate(commands):
print(f'{op} -> press [{idx}]')
idxs[str(idx)] = commands[op]
while True:
user_op = input('select an option > ')
if user_op in idxs:
return idxs[user_op]()
def main():
state = {
'user_name': 'santi'
}
commands = {
'play': play_game,
'quit': quit,
        'update_status': lambda : update_state(state)
}
show_commands('main menu', commands=commands)
main()
|
levensworth/udesa-pc-tutorial
|
2022-a/4-testing_and_train/solutions/example_command.py
|
example_command.py
|
py
| 1,096 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
}
] |
38722538066
|
import pandas as pd
import numpy as np
import tensorflow as tf
import sklearn.model_selection as sk
import helper as hp
import preprocessing as pre
import machine_learning as ml
import json
import os
from flask import Flask, redirect, url_for, request, jsonify
from tensorflow.keras import layers
from tensorflow.keras.models import load_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
ML_model = None
ML_history = None
graph = None
titles = None
classes = None
targets = None
categories = None
training_type = 0
app = Flask(__name__)
@app.route('/')
def initialize():
return 'Use /Train or /Predict'
@app.route('/Train', methods = ['POST'])
def Train():
global ML_model
global ML_history
global classes
global titles
global targets
global categories
global graph
# Getting the POST Request Body Data
Data = request.data
# Converting Text/Plain to JSON Structure
JsonData = json.loads(Data)
# Extracting product titles and product classes
titles = JsonData["products"]
targets = JsonData["classes"]
training_type = JsonData["training_type"]
# 1 is Very Small (80 vec size, Hidden Layers (1024,512))
# 2 is Small (200 vec size, Hidden Layers (2048,1024))
# 3 is Large (200 vec size, Hidden Layers (2048,1024,1024))
if(len(titles) == len(targets)):
# Preprocessing of data
        # Converts targets to a one-hot class array where [1,0,0,...] corresponds to class 1 and [0,1,0,...] to class 2
labels, classes, categories = hp.Get_Targets_Arrays(targets)
print(categories)
# Converts products titles to vectors
pre.Doc2Vectors(titles, training_type)
# Creating Vectors List for all products -> Dataset
Vectors_List = hp.Get_Product_Vectors(len(titles),training_type)
# Splitting Data to Train, Validate and Test sets
train_data, train_labels, val_data, val_labels, test_data, test_labels = pre.Data_Split(Vectors_List,labels)
# Training
if(training_type == 1):
ML_model, ML_history = ml.Train_1(train_data, train_labels, val_data, val_labels, len(labels[0]))
else:
if(training_type ==2):
ML_model, ML_history = ml.Train_2(train_data, train_labels, val_data, val_labels, len(labels[0]))
else:
ML_model, ML_history = ml.Train_3(train_data, train_labels, val_data, val_labels, len(labels[0]))
graph = tf.get_default_graph()
# Evaluating the trained model
results = ML_model.evaluate(test_data, test_labels)
response = "Training Completed with testing scores of " + str(results[1]) + " accuracy and " + str(results[0]) + " Loss"
return response
else:
return "Products and Classes don't have the same length"
@app.route('/Predict',methods = ['POST'])
def Predict():
global ML_model
global classes
global categories
global training_type
# Getting the POST Request Body Data
Data = request.data
# Converting Text/Plain to JSON Structure
JsonData = json.loads(Data)
# Extracting product titles and product classes
titles = JsonData["products"]
# Get the product title for prediction from the GET Request
#title = request.args.get('product')
# Convert the title to vector based on the titles vector model done in the training process
#v = hp.Get_Products_Title_Vector(titles)
# Load model weights for predictins
ML_model = load_model("weights")
ML_model._make_predict_function()
predicted_classes = []
for title in titles:
v = hp.Get_Product_Title_Vector(title)
# Predictions
pred = ML_model.predict(v)
max_index = np.argmax(pred)
predicted_class = categories[max_index]
predicted_classes.append(predicted_class)
response = {
"predictions":predicted_classes,
}
return jsonify(response)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5010)
|
ahmedhazemfekry/Neural-Network-Flask-Server
|
server.py
|
server.py
|
py
| 4,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "helper.Get_Targets_Arrays",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "preprocessing.Doc2Vectors",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "helper.Get_Product_Vectors",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "preprocessing.Data_Split",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "machine_learning.Train_1",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "machine_learning.Train_2",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "machine_learning.Train_3",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_default_graph",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "helper.Get_Product_Title_Vector",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 119,
"usage_type": "call"
}
] |
29191289762
|
import datetime
import json
import os
import re
import shutil
class Fileop():
def isDirectory(self, fDir):
return os.path.isdir(fDir)
def countDir(self, dPath):
dirListing = next(os.walk(dPath))[2]
return len(dirListing)
def CjsonLoad(self, jfile):
fdir = os.path.join(Fileop.dwnDir(''), "conf")
condir = Fileop.isDirectory('', fdir)
if condir:
confile = os.path.join(fdir, jfile)
with open(confile, "r") as j:
return json.load(j)
def SjsonLoad(self, jfile):
fdir = Fileop.dwnDir('')
condir = Fileop.isDirectory('', fdir)
if condir:
confile = os.path.join(fdir, jfile)
with open(confile, "r") as j:
return json.load(j)
def curWorkDir(self):
return os.path.dirname(os.path.realpath(__file__))
def makDir(self, Folder):
try:
os.makedirs(Folder)
except OSError as e:
print("Warning making {0} : MSG - {1}".format(Folder, e))
def dwnDir(self):
return os.path.normpath(os.getcwd() + os.sep + os.pardir)
def newDirec(self, nDirName):
return os.mkdir(nDirName)
def RecFileDir(self, dirName):
new_Folder = os.path.join(Fileop.dwnDir(''), dirName)
dFlag = Fileop.isDirectory('', new_Folder)
if not dFlag:
# make directory
try:
Fileop.newDirec('', new_Folder)
except OSError:
print("Creation of the directory %s failed. \n" % new_Folder)
else:
print("Successfully created the directory %s.\n " % new_Folder)
else:
print("Directory ( %s ) already exists.\n" % new_Folder)
return new_Folder
def newest(self, path):
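        # Return the path of the most recently created file in the directory (None if it is empty)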
files = os.listdir(path)
lenfile = len(files)
if lenfile != 0:
paths = [os.path.join(path, basename) for basename in files]
return max(paths, key=os.path.getctime)
else:
            print("Directory ( %s ) is empty\n" % path)
def removefiles(self, folder):
files_in_directory = os.listdir(folder)
filtered_files = [file for file in files_in_directory if file.endswith(".wav")]
dircount = Fileop.countDir('', folder)
if dircount > 1:
for file in filtered_files:
path_to_file = os.path.join(folder, file)
os.remove(path_to_file)
else:
print('Failed to delete files, {0} is empty: \n'.format(folder))
def moveFiles(self, froDir, toDir, froFile, toFile):
try:
shutil.move(os.path.join(froDir, froFile), os.path.join(toDir, toFile))
print("Successfully moved {0}.\n".format(os.path.join(froDir, froFile)))
except OSError:
print("Could not move file ({0}) operation.\n".format(os.path.join(froDir, froFile)))
def main(self):
# Check if directories have been created
source_param = Fileop.CjsonLoad('', "conf.json")
source_rep = os.path.join(Fileop.dwnDir(''), "reports")
Fileop.RecFileDir('', source_rep)
dwn_dir = source_param['download_dir']
# Recordings directory based on current date
recFolder = "Recordings" + datetime.datetime.now().strftime("%Y%m%d")
dirRecs = Fileop.RecFileDir('', recFolder)
# print(dirRecs)
# get latest data report file
newFile = Fileop.newest('', source_rep)
# print (newFile)
count = 0
if Fileop.countDir('', dwn_dir) != 0:
with open(newFile, "r") as nf:
lines = nf.readlines()
for line in lines:
count += 1
line_id = ' '.join(re.findall(r'\b\w+\b', line)[:+1])
line_data = ' '.join(re.findall(r'\b\w+\b', line)[:]).replace(line_id, "")
line_data = "_".join(line_data.split())
print("line {0} - file ID : {1} file metadata :- {2} \n".format(count, line_id, line_data))
# move and rename files
Fileop.moveFiles("", dwn_dir, dirRecs, line_id + ".wav", line_data + ".wav")
else:
print("Warning: Call recordings download did not run!\n")
# if __name__ == "__main__":
# main()
|
kkweli/Avaya
|
Avy/avayaFile.py
|
avayaFile.py
|
py
| 4,353 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.isdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 109,
"usage_type": "call"
}
] |
13453404410
|
import sqlite3
from flask import Flask
import json
app = Flask(__name__)
@app.route('/animals/<idx>')
def animals(idx):
with sqlite3.connect("animal.db") as connection:
cursor = connection.cursor()
query = f"""
select * from animals_final
left join outcomes on outcomes.animal_id = animals_final.animal_id
where animals_final.id = {idx}
"""
cursor.execute(query)
result = cursor.fetchall()
if len(result) == 1:
line = result[0]
result_dict = {}
number = 1
for i in line:
result_dict[f"{number}"] = i
number += 1
else:
result_dict = "Nothing found"
return json.dumps(result_dict)
app.run(debug=True, port=5001)
|
aquwue/lesson_15
|
main_program.py
|
main_program.py
|
py
| 812 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
}
] |
41439897989
|
from django.test import TestCase
from django.urls.base import reverse
from .models import Provinces
# Create your tests here.
class ProvincesModelTests(TestCase):
    def test_get_one_province(self):
        """if no province exists with the passed id, return an appropriate message"""
province = Provinces.objects.create(id=1, name='Santa Fe', population=23142323, density=5.8, surface=3252352)
response = self.client.get(reverse('provinciasCrud:get_one_province', args=[province.id]))
print(response)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Santa Fe')
# self.assertQuerysetEqual(response.context['province'], {})
    def test_get_all_provinces(self):
        """if the provinces array is empty, return an appropriate message"""
province = Provinces.objects.create(id=1, name='Santa Fe', population=23142323, density=5.8, surface=3252352)
response = self.client.get(reverse('provinciasCrud:get_provinces'))
print(response)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Santa Fe')
|
matiasfeliu92/crud_provincias
|
server/provinciasCrud/tests.py
|
tests.py
|
py
| 1,119 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Provinces.objects.create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Provinces.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Provinces",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.base.reverse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Provinces.objects.create",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Provinces.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "models.Provinces",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.base.reverse",
"line_number": 20,
"usage_type": "call"
}
] |
5243707290
|
#!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
NUMBER_OF_WORDS = 50
file_path = sys.argv[1]
lines = pd.read_table(file_path, header=None, delim_whitespace=True)
lines = lines.sample(NUMBER_OF_WORDS).reset_index(drop=True)
words = lines.iloc[:, 0]
vectors = lines.iloc[:, 1:]
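# Project the sampled word vectors onto their first two principal components for 2-D plotting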
pca = PCA(n_components=2)
vecs_transformed = pca.fit_transform(vectors)
plt.figure(figsize=(16, 16))
for i in range(len(words)):
(x, y) = [float(val) for val in vecs_transformed[i]]
plt.scatter(x, y)
plt.annotate(words[i],
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig('evaluation.png')
plt.show()
|
data-science-and-big-data-analytics/data-science-frameworks
|
FastText/evaluation.py
|
evaluation.py
|
py
| 819 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_table",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
43242415991
|
from os.path import abspath, dirname, join
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.compatibility.storage import Storage
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.importer import Importer
STORAGE_PATH = abspath(join(dirname(__file__), "../fixtures/images/"))
class CompatibilityStorageTestCase(TestCase):
def get_image_path(self, name):
return f"./tests/fixtures/images/{name}"
def get_image_bytes(self, name):
with open(self.get_image_path(name), "rb") as img:
return img.read()
def get_image_url(self, name):
return f"s.glbimg.com/some/{name}"
def get_context(self):
config = Config(
FILE_LOADER_ROOT_PATH=STORAGE_PATH,
COMPATIBILITY_LEGACY_STORAGE="tests.compatibility.legacy_file_storage",
STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True,
)
importer = Importer(config)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
return Context(server, config=config, importer=importer)
@gen_test
async def test_should_raise_for_invalid_compatibility_storage(self):
config = Config(
FILE_LOADER_ROOT_PATH=STORAGE_PATH,
STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True,
)
importer = Importer(config)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, config=config, importer=importer)
storage = Storage(ctx)
with expect.error_to_happen(
RuntimeError,
message=(
"The 'COMPATIBILITY_LEGACY_STORAGE' configuration should "
"point to a valid storage when using compatibility storage."
),
):
await storage.get("invalid-path")
@gen_test
async def test_should_return_none_for_invalid_image(self):
storage = Storage(self.context)
result = await storage.get("invalid-path")
expect(result).to_be_null()
@gen_test
async def test_should_get(self):
url = self.get_image_url("image.jpg")
image_bytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(url, image_bytes)
result = await storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
expect(result).to_equal(image_bytes)
@gen_test
async def test_should_put(self):
url = self.get_image_url("image.jpg")
image_bytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(url, image_bytes)
result = await storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
expect(result).to_equal(image_bytes)
@gen_test
async def test_should_put_detector_data(self):
iurl = self.get_image_url("image_7.jpg")
ibytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(iurl, ibytes)
await storage.put_detector_data(iurl, "some-data")
got = await storage.get_detector_data(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal("some-data")
@gen_test
async def test_should_put_crypto(self):
iurl = self.get_image_url("image_7.jpg")
ibytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(iurl, ibytes)
await storage.put_crypto(iurl)
got = await storage.get_crypto(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal("ACME-SEC")
@gen_test
async def test_exists(self):
iurl = self.get_image_url("image_7.jpg")
ibytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(iurl, ibytes)
exists = await storage.exists(iurl)
not_exists = await storage.exists("some-invalid-random-file.jpg")
expect(exists).to_be_true()
expect(not_exists).to_be_false()
@gen_test
async def test_remove(self):
iurl = self.get_image_url("image_7.jpg")
ibytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(iurl, ibytes)
exists = await storage.exists(iurl)
expect(exists).to_be_true()
await storage.remove(iurl)
not_exists = await storage.exists(iurl)
expect(not_exists).to_be_false()
|
thumbor/thumbor
|
tests/compatibility/test_compatibility_storage.py
|
test_compatibility_storage.py
|
py
| 4,916 |
python
|
en
|
code
| 9,707 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tests.base.TestCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "thumbor.config.Config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "thumbor.importer.Importer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "thumbor.context.ServerParameters",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "thumbor.context.Context",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "thumbor.config.Config",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "thumbor.importer.Importer",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "thumbor.context.ServerParameters",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "thumbor.context.Context",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "preggy.expect.error_to_happen",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "thumbor.compatibility.storage.Storage",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 139,
"usage_type": "name"
}
] |
32169695426
|
import requests
from .models import Item
import datetime
from django.utils.timezone import make_aware
def fetch_items():
conn = requests.get('https://hacker-news.firebaseio.com/v0/newstories.json?print=pretty')
res = sorted(conn.json())
return list(reversed(res))[:5] # top 5 stories
def fetch_item_by_id(item):
int_item = int(item)
conn = requests.get(f'https://hacker-news.firebaseio.com/v0/item/{int_item}.json?print=pretty')
res = conn.json()
if res['type'] == 'job':
print(res)
return res
def add_kid(kid):
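    # Fetch a child comment by id and store it under its parent item (deleted/dead comments are skipped)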
comment = fetch_item_by_id(kid)
parent = Item.objects.get(id=comment['parent'])
    if 'deleted' in comment or 'dead' in comment:
return
obj = get_obj(comment)
Item.objects.create(**obj, parent=parent)
return obj
def get_obj(detail):
type = detail.get("type")
id = str(detail.get("id"))
by = detail.get("by")
timestamp = datetime.datetime.fromtimestamp(detail.get("time"))
time = make_aware(timestamp)
url = detail.get("url")
title = detail.get("title")
text = detail.get("text")
descendants = detail.get("descendants", 0)
score = detail.get("score", 0)
obj = {
"type": type,
"id": id,
"by": by,
"time": time,
"url": url,
"title": title,
"text": text,
"score": score,
"fetched": True,
"descendants": descendants
}
return obj
def add_to_db():
items = fetch_items()
for single in items:
details = fetch_item_by_id(single)
if details['type'] == 'comment' or 'deleted' in details or 'dead' in details:
return
if details['type'] == 'job':
print(details)
if not Item.objects.filter(id=details['id']).exists():
item_obj = get_obj(details)
Item.objects.create(**item_obj)
if 'kids' in details:
kids = details['kids']
for kid in kids:
add_kid(kid)
|
Alisjj/HackerNews
|
newsList/fetcher.py
|
fetcher.py
|
py
| 2,008 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Item.objects.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Item.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.Item",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.Item.objects.create",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Item.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "models.Item",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "django.utils.timezone.make_aware",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Item.objects.filter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Item.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Item",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "models.Item.objects.create",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "models.Item.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "models.Item",
"line_number": 65,
"usage_type": "name"
}
] |
10422172533
|
from __future__ import annotations
from PySide6.QtCore import QMargins, QPoint, QRect, QSize, Qt
from PySide6.QtWidgets import QLayout, QSizePolicy, QWidgetItem
class FlowLayout(QLayout):
def __init__(self, parent=None, center=False):
super().__init__(parent)
if parent is not None:
self.setContentsMargins(QMargins(0, 0, 0, 0))
self._item_list: list[QWidgetItem] = []
self.center = center
def __del__(self):
item = self.takeAt(0)
while item:
item = self.takeAt(0)
def addItem(self, item):
self._item_list.append(item)
def count(self):
return len(self._item_list)
def itemAt(self, index):
if 0 <= index < len(self._item_list):
return self._item_list[index]
return None
def takeAt(self, index):
if 0 <= index < len(self._item_list):
return self._item_list.pop(index)
return None
def expandingDirections(self):
return Qt.Orientation(0)
def hasHeightForWidth(self):
return True
def heightForWidth(self, width):
height = self._do_layout(QRect(0, 0, width, 0), True)
return height
def setGeometry(self, rect):
super().setGeometry(rect)
self._do_layout(rect, False)
def sizeHint(self):
return self.minimumSize()
def minimumSize(self):
size = QSize()
for item in self._item_list:
size = size.expandedTo(item.minimumSize())
size += QSize(2 * self.contentsMargins().top(), 2 * self.contentsMargins().top())
return size
def _do_layout(self, rect, test_only):
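        # Lay the items out left to right, wrapping to a new row when the next item would overflow the rect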
x = rect.x()
y = rect.y()
line_height = 0
spacing = self.spacing()
rows: list[tuple[list[QWidgetItem], int, int]] = []
row = []
for item in self._item_list:
style = item.widget().style()
layout_spacing_x = style.layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Horizontal)
layout_spacing_y = style.layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Vertical)
space_x = (
spacing + layout_spacing_x
) * item.widget().isVisible() # If an item isn't visible, it does not have any influence on the next
space_y = spacing + layout_spacing_y
next_x = x + item.sizeHint().width() + space_x
if next_x - space_x > rect.right() and line_height > 0:
rows.append(
(row, rect.right() - x - space_x, line_height)
) # We need to remove the unnecessary extra padding from all rows, not just the last
row = []
x = rect.x()
y = y + line_height + space_y
next_x = x + item.sizeHint().width() + space_x
line_height = 0
if not test_only:
item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))
x = next_x
line_height = max(line_height, item.sizeHint().height())
row.append(item)
rows.append((row, rect.right() - x - space_x, line_height))
if not test_only and self.center:
for items, x_margin, y_size in rows:
x_margin /= 2
for item in items:
r = item.geometry()
new_y = r.y()
if r.height() < y_size:
new_y += (y_size - r.height()) / 2
item.setGeometry(QRect(QPoint(r.x() + x_margin, new_y), item.sizeHint()))
return y + line_height - rect.y()
|
randovania/randovania
|
randovania/gui/lib/flow_layout.py
|
flow_layout.py
|
py
| 3,663 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "PySide6.QtWidgets.QLayout",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.QMargins",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QWidgetItem",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Qt.Orientation",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.Qt",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.QRect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QSize",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QSize",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QWidgetItem",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QSizePolicy.PushButton",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtWidgets.QSizePolicy",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Qt.Horizontal",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtCore.Qt",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QSizePolicy.PushButton",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtWidgets.QSizePolicy",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Qt.Vertical",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtCore.Qt",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.QRect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QPoint",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QRect",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QPoint",
"line_number": 111,
"usage_type": "call"
}
] |
74589712187
|
import cv2
smilecascade=cv2.CascadeClassifier('haarcascade\\haarcascade_smile.xml')
cap = cv2.VideoCapture(0)
while 1:
ret, img=cap.read()
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
smiles = smilecascade.detectMultiScale(img, 1.3, 50)
for (x,y,w,h) in smiles:
cv2.rectangle(img, (x,y), (x+w, y+h), (255,255,255), 2)
cv2.imshow('img', img)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
harshikesh-kumar/Projects
|
Project Smile Detect.py
|
Project Smile Detect.py
|
py
| 488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.CascadeClassifier",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 20,
"usage_type": "call"
}
] |
29954994164
|
from __future__ import print_function
import re
import bitarray
def filterFeatures(sr_obj, feature_types=None, qualifier_regexs=None):
"""Filter a `SeqRecord` object's `SeqFeature` list by type and qualifiers.
Args:
sr_obj (``SeqRecord``) : instantiated Biopython
``SeqRecord`` object
feature_types (list, optional) : list of feature types
(e.g., ['gene', 'CDS'])
qualifier_regexs (dict, optional) : dict of <field name>:
<value regex> entries
Returns:
Filtered list of `SeqRecord` objects
Raises:
None
Examples:
Return a list of all ``SeqFeature`` objects from ``gb_rec``
that are of type 'mRNA' or 'CDS'::
>>>filterFeatures(gb_rec, ['mRNA', 'CDS'])
Return a list of all ``SeqFeature`` objects from ``gb_rec``
that are of type 'mRNA' or 'CDS' and that additionally have the
qualifier field 'gene' with a value that matches the regular expression
'ubc.*'::
>>>filterFeatures(gb_rec, ['gene', 'CDS'], {'gene': 'ubc.*'})
The latter example would match the following genbank records::
CDS join(54..567,789..1254)
/gene="ubc42"
/product="ubiquitin conjugating enzyme"
/function="cell division control"
CDS join(54..567,789..1254)
/gene="ubc51"
/product="ubiquitin conjugating enzyme"
/function="cell division control"
"""
qualifier_regexs = qualifier_regexs or {}
features = sr_obj.features
def _featureFilter(feature):
if feature_types is not None and feature.type not in feature_types:
return False
for feat_key, feat_value_re in qualifier_regexs.items():
q_values = feature.qualifiers.get(feat_key, None)
if q_values is None:
return False
for v in q_values:
if re.search(feat_value_re, v) is None:
return False
return True
return filter(_featureFilter, features)
def findBorders(sr_obj, feature_types=None, qualifier_regexs=None,
strand_specific=False):
"""Filter a ``SeqFeature`` list and find the border indices of its members.
See :func:`filterFeatures` for explanation of filtering functionality.
Args:
sr_obj (``SeqRecord``): instantiated Biopython ``SeqRecord`` object
feature_types (list, optional) : list of feature types (e.g.,
['gene', 'CDS'])
qualifier_regexs (list, optional) : dict of <field name>:
<value regex> entries
strand_specific (list, optional) : boolean determining whether
separate lists should be returned
for each strand (fwd / rev)
Returns:
List(s) of (<idx>, <1 if rising edge, 0 if falling edge>) tuples.
Raises:
None
"""
filtered_features = filterFeatures(sr_obj, feature_types, qualifier_regexs)
if strand_specific:
fwd_feat_list = []
rev_feat_list = []
else:
feat_list = []
for feat in filtered_features:
if strand_specific:
if feat.location.strand == -1:
feat_list = rev_feat_list
else:
feat_list = fwd_feat_list
feat_list.append((feat.location.start, 1))
feat_list.append((feat.location.end, 0))
if strand_specific:
return fwd_feat_list, rev_feat_list
else:
return feat_list
def buildBorderLUT(sr_obj, feature_types=None, qualifier_regexs=None,
strand_specific=False):
"""Filter a ``SeqRecord``'s features and build a binary LUT of border edges.
See :func:`filterFeatures` for explanation of filtering functionality.
Args:
sr_obj (``SeqRecord``): instantiated Biopython ``SeqRecord`` object
feature_types (list, optional) : list of feature types (e.g.,
['gene', 'CDS'])
qualifier_regexs (list, optional) : dict of <field name>:
<value regex> entries
strand_specific (list, optional) : boolean determining whether
separate lists should be returned
for each strand (fwd / rev)
Returns:
Binary bitarray(s) (``bitarray.bitarray``) indicating the indices of
feature borders (border indices have a value of 1). Strand-specific
bitarrays are returned if ``strand_specific`` is ``True``.
Raises:
None
"""
if strand_specific:
fwd_feat_list, rev_feat_list = findBorders(sr_obj, feature_types,
qualifier_regexs,
strand_specific)
fwd_lut = bitarray.bitarray(len(sr_obj.seq))
fwd_lut.setall(0)
rev_lut = bitarray.bitarray(len(sr_obj.seq))
rev_lut.setall(0)
for rec in fwd_feat_list:
fwd_lut[rec[0]] = 1
for rec in rev_feat_list:
rev_lut[rec[0]] = 1
return fwd_lut, rev_lut
else:
feat_list = findBorders(sr_obj, feature_types, qualifier_regexs,
strand_specific)
feat_lut = bitarray.bitarray(len(sr_obj.seq))
feat_lut.setall(0)
for rec in feat_list:
try:
feat_lut[rec[0]] = 1
except IndexError:
print('IndexError while generating border array {}'.format(
rec[0]))
return feat_lut
def findAggregateBoundaries(sr_obj, feature_types=None, qualifier_regexs=None):
"""Determine the outermost border indices of a group of filtered features.
See :func:`filterFeatures` for explanation of filtering functionality.
Args:
sr_obj (``SeqRecord``): instantiated Biopython ``SeqRecord`` object
feature_types (list, optional) : list of feature types (e.g.,
['gene', 'CDS'])
qualifier_regexs (list, optional) : dict of <field name>:
<value regex> entries
Returns:
Tuple of (<min index>, <max index>) of the filtered features
Raises:
None
For example, let's say your genbank file has the following features:
synth_seg 1001..2000
/label="seg17_000"
synth_seg 2001..3000
/label="seg17_001"
synth_seg 3001..4000
/label="seg17_002"
synth_seg 4001..5000
/label="seg18_000"
Then the following call will produce this output::
>>>findAggregateBoundaries(sr, ['synth_seg'], {'label': r'seg17.*'})
(1001, 4000)
"""
filtered_features = list(filterFeatures(sr_obj, feature_types,
qualifier_regexs))
if len(filtered_features) == 0:
return None, None
min_idx = len(sr_obj.seq) + 1
max_idx = -1
for ff in filtered_features:
min_idx = min(int(ff.location.start), min_idx)
max_idx = max(int(ff.location.end), max_idx)
return min_idx, max_idx
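# Illustrative usage sketch (not part of the original module). It assumes Biopython is installed and that
# "example.gb" is a GenBank file on disk; the file name and the 'gene' regex are placeholders.
if __name__ == "__main__":
    from Bio import SeqIO

    record = SeqIO.read("example.gb", "genbank")
    # Outermost span covered by CDS features whose 'gene' qualifier starts with "ubc"
    start, end = findAggregateBoundaries(record, ['CDS'], {'gene': r'ubc.*'})
    print('aggregate span:', start, end)
    # Per-base lookup table marking the border positions of all CDS features
    border_lut = buildBorderLUT(record, ['CDS'])
    print('number of border positions:', border_lut.count())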
|
Wyss/mascpcr
|
mascpcr/genbankfeatures.py
|
genbankfeatures.py
|
py
| 7,725 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "re.search",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "bitarray.bitarray",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "bitarray.bitarray",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "bitarray.bitarray",
"line_number": 156,
"usage_type": "call"
}
] |
71066866429
|
from ....utils.onlinetex import tex_to_svg_file_online
from ....utils.jupyter import video
from ..scene import SceneGL
from ..config import Size
from .plot import Plot
from .scatter import Scatter
from pathlib import Path
import re
import time
import shutil
from manimlib import (
BLUE,
GREEN,
ShowCreation,
Write,
VGroup,
Transform,
ReplacementTransform,
FadeIn,
FadeOut,
)
from manimlib.utils.rate_functions import linear, smooth
from manimlib.extract_scene import get_scene_config
import manimlib.config
from manimlib.camera.camera import Camera
from sparrow.path import rel_to_abs
__all__ = ["EagerModeScene", "JupyterModeScene", "CONFIG"]
class CONFIG:
# skip_animations = False # "Save the last frame"
color = None # Background color"
full_screen = False
gif = False
resolution = '1920x1080'
preview = False
# Render to a movie file with an alpha channel,
# if transparent is True, .mov file will be generated.
transparent = False
save_pngs = False # Save each frame as a png
hd = False
uhd = False
quiet = True
open = False # Automatically open the saved file once its done
finder = False # Show the output file in finder
frame_rate = 30
file_name = None
video_dir = None # directory to write video
start_at_animation_number = None
use_online_tex = False
class EagerModeScene(SceneGL):
def __init__(
self,
screen_size=Size.big,
scene_name='EagerModeScene',
):
# self.CONFIG = CONFIG
args = manimlib.config.parse_cli()
args_dict = vars(args)
args_dict['file'] = None
args_dict['scene_names'] = scene_name
args_dict['screen_size'] = screen_size
if CONFIG.preview:
from pyglet.window import key
self.key = key
else:
args_dict['write_file'] = True
for key, value in CONFIG.__dict__.items():
args_dict[key] = value
if CONFIG.gif is True:
args_dict['write_file'] = True
# if CONFIG.gif is True:
# args_dict["transparent"] = False
if CONFIG.use_online_tex:
print("Use online latex compiler")
manimlib.mobject.svg.tex_mobject.tex_to_svg_file = tex_to_svg_file_online
self.config = manimlib.config.get_configuration(args)
self.scene_config = get_scene_config(self.config)
super().__init__(**self.scene_config)
self.virtual_animation_start_time = 0
self.real_animation_start_time = time.time()
self.file_writer.begin()
self.setup()
self.plt = Plot()
self.is_axes_line_gen_ed = False
self._scatter_ax = None
self.clips = []
self.current_clip = 0
self.saved_states = []
self.animation_list = []
self.animation_func_dict = {}
self.loop_start_animation = None
self.pause_start_animation = 0
def play(self, *args, run_time=1, rate_func=linear, **kwargs):
"""TODO:"""
super().play(*args, run_time=run_time,
rate_func=rate_func,
**kwargs)
def _play_method(self, mobj, Method, loc):
loc.pop('self')
args = loc.pop('args')
kwargs = loc.pop('kwargs')
self.play(Method(mobj), *args, **loc, **kwargs)
def write(self, mobject, *args, run_time=1., rate_func=linear, **kwargs):
self._play_method(mobject, Write, locals())
def show_creation(self, mobject, *args, run_time=1, rate_func=linear, **kwargs):
self._play_method(mobject, ShowCreation, locals())
def fade_in(self, mobject, *args, run_time=1, rate_func=linear, **kwargs):
self._play_method(mobject, FadeIn, locals())
def fade_out(self, mobject, *args, run_time=1, rate_func=linear, **kwargs):
self._play_method(mobject, FadeOut, locals())
def get_animate_name_func(self):
def get_clip_names():
names = []
# Fixme: use other method to replace `dir()`
for name in dir(self):
if re.search(r'clip_*[0-9]+', name):
names.append(name)
# sort
if names:
names = sorted(names, key=lambda x: int(re.search(r"[0-9]+", x).group()))
return names
clip_names = get_clip_names()
animation_func_dict = {}
if clip_names:
for func_name in clip_names:
animation_func_dict.setdefault(func_name, getattr(self, func_name))
self.animation_func_dict = animation_func_dict
def save_image(self, filename):
"""This method works only when CONFIG.preview=False. """
assert not CONFIG.preview, "`self.save_image` works only when CONFIG.preview=False."
self.camera: Camera
self.camera.get_image().save(filename)
def render(self):
self.get_animate_name_func()
for name, func in self.animation_func_dict.items():
self.save_state()
self.saved_states.append(self.saved_state)
self.current_clip += 1
func()
self.animation_list.append(func)
self.hold_on()
def replay(self, animation_index=None):
if animation_index is None:
animation_index = self.current_clip
self.saved_state = self.saved_states[animation_index - 1]
self.restore()
self.animation_list[animation_index - 1]()
def loop_animate(self, animation_index=None, num=10):
while num:
num -= 1
self.replay(animation_index)
def next_animate(self):
self.current_clip += 1
def _clip_control(self, symbol):
# play preview clip
if symbol in (self.key.LEFT, self.key.COMMA, self.key.NUM_1, self.key._1):
self.current_clip -= 1
try:
self.replay(self.current_clip)
except IndexError:
self.current_clip += 1
# play next clip
elif symbol in (self.key.RIGHT, self.key.PERIOD, self.key._3, self.key.NUM_3):
self.current_clip += 1
try:
self.replay(self.current_clip)
except IndexError:
self.current_clip -= 1
# play current clip
elif symbol in (self.key.NUM_DIVIDE, self.key.DOWN, self.key._2, self.key.NUM_2):
self.replay(self.current_clip)
def hold_on(self):
self.tear_down()
def tear_down(self):
super().tear_down()
def get_config(self):
return self.config
def save_default_config(self):
"""Save the default config file to current directory."""
shutil.copy(rel_to_abs("custom_config.yml"), rel_to_abs('custom_config.yml'))
def get_scene_config(self):
return self.scene_config
def save_start(self, file_name):
"""TODO"""
def save_end(self):
"""TODO"""
# self.file_writer.finish()
def embed(self):
super().embed()
# FIXME: Remove method `plot` from EagerModeScene.
def plot(self,
x,
y,
color=None,
width=2,
axes_ratio=0.62,
scale_ratio=None,
num_decimal_places=None,
show_axes=True,
include_tip=True,
x_label='x',
y_label='y'):
"""
params
------
scale_ratio: Scale ratio of coordinate axis. i.e. y / x .
num_decimal_places: Number of significant digits of coordinate_labels.
"""
self.plt.plot(x, y, color, width, axes_ratio, scale_ratio, show_axes, include_tip, num_decimal_places,
x_label, y_label)
def scatter2d(self, x, y, color=BLUE, size=0.05, ax=None):
self._scatter_nd(x, y, color=color, size=size, ax=ax)
def scatter3d(self, x, y, z, color=BLUE, size=0.05, ax=None):
self._scatter_nd(x, y, z, color=color, size=size, ax=ax)
def _scatter_nd(self, x, y, z=None, color=BLUE, size=0.05, ax=None):
scatter_obj = Scatter()
if ax is not None: self._scatter_ax = ax
if z is not None:
self._scatter_ax, mobj = scatter_obj.from_dot_cloud_3d(
x, y, z, size=size, color=color, ax=self._scatter_ax)
else:
self._scatter_ax, mobj = scatter_obj.from_dotcloud(x, y, size=size, color=color, ax=self._scatter_ax)
if self._scatter_ax not in self.mobjects:
self.write(self._scatter_ax)
self.add(mobj)
return self._scatter_ax, mobj
def plot3d(self, x, y, z, width=2, axes_ratio=0.62, show_axes=True):
"""TODO"""
def get_plot_mobj(self):
if self.is_axes_line_gen_ed is False:
self.plt.gen_axes_lines()
self.is_axes_line_gen_ed = True
axes_lines_dict = self.plt.get_axes_lines()
axes_mobj = VGroup(*axes_lines_dict["axes"])
lines_mobj = VGroup(*axes_lines_dict["line"])
return axes_mobj, lines_mobj
def get_plot_axes(self):
return self.plt.get_axes()
def reset_plot(self):
self.plt = Plot()
self.is_axes_line_gen_ed = False
def show_plot(self, play=True, reset=True):
axes_mobj, lines_mobj = self.get_plot_mobj()
pltvgroup = VGroup(axes_mobj, lines_mobj)
if play:
self.write(axes_mobj, run_time=1.5, rate_func=smooth)
self.show_creation(lines_mobj, run_time=1.5, rate_func=smooth)
else:
self.add(pltvgroup)
if reset:
self.plt = Plot()
return pltvgroup
class JupyterModeScene(EagerModeScene):
def __init__(self, write_file=True, **kwargs):
CONFIG.write_file = write_file
super().__init__(**kwargs)
def finish(self):
self.file_writer.finish()
def embed(self):
"""We don't need it in jupyter lab/notebook."""
@property
def video_path(self):
path = Path(self.file_writer.get_movie_file_path())
self.file_writer.finish()
relative_path = path.relative_to(Path.cwd())
return str(relative_path)
def display(self,
width=854,
height=480,
controls=True,
autoplay=True,
loop=True):
return video(self.video_path, width, height, controls, autoplay, loop)
def quit(self):
"""Please use exit() or quit() in jupyter cell."""
pass
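# Illustrative usage sketch (not part of the original module), built only from the methods defined above;
# it assumes a working manimgl installation, and the data values are placeholders.
if __name__ == "__main__":
    import numpy as np

    scene = EagerModeScene(scene_name="SineDemo")
    x = np.linspace(0, 2 * np.pi, 200)
    scene.plot(x, np.sin(x), x_label="x", y_label="sin(x)")
    scene.show_plot()
    scene.hold_on()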
|
beidongjiedeguang/manim-express
|
manim_express/backend/manimgl/express/eager.py
|
eager.py
|
py
| 10,559 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "scene.SceneGL",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "config.Size.big",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "config.Size",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "manimlib.config.parse_cli",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "manimlib.config",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "manimlib.mobject",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "utils.onlinetex.tex_to_svg_file_online",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "manimlib.config.get_configuration",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "manimlib.config",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "manimlib.extract_scene.get_scene_config",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "plot.Plot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "manimlib.utils.rate_functions.linear",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "manimlib.utils.rate_functions.linear",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "manimlib.Write",
"line_number": 121,
"usage_type": "argument"
},
{
"api_name": "manimlib.utils.rate_functions.linear",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "manimlib.ShowCreation",
"line_number": 124,
"usage_type": "argument"
},
{
"api_name": "manimlib.utils.rate_functions.linear",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "manimlib.FadeIn",
"line_number": 127,
"usage_type": "argument"
},
{
"api_name": "manimlib.utils.rate_functions.linear",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "manimlib.FadeOut",
"line_number": 130,
"usage_type": "argument"
},
{
"api_name": "re.search",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "manimlib.camera.camera.Camera",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "shutil.copy",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "sparrow.path.rel_to_abs",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "manimlib.BLUE",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "manimlib.BLUE",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "manimlib.BLUE",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "scatter.Scatter",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "manimlib.VGroup",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "manimlib.VGroup",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "plot.Plot",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "manimlib.VGroup",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "manimlib.utils.rate_functions.smooth",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "manimlib.utils.rate_functions.smooth",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "plot.Plot",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "utils.jupyter.video",
"line_number": 332,
"usage_type": "call"
}
] |
21365592875
|
from __future__ import print_function
import sys
import os
from os.path import exists, dirname
import numpy as np
import pickle
import json
import time
import six
if six.PY3:
import _thread as thread
from queue import Queue
else:
import thread
from Queue import Queue
from collections import OrderedDict
from datetime import datetime
from sklearn.metrics import roc_auc_score
import multiprocessing
import paddle.distributed as dist
from glob import glob
from paddle import fluid
from pahelix.utils.splitters import \
RandomSplitter, IndexSplitter, ScaffoldSplitter, RandomScaffoldSplitter
from pahelix.datasets import *
def get_downstream_task_names(dataset_name, data_path):
"""
Get task names of downstream dataset
"""
if dataset_name == 'bace':
task_name = get_default_bace_task_names()
elif dataset_name == 'bbbp':
task_name = get_default_bbbp_task_names()
elif dataset_name == 'clintox':
task_name = get_default_clintox_task_names()
elif dataset_name == 'hiv':
task_name = get_default_hiv_task_names()
elif dataset_name == 'muv':
task_name = get_default_muv_task_names()
elif dataset_name == 'sider':
task_name = get_default_sider_task_names()
elif dataset_name == 'tox21':
task_name = get_default_tox21_task_names()
elif dataset_name == 'toxcast':
task_name = get_default_toxcast_task_names(data_path)
elif dataset_name == 'esol':
return get_default_esol_task_names()
elif dataset_name == 'freesolv':
return get_default_freesolv_task_names()
elif dataset_name == 'lipophilicity':
return get_default_lipophilicity_task_names()
else:
raise ValueError('%s not supported' % dataset_name)
return task_name
def get_dataset(dataset_name, data_path, task_names):
"""Return dataset according to the ``dataset_name``"""
if dataset_name == 'bace':
dataset = load_bace_dataset(data_path, task_names)
elif dataset_name == 'bbbp':
dataset = load_bbbp_dataset(data_path, task_names)
elif dataset_name == 'clintox':
dataset = load_clintox_dataset(data_path, task_names)
elif dataset_name == 'hiv':
dataset = load_hiv_dataset(data_path, task_names)
elif dataset_name == 'muv':
dataset = load_muv_dataset(data_path, task_names)
elif dataset_name == 'sider':
dataset = load_sider_dataset(data_path, task_names)
elif dataset_name == 'tox21':
dataset = load_tox21_dataset(data_path, task_names)
elif dataset_name == 'toxcast':
dataset = load_toxcast_dataset(data_path, task_names)
elif dataset_name == 'pcba':
dataset = load_pcba_dataset(data_path, task_names)
elif dataset_name == 'esol':
dataset = load_esol_dataset(data_path, task_names)
elif dataset_name == 'freesolv':
dataset = load_freesolv_dataset(data_path, task_names)
elif dataset_name == 'lipophilicity':
dataset = load_lipophilicity_dataset(data_path, task_names)
elif dataset_name == 'qm7':
dataset = load_qm7_dataset(data_path, task_names)
elif dataset_name == 'qm8':
dataset = load_qm8_dataset(data_path, task_names)
elif dataset_name == 'qm9':
dataset = load_qm9_dataset(data_path, task_names)
elif dataset_name == 'qm9_gdb':
dataset = load_qm9_gdb_dataset(data_path, task_names)
else:
raise ValueError('%s not supported' % dataset_name)
return dataset
def get_dataset_stat(dataset_name, data_path, task_names):
"""tbd"""
if dataset_name == 'esol':
return get_esol_stat(data_path, task_names)
elif dataset_name == 'freesolv':
return get_freesolv_stat(data_path, task_names)
elif dataset_name == 'lipophilicity':
return get_lipophilicity_stat(data_path, task_names)
elif dataset_name == 'qm7':
return get_qm7_stat(data_path, task_names)
elif dataset_name == 'qm8':
return get_qm8_stat(data_path, task_names)
elif dataset_name == 'qm9':
return get_qm9_stat(data_path, task_names)
elif dataset_name == 'qm9_gdb':
return get_qm9_gdb_stat(data_path, task_names)
else:
raise ValueError(dataset_name)
def create_splitter(split_type):
"""Return a splitter according to the ``split_type``"""
if split_type == 'random':
splitter = RandomSplitter()
elif split_type == 'index':
splitter = IndexSplitter()
elif split_type == 'scaffold':
splitter = ScaffoldSplitter()
elif split_type == 'random_scaffold':
splitter = RandomScaffoldSplitter()
else:
raise ValueError('%s not supported' % split_type)
return splitter
def calc_rocauc_score(labels, preds, valid):
"""compute ROC-AUC and averaged across tasks"""
if labels.ndim == 1:
labels = labels.reshape(-1, 1)
preds = preds.reshape(-1, 1)
rocauc_list = []
for i in range(labels.shape[1]):
c_valid = valid[:, i].astype("bool")
c_label, c_pred = labels[c_valid, i], preds[c_valid, i]
#AUC is only defined when there is at least one positive data.
if len(np.unique(c_label)) == 2:
rocauc_list.append(roc_auc_score(c_label, c_pred))
print('Valid ratio: %s' % (np.mean(valid)))
print('Task evaluated: %s/%s' % (len(rocauc_list), labels.shape[1]))
if len(rocauc_list) == 0:
raise RuntimeError("No positively labeled data available. Cannot compute ROC-AUC.")
return sum(rocauc_list)/len(rocauc_list)
def calc_rmse(labels, preds):
"""tbd"""
return np.sqrt(np.mean((preds - labels) ** 2))
def calc_mae(labels, preds):
"""tbd"""
return np.mean(np.abs(preds - labels))
def exempt_parameters(src_list, ref_list):
"""Remove element from src_list that is in ref_list"""
res = []
for x in src_list:
flag = True
for y in ref_list:
if x is y:
flag = False
break
if flag:
res.append(x)
return res
def mkdir(path):
path = path.strip()
path = path.rstrip("\\")
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
return True
else:
return False
def avg_split_list(listTemp, n):
twoList = [[] for i in range(n)]
for i, e in enumerate(listTemp):
twoList[i % n].append(e)
return twoList
def load_pkls_to_list(args):
fid, pkl_path = args
if (pkl_path.endswith(".pkl")):
pkl = open(pkl_path, "rb")
data = pickle.load(pkl)
if fid % 10 == 0:
print(" ", fid, end=", ")
return data
def get_pickle_files_list(path):
# traversal directory
files_list = []
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".pkl"):
files_list.append(os.path.join(root, name))
files_list.sort()
return files_list
"""
Load data and build a dataset list with InMemoryDataset; each line is the SMILES string of a molecule.
"""
def load_smiles_to_dataset(data_path):
"""tbd"""
files = sorted(glob('%s/*' % data_path))
print("files:", files)
data_list = []
for file in files:
with open(file, 'r') as f:
tmp_data_list = [line.strip() for line in f.readlines()]
data_list.extend(tmp_data_list)
dataset = InMemoryDataset(data_list=data_list)
return dataset
def get_steps_per_epoch(args):
"""tbd"""
# add as argument
if args.dataset == 'zinc':
train_num = int(20000000 * (1 - args.test_ratio))
else:
raise ValueError(args.dataset)
# if args.DEBUG:
# train_num = 100
steps_per_epoch = int(train_num / args.batch_size)
if args.distributed:
steps_per_epoch = int(steps_per_epoch / dist.get_world_size())
return steps_per_epoch
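# Illustrative usage sketch (not part of the original module): a small self-contained check of the metric
# helpers and the splitter factory using made-up values.
if __name__ == "__main__":
    labels = np.array([0.0, 1.0, 2.0, 3.0])
    preds = np.array([0.1, 0.9, 2.2, 2.8])
    print("rmse:", calc_rmse(labels, preds))
    print("mae:", calc_mae(labels, preds))
    splitter = create_splitter('scaffold')
    print("splitter:", type(splitter).__name__)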
|
liyishuilys/SMPT
|
src/utils.py
|
utils.py
|
py
| 7,884 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "six.PY3",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pahelix.utils.splitters.RandomSplitter",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pahelix.utils.splitters.IndexSplitter",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pahelix.utils.splitters.ScaffoldSplitter",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pahelix.utils.splitters.RandomScaffoldSplitter",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "paddle.distributed.get_world_size",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "paddle.distributed",
"line_number": 253,
"usage_type": "name"
}
] |
34221019372
|
"""
UP42 authentication mechanism and base requests functionality
"""
import json
from pathlib import Path
from typing import Dict, List, Optional, Union
import requests
import requests.exceptions
from tenacity import (
Retrying,
wait_fixed,
wait_random_exponential,
stop_after_attempt,
retry_if_exception,
retry_if_exception_type,
retry,
)
from up42.utils import get_logger
logger = get_logger(__name__)
class retry_if_429_error(retry_if_exception):
"""
Adapted from https://github.com/alexwlchan/handling-http-429-with-tenacity
Retry strategy that retries if the exception is an ``HTTPError`` with
a 429 status code.
"""
def __init__(self):
def is_http_429_error(exception):
return (
isinstance(exception, requests.exceptions.HTTPError)
and exception.response.status_code == 429
)
super().__init__(predicate=is_http_429_error)
class Auth:
def __init__(
self,
cfg_file: Union[str, Path] = None,
project_id: str = None,
project_api_key: str = None,
**kwargs,
):
"""
The Auth class handles the authentication with UP42.
Info:
Authentication is possible via the credentials of a specific project (project_id &
project_api_key). To get your **project id** and **project api key**, follow
the instructions in the docs authentication chapter.
Args:
cfg_file: File path to the cfg.json with {project_id: "...", project_api_key: "..."}.
project_id: The unique identifier of the project.
project_api_key: The project-specific API key.
"""
self.cfg_file = cfg_file
self.project_id = project_id
self.project_api_key = project_api_key
self.workspace_id: Optional[str] = None
try:
self.env: str = kwargs["env"]
except KeyError:
self.env = "com"
try:
self.authenticate: bool = kwargs["authenticate"]
except KeyError:
self.authenticate = True
try:
self.retry: bool = kwargs["retry"]
except KeyError:
self.retry = True
try:
self.get_info: bool = kwargs["get_info"]
except KeyError:
self.get_info = True
if self.authenticate:
self._find_credentials()
self._get_token()
self._get_workspace()
logger.info("Authentication with UP42 successful!")
def __repr__(self):
return f"UP42ProjectAuth(project_id={self.project_id}, env={self.env})"
def _find_credentials(self) -> None:
"""
Sources the project credentials from a provided config file, error handling
if no credentials are provided in arguments or config file.
"""
if self.project_id is None or self.project_api_key is None:
if self.cfg_file is None:
raise ValueError(
"Provide project_id and project_api_key via arguments or config file!"
)
# Source credentials from config file.
try:
with open(self.cfg_file) as src:
config = json.load(src)
try:
self.project_id = config["project_id"]
self.project_api_key = config["project_api_key"]
except KeyError as e:
raise ValueError(
"Provided config file does not contain project_id and "
"project_api_key!"
) from e
logger.info("Got credentials from config file.")
except FileNotFoundError as e:
raise ValueError("Selected config file does not exist!") from e
elif all(
v is not None
for v in [self.cfg_file, self.project_id, self.project_api_key]
):
logger.info(
"Credentials are provided via arguments and config file, "
"now using the argument credentials."
)
def _endpoint(self) -> str:
"""Gets the endpoint."""
return f"https://api.up42.{self.env}"
# pylint: disable=assignment-from-no-return
def _get_token(self):
try:
self._get_token_project()
except requests.exceptions.HTTPError as err:
raise ValueError(
"Authentication was not successful, check the provided project credentials."
) from err
def _get_token_project(self) -> None:
"""Project specific authentication via project id and project api key."""
url = (
f"https://{self.project_id}:{self.project_api_key}@api.up42.{self.env}"
f"/oauth/token"
)
payload = "grant_type=client_credentials"
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"cache-control": "no-cache",
}
token_response = requests.request("POST", url, data=payload, headers=headers)
token_response.raise_for_status()
token = json.loads(token_response.text)
# pylint: disable=attribute-defined-outside-init
self.token = token["data"]["accessToken"]
def _get_workspace(self) -> None:
"""Get workspace id belonging to authenticated project."""
url = f"https://api.up42.{self.env}/projects/{self.project_id}"
resp = self._request("GET", url)
self.workspace_id = resp["data"]["workspaceId"] # type: ignore
@staticmethod
def _generate_headers(token: str) -> Dict[str, str]:
version = (
Path(__file__)
.resolve()
.parent.joinpath("_version.txt")
.read_text(encoding="utf-8")
)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
"cache-control": "no-cache",
"X-UP42-info": f"python/{version}",
}
return headers
# pylint: disable=dangerous-default-value
@retry(
retry=retry_if_429_error(),
wait=wait_random_exponential(multiplier=0.5, max=180),
reraise=True,
)
def _request_helper(
self, request_type: str, url: str, data: Dict = {}, querystring: Dict = {}
) -> requests.Response:
"""
Helper function for the request, running the actual request with the correct headers.
Args:
request_type: 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'
url: The requests url.
data: The payload, e.g. dictionary with job parameters etc.
querystring: The querystring.
Returns:
The request response.
"""
headers = self._generate_headers(self.token)
if querystring == {}:
response = requests.request(
method=request_type, url=url, data=json.dumps(data), headers=headers
)
else:
response = requests.request(
method=request_type,
url=url,
data=json.dumps(data),
headers=headers,
params=querystring,
)
logger.debug(response)
logger.debug(data)
response.raise_for_status()
return response
def _request(
self,
request_type: str,
url: str,
data: Union[Dict, List] = {},
querystring: Dict = {},
return_text: bool = True,
) -> Union[str, Dict, requests.Response]:
"""
Handles retrying the request and automatically gets a new token if the old
is invalid.
Retry is enabled by default, can be set to False as kwargs in Api().
Args:
request_type: 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'
url: The url to request.
data: The payload, e.g. dictionary with job parameters etc.
querystring: The querystring.
return_text: If true returns response text/json, false returns response.
retry: If False, after 5 minutes an invalid token will return 401
errors.
Returns:
The API response.
"""
try:
if self.retry:
retryer = Retrying(
stop=stop_after_attempt(1), # TODO: Find optimal retry solution
wait=wait_fixed(0),
retry=(
retry_if_exception_type(requests.exceptions.HTTPError)
| retry_if_exception_type(requests.exceptions.ConnectionError)
),
after=lambda retry_state: self._get_token(),  # tenacity expects a callable here; refresh the token after a failed attempt
reraise=True,
)
response = retryer(
self._request_helper, request_type, url, data, querystring
)
else:
response = self._request_helper(request_type, url, data, querystring) # type: ignore
except requests.exceptions.RequestException as err: # Base error class
err_message = json.loads(err.response.text)["error"]
if "code" in err_message:
err_message = f"{err_message['code']} Error - {err_message['message']}!"
logger.error(err_message)
raise requests.exceptions.RequestException(err_message) from err
# Handle response text.
if return_text:
try:
response_text = json.loads(response.text)
except json.JSONDecodeError: # e.g. JobTask logs are str format.
response_text = response.text
# Handle api error messages here before handling it in every single function.
# pylint: disable=no-else-raise
try:
if response_text["error"] is not None and response_text["data"] is None:
raise ValueError(response_text["error"])
else:
return response_text
except (
KeyError,
TypeError,
): # Catalog search, JobTask logs etc. does not have the usual {"data":"",
# "error":""} format.
return response_text
else: # E.g. for DELETE
return response
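# Illustrative usage sketch (not part of the original module): the project id and API key below are
# placeholders, and authentication will only succeed with valid UP42 credentials.
if __name__ == "__main__":
    auth = Auth(project_id="your-project-id", project_api_key="your-project-api-key")
    project = auth._request("GET", f"{auth._endpoint()}/projects/{auth.project_id}")
    print(project)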
|
stasSajinDD/up42-py
|
up42/auth.py
|
auth.py
|
py
| 10,421 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "up42.utils.get_logger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tenacity.retry_if_exception",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "requests.exceptions",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "requests.request",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "requests.request",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "tenacity.retry",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "tenacity.wait_random_exponential",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "requests.Response",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "tenacity.Retrying",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "tenacity.stop_after_attempt",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "tenacity.wait_fixed",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "tenacity.retry_if_exception_type",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "tenacity.retry_if_exception_type",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.RequestException",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "requests.Response",
"line_number": 228,
"usage_type": "attribute"
}
] |
22879333885
|
# import socket
# import json
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # host = socket.gethostname()
# port = 9999
# s.connect(("127.0.0.1", port))
# msg = s.recv(1024)
# msg = msg.decode('utf-8')
# print(msg)
# s.close()
import socket
import json
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# host = socket.gethostname()
port = 8888
s.connect(("127.0.0.1", port))
# msg = "hi"
msg = {"a":0.01}
msg = json.dumps(msg)
s.sendall(msg.encode('utf-8'))
s.close()
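# Illustrative counterpart (not part of the original script): a minimal server sketch that would accept
# the JSON message sent above. The port matches the client; everything else is an assumption, so it is
# left commented out.
# import socket, json
# srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# srv.bind(("127.0.0.1", 8888))
# srv.listen(1)
# conn, addr = srv.accept()
# payload = json.loads(conn.recv(1024).decode('utf-8'))
# print(payload)
# conn.close()
# srv.close()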
|
HugoXK/ECE-445-Senior-Design
|
client.py
|
client.py
|
py
| 491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "socket.socket",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
}
] |
35241998177
|
from flask import Flask
#from flask_cors import CORS, cross_origin
from pymongo import MongoClient
connection = MongoClient("mongodb://localhost:27017/")
def create_mongodatabase():
try:
dbnames = connection.database_names()
if 'cloud_native' not in dbnames:
db = connection.cloud_native.users
db_tweets = connection.cloud_native.tweets
db_api = connection.cloud_native.apirelease
db.insert({
"email": "[email protected]",
"id": 33,
"name": "Eric stromberg",
"password": "eric@123",
"username": "eric.strom"
})
db_tweets.insert({
"body": "New blog post, Launch your app with the AWS StartupKit! # AWS",
"id": 18,
"timestamp": "2017-03-11T06:39:40Z",
"tweetedby": "eric.strom"
})
db_api.insert({
"buildtime": "2017-01-01 10:00:00",
"links": "/api/v1/users",
"methods": "get, post, put, delete",
"version": "v1"
})
db_api.insert({
"buildtime": "2017-02-11 10:00:00",
"links": "api/v2/tweets",
"methods": "get, post",
"version": "2017-01-10 10:00:00"
})
print("Database Initialize completed!")
else:
print("Database already Initialized!")
except Exception as exc:
print("Database creation failed: %s" % exc)
app = Flask(__name__)
#app.config['SERVER_NAME'] = 'enrg_sy:5000'
#app.secret_key = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KTq'
#CORS(app)
from flask import jsonify
import json
import sqlite3
from flask import make_response
@app.errorhandler(404)
def resource_not_found(error):
return make_response(jsonify({'error': 'Resource not found!'}), 404)
@app.route("/performance")
def get_perf_counter():
strCount1 = "<div style=""position:relative;width:100%;height:60%"">" \
"<iframe width=""384"" height=""216""" \
" src=""https://insights-embed.newrelic.com/embedded_widget/y8OoxNBFXRR6yDOsQCIDGPlTkEA6LnJi"" frameborder=""0""" \
" style=""position:absolute;width:100%;height:100%""></iframe></div>" \
"<div id = ""we"" style=""position:relative;width:100%;height:60%"">" \
" <iframe width=""384"" height=""216"" " \
" src=""https://insights-embed.newrelic.com/embedded_widget/35HhAcTJ1y3KgDpbnSDmcI8y_5R01b1n"" frameborder=""0""" \
" style=""position:absolute;width:100%;height:100%""></iframe></div>"
return strCount1
@app.route("/api/v1/info")
def home_index():
api_list = []
db = connection.cloud_native.apirelease
for row in db.find():
api_list.append(str(row))
return jsonify({'api_version': api_list}), 200
@app.route('/api/v1/users', methods=['GET'])
def get_users():
return list_users()
def list_users():
api_list=[]
db = connection.cloud_native.users
for row in db.find():
api_list.append(str(row))
return jsonify({'user_list': api_list})
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
return list_user(user_id)
def list_user(user_id):
api_list=[]
db = connection.cloud_native.users
for i in db.find({'id':user_id}):
api_list.append(str(i))
if api_list == []:
abort(404)
return jsonify({'user_details':api_list})
@app.errorhandler(400)
def invalid_request(error):
return make_response(jsonify({'error': 'Bad Request1'}), 400)
@app.errorhandler(401)
def invalid_request1(error):
return make_response(jsonify({'error': 'Bad Request2'}), 400)
@app.errorhandler(405)
def invalid_request2(error):
return make_response(jsonify({'error': 'Bad Request5'}), 400)
@app.errorhandler(403)
def invalid_request3(error):
return make_response(jsonify({'error': 'Bad Request4'}), 400)
from flask import request, abort
import random
@app.route('/api/v1/users', methods=['POST'])
def create_user():
if not request.json or 'username' not in request.json or \
'email' not in request.json or 'password' not in request.json:
abort(400)
user = {
'username': request.json['username'],
'email': request.json['email'],
'name': request.json['name'],
'password': request.json['password'],
'id': random.randint(1, 1000)
}
return jsonify({'status': add_user(user)}), 201
def add_user(new_user):
api_list=[]
print(new_user)
db = connection.cloud_native.users
user = db.find({'$or':[{"username":new_user['username']}, {"email":new_user['email']}]})
for i in user:
print(str(i))
api_list.append(str(i))
if api_list == []:
db.insert(new_user)
return "Succes"
else:
abort(409)
@app.route('/api/v1/users', methods=['DELETE'])
def delete_user():
if not request.json or 'username' not in request.json:
abort(400)
user = request.json['username']
return jsonify({'status': del_user(user)}), 200
def del_user(del_user):
db = connection.cloud_native.users
api_list = []
for i in db.find({'username':del_user}):
api_list.append(str(i))
if api_list == []:
abort(404)
else:
db.remove({'username':del_user})
return "Succes"
@app.route('/api/v1/users/<int:user_id>', methods=['PUT'])
def update_user(user_id):
print(user_id)
user = {}
user['id'] = user_id
key_list = request.json.keys()
for i in key_list:
user[i] = request.json[i]
return jsonify({'status': upd_user(user)}), 200
def upd_user(user):
api_list=[]
print(user)
db_user = connection.cloud_native.users
users = db_user.find_one({"id":user['id']})
for i in users:
api_list.append(str(i))
if api_list == []:
abort(409)
else:
db_user.update({'id':user['id']},{'$set': user},upsert=False)
return "Succes"
@app.route('/api/v2/tweets', methods=['GET'])
def get_tweets():
return list_tweets()
def list_tweets():
api_list = []
db = connection.cloud_native.tweets
for row in db.find():
api_list.append(str(row))
return jsonify({'tweets_list': api_list})
import time
@app.route('/api/v2/tweets', methods=['POST'])
def add_tweets():
user_tweet = {}
if not request.json or 'username' not in request.json or 'Body' not in request.json:
abort(400)
user_tweet['id'] = request.json['id']
user_tweet['tweetedby'] = request.json['username']
user_tweet['body'] = request.json['Body']
user_tweet['created_at'] = time.strftime(
"%Y-%m-%dT%H:%M:%SZ", time.gmtime())
print(user_tweet)
return add_tweet(user_tweet)
def add_tweet(new_tweets):
api_list = []
db_users = connection.cloud_native.users
db_tweets = connection.cloud_native.tweets
print(new_tweets)
users = db_users.find({"username":new_tweets['tweetedby']})
for user in users:
api_list.append(str(user))
if api_list == []:
abort(400)
else:
db_tweets.insert(new_tweets)
return "Succes"
#sdfsd
@app.route('/api/v2/tweets/<int:id>', methods=['GET'])
def get_tweet(id):
return list_tweet(id)
def list_tweet(user_id):
db = connection.cloud_native.tweets
api_list = []
tweets = db.find({'id':user_id})
for tweet in tweets:
api_list.append(str(tweet))
if api_list == []:
abort(404)
else:
return jsonify({"tweet":api_list})
from flask import render_template, make_response, url_for, request, redirect, session
def sumSessionCounter():
try:
session['counter'] += 1
except KeyError:
session['counter'] = 1
@app.route('/')
def main():
sumSessionCounter()
return render_template('main.html')
@app.route('/addname')
def addname():
if request.args.get('yourname'):
session['name'] = request.args.get('yourname')
# And then redirect the user to the main page
return redirect(url_for('main'))
else:
return render_template('addname.html', session=session)
@app.route('/adduser')
def adduser():
return render_template('adduser.htm')
@app.route('/addtweets')
def addtweets():
return render_template('addtweets.htm')
@app.route('/clear')
def clearsession():
# Clear the session
session.clear()
# Redirect the user to the main page
return redirect(url_for('main'))
@app.route('/set_cookie')
def cookie_insertion():
redirect_to_main = redirect('/')
response = app.make_response(redirect_to_main)
response.set_cookie('cookie_name2', value='qwqwqw')
return response
@app.route('/index')
def index():
return render_template('index.html')
if __name__ == "__main__":
create_mongodatabase()
app.run(host='0.0.0.0', port=5000, debug=True)
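# Illustrative requests (not part of the original app): example calls against the API once the server is
# running on port 5000; the field values are placeholders.
# curl http://localhost:5000/api/v1/users
# curl -X POST -H "Content-Type: application/json" \
#      -d '{"username": "jane.doe", "email": "[email protected]", "name": "Jane Doe", "password": "secret"}' \
#      http://localhost:5000/api/v1/users
# curl http://localhost:5000/api/v2/tweets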
|
AnatolyS1/Cloud-Native-Python
|
app.py
|
app.py
|
py
| 8,921 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "flask.request.json.keys",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "flask.session.clear",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 327,
"usage_type": "call"
}
] |
72740356347
|
import subprocess
from dataclasses import dataclass
from typing import Dict
import json
from src.config import LOGGER
@dataclass
class Server:
server_name: str
server_id: int
class SpeedTestGateway:
@classmethod
def get_speed_test_result(cls, server_id: int) -> Dict:
command = [
"speedtest",
"--format=json-pretty",
"--progress=no",
"--accept-license",
"--accept-gdpr",
f"--server-id={server_id}",
]
try:
console_output = subprocess.check_output(command, timeout=180)
return cls.parse_json(console_output=console_output)
except subprocess.CalledProcessError as exc:
LOGGER.error("Process error", extra={"server_id": server_id, "exc": str(exc)})
except subprocess.TimeoutExpired:
LOGGER.error("Time out error", extra={"server_id": server_id})
@staticmethod
def parse_json(console_output: bytes) -> Dict:
try:
return json.loads(console_output)
except ValueError as exc:
# CalledProcessError cannot be raised bare (it requires returncode and cmd); re-raise with placeholder arguments so the caller's handler still catches it
raise subprocess.CalledProcessError(returncode=1, cmd="speedtest") from exc
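# Illustrative usage sketch (not part of the original module): the server id below is a placeholder;
# pick a real id from the speedtest CLI's server list.
if __name__ == "__main__":
    result = SpeedTestGateway.get_speed_test_result(server_id=10000)
    if result:
        print("download:", result.get("download"))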
|
galloramiro/internet-connection-log
|
src/speed_test_gateway.py
|
speed_test_gateway.py
|
py
| 1,128 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dataclasses.dataclass",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "subprocess.check_output",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "src.config.LOGGER.error",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "src.config.LOGGER",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "subprocess.TimeoutExpired",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "src.config.LOGGER.error",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "src.config.LOGGER",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 35,
"usage_type": "name"
}
] |
19703597779
|
# check the costs after every time consuming all examples
# usage: python check_costs.py
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
import network
import numpy as np
import matplotlib.pyplot as plt
net = network.Network([784, 30, 10])
net.set_check_cost_inside_SGD()
net.SGD(training_data, 5, 10, 3.0, test_data=test_data)
# draw a picture
xpoints = []
ypoints = []
for x, y in net.costs:
xpoints.append(x)
ypoints.append(y)
plt.plot(xpoints, ypoints, marker = 'o', mec = 'r', mfc = 'r')
plt.xlabel('# of input')
plt.ylabel('average cost')
plt.show()
|
hzget/machine-learning
|
dl_tutorial/check_costs.py
|
check_costs.py
|
py
| 617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mnist_loader.load_data_wrapper",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "network.Network",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
}
] |
26171304664
|
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from datetime import datetime
class MLPClassifier(nn.Module):
def __init__(self):
super().__init__()
self.MLP = nn.Sequential(
nn.Linear(10000, 2000),
nn.ReLU(),
nn.Linear(2000, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 5)
)
def forward(self, x):
x = self.MLP(x)
return x
def predict(self, X):
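        # inference helper: expects a NumPy array, runs the model on the CPU, and returns predicted class indices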
self.to('cpu')
samples = torch.from_numpy(X).float()
with torch.no_grad():
outputs = self(samples)
predicted = torch.argmax(outputs.data, axis=1)
return predicted
def fit(self, train_dataset, batch_size=128, num_epochs=5, PATH=None, device='cpu'):
# Multi-layer Perceptron classifier
criterion = CrossEntropyLoss()
optimizer = Adam(self.parameters(), lr=0.001)
trainloader = DataLoader(train_dataset, batch_size=batch_size)
losses = []
running_loss = 0.0
for epoch in range(num_epochs):
for i, (inputs, labels) in enumerate(trainloader, start=0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = self(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if epoch % 1 == 0 and i==0: # print every epoch
print(f'[{epoch+1}] loss: {running_loss:.6f}')
losses.append((epoch, running_loss))
running_loss = 0.0
if not PATH:
t = datetime.now().strftime('%d-%m-%Y_%H-%M-%S')
PATH = f'models/MLP_{t}.pth'
torch.save(self.state_dict(), PATH)
return self
|
Charlie-Bell/stack-overflow-classifier
|
src/MLP.py
|
MLP.py
|
py
| 2,284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 70,
"usage_type": "call"
}
] |
30665509176
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('about/', views.about, name='about'),
path('cows/', views.cows_index, name='index'),
path('cows/<int:cow_id>/', views.cows_detail, name='detail'),
path('cows/create/', views.CowCreate.as_view(), name='cows_create'),
path('cows/<int:pk>/update/', views.CowUpdate.as_view(), name='cows_update'),
path('cows/<int:pk>/delete/', views.CowDelete.as_view(), name='cows_delete'),
path('cows/<int:cow_id>/add_feeding/', views.add_feeding, name='add_feeding'),
path('cows/<int:cow_id>/assoc_toy/<int:toy_id>/', views.assoc_toy, name='assoc_toy'),
path('cows/<int:cow_id>/unassoc_toy/<int:toy_id>/', views.unassoc_toy, name='unassoc_toy'),
path('toys/', views.ToyList.as_view(), name='toys_index'),
path('toys/<int:pk>/', views.ToyDetail.as_view(), name='toys_detail'),
path('toys/create/', views.ToyCreate.as_view(), name='toys_create'),
path('toys/<int:pk>/update/', views.ToyUpdate.as_view(), name='toys_update'),
path('toys/<int:pk>/delete/', views.ToyDelete.as_view(), name='toys_delete'),
]
|
jessmucklow/cowcollector
|
main_app/urls.py
|
urls.py
|
py
| 1,122 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
}
] |
28395227304
|
import torch
import torch.nn as nn
class ContentLoss(nn.Module):
def __init__(self, target):
super(ContentLoss, self).__init__()
        # the target must be detached here, otherwise gradients would also be computed for the target values
self.target = target.detach()
self.criterion = nn.MSELoss()
def forward(self, inputs):
self.loss = self.criterion(inputs, self.target)
return inputs
class StyleLoss(nn.Module):
def __init__(self, target):
super(StyleLoss, self).__init__()
self.gram = GramMatrix()
self.target = self.gram(target).detach()
self.criterion = nn.MSELoss()
def forward(self, inputs):
self.G = self.gram(inputs)
self.loss = self.criterion(self.G, self.target)
return inputs
class GramMatrix(nn.Module):
def forward(self, inputs):
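        # flatten the feature maps and compute the Gram matrix (inner products between flattened maps), normalized by the total number of elements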
a, b, c, d = inputs.size()
features = inputs.view(a * b, c * d)
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
|
cwpeng-cn/style-transfer
|
losses.py
|
losses.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.mm",
"line_number": 34,
"usage_type": "call"
}
] |
18187145317
|
# -----------------------------
# pluieOS source code
# made with heart by dadoum
# -----------------------------
# Partly based on rainbox
# -----------------------------
import subprocess
import sys
import time
import signal
import os
import traceback
import matplotlib as matplotlib
import numpy
import sh
import distutils.core
import urllib.request
from pprint import pprint
from PIL import Image, ImageDraw, ImageFont
from pluieAPI import Application, View, width, height
import platform
# import bakebit_128_64_oled as display
app_path = os.path.join(os.getenv("HOME"), "Pluvieuses applications") # POA is a compressed format meaning "pluieOS Application"; it corresponds to a Pluvieuse application (properly "application pluvieuse", but we keep it simple)
# The Launcher is an application like any other,
# except that it is never killed until shutdown, and it does not live in the standard applications folder
class LauncherApp(Application):
name = "Launcher"
def __init__(self):
super().__init__()
def run(self):
sub = os.listdir(app_path)
import json
# For each app
applications = []
jsons = []
dirs = []
for di in sub:
d = os.path.join(app_path, di)
if os.path.isdir(d):
# We take its app.json,
app_json = os.path.join(d, "app.json")
with open(app_json) as f:
content = f.read()
# Retrieve some important values
parsed_json = json.loads(content)
script = os.path.join(d, parsed_json["script"])
name = parsed_json["name"]
entry_point = parsed_json["entry_point"]
# And import the entry point
import importlib.util
spec = importlib.util.spec_from_file_location(os.path.splitext(parsed_json["script"])[0], script)
app = importlib.util.module_from_spec(spec)
spec.loader.exec_module(app)
# This is the application class
app_class = getattr(app, entry_point)
applications.append(app_class)
jsons.append(parsed_json)
dirs.append(d)
collectionView = AppCollectionView(applications, jsons)
while True:
btn = collectionView.run()
if not collectionView.app_avail:
print("Cannot go further, shutdown.")
btn = 3
if btn == 1:
collectionView.app_number += 1
collectionView.reset()
elif btn == 2:
selected_app = applications[collectionView.app_number]
appli = selected_app()
appli.run(dirs[collectionView.app_number])
elif btn == 3:
print("Shutdown...")
if not os.uname().machine == "x86_64":
os.system('systemctl poweroff')
break
return 0
class AppCollectionView(View):
app_number = 0
app_avail = True
app_list = []
app_jsons = []
def __init__(self, app_list, app_jsons):
super().__init__("Applications", "Next", "Select", "Shutdown")
self.app_list = app_list
self.app_jsons = app_jsons
return
def draw(self, draw):
if len(self.app_list) == 0:
w, h = draw.textsize("No any app installed\nConnect on your computer\nand install apps !")
draw.text(((width - w) / 2, (height - h) / 2), "No any app installed\nConnect on your computer\nand install apps !", 255)
self.app_avail = False
return
self.app_number %= len(self.app_list)
app_name = str(self.app_jsons[self.app_number]["name"])
w, h = draw.textsize(app_name)
app_icon = os.path.join(app_path, self.app_jsons[self.app_number]["name"], "icon.png")
img = Image.open(app_icon)
img_w, img_h = img.size
        from pluieAPI import image # Bad practice; only do this when there is no other way
image.paste(img, (5, int((height - (img_h + (h / 2))) / 2)))
draw.text(((width - w - 5), (height - h) / 2), app_name, 255)
return
def launch():
trace = ""
try:
launcher = LauncherApp()
exitCode = launcher.run()
except:
trace = ""
try:
trace = traceback.format_exc(-1)
exitCode = 2
except:
exitCode = 1
if exitCode != 0:
print("Launcher crashed !")
from pluieAPI import draw, image
draw.rectangle((0, 0, width, height), 0)
draw.text((0, 0), "Launcher crashed :(", 255)
if exitCode == 2:
w, h = draw.textsize("Launcher crashed!")
print(trace)
draw.text((0, h + 1), trace, 255, font=ImageFont.truetype('DejaVuSansMono.ttf', 8))
if os.uname().machine == "x86_64":
image.save("./debug.png")
launch()
|
Dadoum/pluieOS
|
pluieLauncher.py
|
pluieLauncher.py
|
py
| 4,248 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pluieAPI.Application",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "importlib.util.util.spec_from_file_location",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "importlib.util.util",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "importlib.util",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "importlib.util.util.module_from_spec",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "importlib.util.util",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "importlib.util",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "os.uname",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pluieAPI.View",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "pluieAPI.width",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "pluieAPI.height",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "pluieAPI.image.paste",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pluieAPI.image",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pluieAPI.height",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pluieAPI.width",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "pluieAPI.height",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "{'json': 'json', 'importlib.util': 'importlib.util'}",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pluieAPI.draw.rectangle",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pluieAPI.draw",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pluieAPI.width",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pluieAPI.height",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pluieAPI.draw.text",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pluieAPI.draw",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "pluieAPI.draw.textsize",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pluieAPI.draw",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "pluieAPI.draw.text",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "pluieAPI.draw",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "os.uname",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pluieAPI.image.save",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pluieAPI.image",
"line_number": 144,
"usage_type": "name"
}
] |
24213867050
|
from django.contrib import admin
from django.urls import path, include
from . import views
# name of the application
app_name = 'userprofile'
urlpatterns = [
path('login/', views.user_login, name='login'),
path('logout/', views.user_logout, name='logout'),
path('register/', views.user_register, name='register'),
# user information
path('edit/<int:id>/', views.user_edit, name='edit'),
]
|
blackjibert/Blog
|
myblog/userprofile/urls.py
|
urls.py
|
py
| 392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
}
] |
25091116397
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 13:14:13 2019
@author: jordan loll
Creating a cards library / deck
"""
import random
from PIL import Image, ImageDraw
#Local Path
local_path =r"C:\Users\jorda\Documents\PythonPrograms\Questar\Git_Stuff\Quest-Game"
#local_path = r"C:\Users\xTheC\Desktop\Quest\Quest-Game"
image_path = local_path+"\Images"
# Create the deck
# class for format of each card # weapons/armor need a 'slot' on each character
class card:
def __init__(self, n = "none", i = 'none', t = "none", st = "0", d = "none"):
self.title = n
self.image = i
self.type = t
self.stats = st
self.desc = d
# Weapons, Armor, and any other cards types we need
# Perhaps make a class for each type of card instead of one generic form
#images
im_sw = Image.open(image_path+"\sword.png")
# Weapons
sword = card("Sword", im_sw, "1-Handed Weapon", 10, "Sharp Steel")
spear = card("Spear", "image", "2-Handed Weapon", 30, "Deadly at a Distance")
# Armor
shield = card("Kite Shield", "shield image", "1-Handed", 20, "Impenetrable")
#print(sword.title, sword.type)
#print(sword.stats, shield.desc, spear.image)
sword.image.show()
# Jordan is the best
|
scottwedge/Quest-Game
|
Old Files/cards.py
|
cards.py
|
py
| 1,214 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 33,
"usage_type": "name"
}
] |
33147997203
|
from covid_constants_and_util import *
import geopandas as gpd
import statsmodels.api as sm
import json
import copy
from fbprophet import Prophet
from collections import Counter
import re
import h5py
import ast
from shapely import wkt
from scipy.stats import pearsonr
import fiona
import geopandas
import csv
import os
from geopandas.tools import sjoin
import time
try:
cast_to_datetime = [datetime.datetime.strptime(s, '%Y-%m-%d') for s in ALL_WEEKLY_STRINGS]
except:
print(ALL_WEEKLY_STRINGS)
raise Exception("At least one weekly string is badly formatted.")
def load_social_distancing_metrics(datetimes, version='v2'):
"""
Given a list of datetimes, load social distancing metrics for those days.
load_social_distancing_metrics(helper.list_datetimes_in_range(datetime.datetime(2020, 3, 1),
datetime.datetime(2020, 3, 7)))
"""
print("Loading social distancing metrics for %i datetimes; using version %s" % (len(datetimes), version))
t0 = time.time()
daily_cols = ['device_count', 'distance_traveled_from_home',
'completely_home_device_count', 'full_time_work_behavior_devices']
concatenated_d = None
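    # load one day of social distancing metrics at a time and outer-join the daily frames on census block group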
for dt in datetimes:
if version == 'v1':
path = os.path.join(PATH_TO_SDM_V1, dt.strftime('%Y/%m/%d/%Y-%m-%d-social-distancing.csv.gz'))
elif version == 'v2':
path = os.path.join(PATH_TO_SDM_V2, dt.strftime('%Y/%m/%d/%Y-%m-%d-social-distancing.csv.gz'))
else:
raise Exception("Version should be v1 or v2")
if os.path.exists(path):
social_distancing_d = pd.read_csv(path, usecols=['origin_census_block_group'] + daily_cols)[['origin_census_block_group'] + daily_cols]
social_distancing_d.columns = ['census_block_group'] + ['%i.%i.%i_%s' %
(dt.year, dt.month, dt.day, a) for a in daily_cols]
old_len = len(social_distancing_d)
social_distancing_d = social_distancing_d.drop_duplicates(keep=False)
n_dropped_rows = old_len - len(social_distancing_d)
assert len(set(social_distancing_d['census_block_group'])) == len(social_distancing_d)
assert(1.*n_dropped_rows/old_len < 0.002) # make sure not very many rows are duplicates.
if version == 'v2':
assert n_dropped_rows == 0 # they fixed the problem in v2.
elif version == 'v1':
assert n_dropped_rows > 0 # this seemed to be a problem in v1.
if concatenated_d is None:
concatenated_d = social_distancing_d
else:
concatenated_d = pd.merge(concatenated_d,
social_distancing_d,
how='outer',
validate='one_to_one',
on='census_block_group')
else:
raise Exception('Missing Social Distancing Metrics for %s' % dt.strftime('%Y/%m/%d'))
if concatenated_d is None: # could not find any of the dates
return concatenated_d
print("Total time to load social distancing metrics: %2.3f seconds; total rows %i" %
(time.time() - t0, len(concatenated_d)))
return concatenated_d
def annotate_with_demographic_info_and_write_out_in_chunks(full_df, just_testing=False):
"""
Annotate the Safegraph POI data with Census data and other useful POI data.
"""
full_df['safegraph_place_id'] = full_df.index
full_df.index = range(len(full_df))
# merge with areas.
safegraph_areas = pd.read_csv(PATH_TO_SAFEGRAPH_AREAS)
print("Prior to merging with safegraph areas, %i rows" % len(full_df))
safegraph_areas = safegraph_areas[['safegraph_place_id', 'area_square_feet']].dropna()
safegraph_areas.columns = ['safegraph_place_id', 'safegraph_computed_area_in_square_feet']
full_df = pd.merge(full_df, safegraph_areas, how='inner', on='safegraph_place_id', validate='one_to_one')
print("After merging with areas, %i rows" % len(full_df))
# map to demo info. The basic class we use here is CensusBlockGroups, which processes the Census data.
print("Mapping SafeGraph POIs to demographic info, including race and income.")
gdb_files = ['ACS_2017_5YR_BG_51_VIRGINIA.gdb'] if just_testing else None
cbg_mapper = CensusBlockGroups(base_directory=PATH_FOR_CBG_MAPPER, gdb_files=gdb_files)
pop_df = load_dataframe_to_correct_for_population_size()
chunksize = 100000
annotated_df = []
for chunk_number in range(len(full_df) // chunksize + 1):
print("******************Annotating chunk %i" % chunk_number)
start, end = chunk_number * chunksize, min((chunk_number + 1) * chunksize, len(full_df))
d = full_df.iloc[start:end].copy()
# Now annotate each POI on the basis of its location.
mapped_pois = cbg_mapper.get_demographic_stats_of_points(d['latitude'].values,
d['longitude'].values,
desired_cols=['p_white', 'p_asian', 'p_black', 'median_household_income', 'people_per_mile'])
mapped_pois['county_fips_code'] = mapped_pois['county_fips_code'].map(lambda x:int(x) if x is not None else x)
mapped_pois.columns = ['poi_lat_lon_%s' % a for a in mapped_pois.columns]
for c in mapped_pois.columns:
d[c] = mapped_pois[c].values
# Then annotate with demographic data based on where visitors come from (visitor_home_cbgs).
d = aggregate_visitor_home_cbgs_over_months(d, population_df=pop_df)
block_group_d = cbg_mapper.block_group_d.copy()
block_group_d['id_to_match_to_safegraph_data'] = block_group_d['GEOID'].map(lambda x:x.split("US")[1]).astype(int)
block_group_d = block_group_d[['id_to_match_to_safegraph_data', 'p_black', 'p_white', 'p_asian', 'median_household_income']]
block_group_d = block_group_d.dropna()
for col in block_group_d:
if col == 'id_to_match_to_safegraph_data':
continue
cbg_dict = dict(zip(block_group_d['id_to_match_to_safegraph_data'].values, block_group_d[col].values))
d['cbg_visitor_weighted_%s' % col] = d['aggregated_cbg_population_adjusted_visitor_home_cbgs'].map(lambda x:compute_weighted_mean_of_cbg_visitors(x, cbg_dict))
# see how well we did.
for c in [a for a in d.columns if 'poi_lat_lon_' in a or 'cbg_visitor_weighted' in a]:
print("Have data for %s for fraction %2.3f of people" % (c, 1 - pd.isnull(d[c]).mean()))
d.to_hdf(os.path.join(ANNOTATED_H5_DATA_DIR, CHUNK_FILENAME) ,f'chunk_{chunk_number}', mode='a', complevel=2)
annotated_df.append(d)
annotated_df = pd.concat(annotated_df)
annotated_df.index = range(len(annotated_df))
return annotated_df
def load_date_col_as_date(x):
# we allow this to return None because sometimes we want to filter for cols which are dates.
try:
year, month, day = x.split('.') # e.g., '2020.3.1'
return datetime.datetime(int(year), int(month), int(day))
except:
return None
def get_h5_filepath(load_backup):
backup_string = 'BACKUP_' if load_backup else ''
filepath = os.path.join(ANNOTATED_H5_DATA_DIR, backup_string + CHUNK_FILENAME)
return filepath
def load_chunk(chunk, load_backup=False):
"""
Load a single 100k chunk from the h5 file; chunks are randomized and so should be reasonably representative.
"""
filepath = get_h5_filepath(load_backup=load_backup)
print("Reading chunk %i from %s" % (chunk, filepath))
d = pd.read_hdf(filepath, key=f'chunk_{chunk}')
date_cols = [load_date_col_as_date(a) for a in d.columns]
date_cols = [a for a in date_cols if a is not None]
print("Dates range from %s to %s" % (min(date_cols), max(date_cols)))
return d
def load_multiple_chunks(chunks, load_backup=False, cols=None):
"""
Loads multiple chunks from the h5 file. Currently quite slow; quicker if only a subset of columns are kept.
Use the parameters cols to specify which columns to keep; if None then all are kept.
"""
dfs = []
for i in chunks:
t0 = time.time()
chunk = load_chunk(i, load_backup=load_backup)
print("Loaded chunk %i in %2.3f seconds" % (i, time.time() - t0))
if cols is not None:
chunk = chunk[cols]
dfs.append(chunk)
t0 = time.time()
df = pd.concat(dfs)
print("Concatenated %d chunks in %2.3f seconds" % (len(chunks), time.time() - t0))
return df
def load_all_chunks(cols=None, load_backup=False):
"""
Load all 100k chunks from the h5 file. This currently takes a while.
"""
filepath = get_h5_filepath(load_backup=load_backup)
f = h5py.File(filepath, 'r')
chunks = sorted([int(a.replace('chunk_', '')) for a in list(f.keys())])
f.close()
assert chunks == list(range(max(chunks) + 1))
print("Loading all chunks: %s" % (','.join([str(a) for a in chunks])))
return load_multiple_chunks(chunks, cols=cols, load_backup=load_backup)
def load_patterns_data(month=None, year=None, week_string=None, extra_cols=[], just_testing=False):
"""
Load in Patterns data for a single month and year, or for a single week. (These options are mutually exclusive).
Use extra_cols to define non-default columns to load.
just_testing is a flag to allow quicker prototyping; it will load only a subset of the data.
"""
change_by_date = ['visitor_home_cbgs', 'visitor_country_of_origin',
'distance_from_home', 'median_dwell', 'bucketed_dwell_times'] # fields that are time-varying
if month is not None and year is not None:
month_and_year = True
assert week_string is None
assert month in range(1, 13)
assert year in [2017, 2018, 2019, 2020]
if (year == 2019 and month == 12) or (year == 2020 and month in [1, 2]):
upload_date_string = '2020-03-16' # we originally downloaded files in two groups; load them in the same way.
else:
upload_date_string = '2019-12-12'
month_and_year_string = '%i_%02d-%s' % (year, month, upload_date_string)
base_dir = os.path.join(UNZIPPED_DATA_DIR, 'SearchofAllRecords-CORE_POI-GEOMETRY-PATTERNS-%s' % month_and_year_string)
print("Loading all files from %s" % base_dir)
filenames = [a for a in os.listdir(base_dir) if
(a.startswith('core_poi-geometry-patterns-part') and a.endswith('.csv.gz'))]
# make sure we're not ignoring any files we don't expect to ignore.
assert all([a in ['brand_info.csv', 'visit_panel_summary.csv', 'README.txt', 'home_panel_summary.csv']
for a in os.listdir(base_dir) if a not in filenames])
if just_testing:
filenames = filenames[:2]
print("Number of files to load: %i" % len(filenames))
full_paths = [os.path.join(base_dir, a) for a in filenames]
x = load_csv_possibly_with_dask(full_paths, use_dask=True, usecols=['safegraph_place_id',
'parent_safegraph_place_id',
'location_name',
'latitude',
'longitude',
'city',
'region',
'postal_code',
'top_category',
'sub_category',
'naics_code',
"polygon_wkt",
"polygon_class",
'visits_by_day',
'visitor_home_cbgs',
'visitor_country_of_origin',
'distance_from_home',
'median_dwell',
'bucketed_dwell_times'] +
extra_cols,
dtype={'naics_code': 'float64'})
print("Fraction %2.3f of NAICS codes are missing" % pd.isnull(x['naics_code']).mean())
x = x.rename(columns={k: f'{year}.{month}.{k}' for k in change_by_date})
else:
# weekly patterns data.
month_and_year = False
assert month is None and year is None
assert week_string in ALL_WEEKLY_STRINGS
filepath = os.path.join(PATH_TO_WEEKLY_PATTERNS, '%s-weekly-patterns.csv.gz' % week_string)
# Filename is misleading - it is really a zipped file.
# Also, we're missing some columns that we had before, so I think we're just going to have to join on SafeGraph ID.
x = pd.read_csv(filepath, escapechar='\\', compression='gzip', nrows=10000 if just_testing else None, usecols=['safegraph_place_id',
'visits_by_day',
'visitor_home_cbgs',
'visitor_country_of_origin',
'distance_from_home',
'median_dwell',
'bucketed_dwell_times',
'date_range_start',
'visits_by_each_hour'])
x['offset_from_gmt'] = x['date_range_start'].map(lambda x:x.split('-')[-1])
assert x['date_range_start'].map(lambda x:x.startswith(week_string + 'T' + '00:00:00')).all() # make sure date range starts where we expect for all rows.
print("Offset from GMT value counts")
print(x['offset_from_gmt'].value_counts())
del x['date_range_start']
x = x.rename(columns={k: f'{week_string}.{k}' for k in change_by_date})
print("Prior to dropping rows with no visits by day, %i rows" % len(x))
x = x.dropna(subset=['visits_by_day'])
x['visits_by_day'] = x['visits_by_day'].map(json.loads) # convert string lists to lists.
if month_and_year:
days = pd.DataFrame(x['visits_by_day'].values.tolist(),
columns=[f'{year}.{month}.{day}'
for day in range(1, len(x.iloc[0]['visits_by_day']) + 1)])
else:
year = int(week_string.split('-')[0])
month = int(week_string.split('-')[1])
start_day = int(week_string.split('-')[2])
start_datetime = datetime.datetime(year, month, start_day)
all_datetimes = [start_datetime + datetime.timedelta(days=i) for i in range(7)]
days = pd.DataFrame(x['visits_by_day'].values.tolist(),
columns=['%i.%i.%i' % (dt.year, dt.month, dt.day) for dt in all_datetimes])
# Load hourly data as well.
# Per SafeGraph documentation:
# Start time for measurement period in ISO 8601 format of YYYY-MM-DDTHH:mm:SS±hh:mm
# (local time with offset from GMT). The start time will be 12 a.m. Sunday in local time.
x['visits_by_each_hour'] = x['visits_by_each_hour'].map(json.loads) # convert string lists to lists.
assert all_datetimes[0].strftime('%A') == 'Sunday'
hours = pd.DataFrame(x['visits_by_each_hour'].values.tolist(),
columns=[f'hourly_visits_%i.%i.%i.%i' % (dt.year, dt.month, dt.day, hour)
for dt in all_datetimes
for hour in range(0, 24)])
days.index = x.index
x = pd.concat([x, days], axis=1)
if not month_and_year:
assert list(x.index) == list(range(len(x)))
assert (hours.index.values == x.index.values).all()
hours.index = x.index
old_len = len(x)
x = pd.concat([x, hours], axis=1)
assert len(x) == old_len
x = x.drop(columns=['visits_by_each_hour'])
# The hourly data has some spurious spikes
# related to the GMT-day boundary which we have to correct for.
date_cols = [load_date_col_as_date(a) for a in x.columns]
date_cols = [a for a in date_cols if a is not None]
assert len(date_cols) == 7
if week_string >= '2020-03-15': # think this is because of DST. Basically, these are the timezone strings we look for and correct; they shift at DST.
hourly_offsets = [4, 5, 6, 7]
else:
hourly_offsets = [5, 6, 7, 8]
hourly_offset_strings = ['0%i:00' % hourly_offset for hourly_offset in hourly_offsets]
percent_rows_being_corrected = (x['offset_from_gmt'].map(lambda a:a in hourly_offset_strings).mean() * 100)
print("%2.3f%% of rows have timezones that we spike-correct for." % percent_rows_being_corrected)
assert percent_rows_being_corrected > 99 # make sure we're correcting almost all rows
# have to correct for each timezone separately.
for hourly_offset in hourly_offsets:
idxs = x['offset_from_gmt'] == ('0%i:00' % hourly_offset)
for date_col in date_cols: # loop over days.
date_string = '%i.%i.%i' % (date_col.year, date_col.month, date_col.day)
# not totally clear which hours are messed up - it's mainly one hour, but the surrounding ones look weird too -
# or what the best way to interpolate is, but this yields plots which look reasonable.
for hour_to_correct in [24 - hourly_offset - 1,
24 - hourly_offset,
24 - hourly_offset + 1]:
# interpolate using hours fairly far from hour_to_correct to avoid pollution.
if hour_to_correct < 21:
cols_to_use = ['hourly_visits_%s.%i' % (date_string, a) for a in [hour_to_correct - 3, hour_to_correct + 3]]
else:
# Use smaller offset so we don't have hours >= 24. This technically overlaps with earlier hours,
# but I think it should be okay because they will already have been corrected.
cols_to_use = ['hourly_visits_%s.%i' % (date_string, a) for a in [hour_to_correct - 2, hour_to_correct + 2]]
assert all([col in x.columns for col in cols_to_use])
x.loc[idxs, 'hourly_visits_%s.%i' % (date_string, hour_to_correct)] = x.loc[idxs, cols_to_use].mean(axis=1)
del x['offset_from_gmt']
x = x.set_index('safegraph_place_id')
x = x.drop(columns=['visits_by_day'])
if month_and_year:
print("%i rows loaded for month and year %s" % (len(x), month_and_year_string))
else:
print("%i rows loaded for week %s" % (len(x), week_string))
return x
def load_weekly_patterns_v2_data(week_string, cols_to_keep, expand_hourly_visits=True):
"""
Load in Weekly Patterns V2 data for a single week.
If week_string <= '2020-06-15': we are using the earlier version of Weekly Pattern v2 in /weekly_20190101_20200615/, and
week_string denotes the first day of the week.
Else: we are using the later version of Weekly Patterns v2 in /weekly_20200615_20201005/, and week_string denotes
the day this update was released.
"""
ts = time.time()
elements = week_string.split('-')
assert len(elements) == 3
week_datetime = datetime.datetime(int(elements[0]), int(elements[1]), int(elements[2]))
cols_to_load = cols_to_keep.copy()
must_load_cols = ['date_range_start', 'visits_by_each_hour'] # required for later logic
for k in must_load_cols:
if k not in cols_to_load:
cols_to_load.append(k)
if week_string <= '2020-06-15':
path_to_csv = os.path.join(CURRENT_DATA_DIR, 'weekly_20190101_20200615/main-file/%s-weekly-patterns.csv.gz' % week_string)
assert os.path.isfile(path_to_csv)
print('Loading from %s' % path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, use_dask=True, usecols=cols_to_load, dtype={'poi_cbg':'float64'})
start_day_string = week_string
start_datetime = week_datetime
else:
path_to_weekly_dir = os.path.join(CURRENT_DATA_DIR, 'weekly_20200615_20201028/patterns/%s/' % week_datetime.strftime('%Y/%m/%d'))
inner_folder = os.listdir(path_to_weekly_dir)
assert len(inner_folder) == 1 # there is always a single folder inside the weekly folder
path_to_patterns_parts = os.path.join(path_to_weekly_dir, inner_folder[0])
dfs = []
for filename in sorted(os.listdir(path_to_patterns_parts)):
if filename.startswith('patterns-part'): # e.g., patterns-part1.csv.gz
path_to_csv = os.path.join(path_to_patterns_parts, filename)
assert os.path.isfile(path_to_csv)
print('Loading from %s' % path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, use_dask=True, usecols=cols_to_load, dtype={'poi_cbg':'float64'})
dfs.append(df)
df = pd.concat(dfs, axis=0)
start_day_string = df.iloc[0].date_range_start.split('T')[0]
elements = start_day_string.split('-')
assert len(elements) == 3
start_datetime = datetime.datetime(int(elements[0]), int(elements[1]), int(elements[2]))
assert df['date_range_start'].map(lambda x:x.startswith(start_day_string + 'T00:00:00')).all() # make sure date range starts where we expect for all rows.
if expand_hourly_visits: # expand single hourly visits column into one column per hour
df['visits_by_each_hour'] = df['visits_by_each_hour'].map(json.loads) # convert string lists to lists.
all_dates = [start_datetime + datetime.timedelta(days=i) for i in range(7)] # all days in the week
hours = pd.DataFrame(df['visits_by_each_hour'].values.tolist(),
columns=[f'hourly_visits_%i.%i.%i.%i' % (date.year, date.month, date.day, hour)
for date in all_dates
for hour in range(0, 24)])
assert len(hours) == len(df)
hours.index = df.index
df = pd.concat([df, hours], axis=1)
# The hourly data has some spurious spikes
# related to the GMT-day boundary which we have to correct for.
df['offset_from_gmt'] = df['date_range_start'].map(lambda x:x[len(start_day_string + 'T00:00:00'):])
print("Offset from GMT value counts")
offset_counts = df['offset_from_gmt'].value_counts()
print(offset_counts)
hourly_offset_strings = offset_counts[:4].index # four most common timezones across POIs
assert all(['-0%i:00' % x in hourly_offset_strings for x in [5, 6, 7]]) # should always include GMT-5, -6, -7
assert ('-04:00' in hourly_offset_strings) or ('-08:00' in hourly_offset_strings) # depends on DST
percent_rows_being_corrected = (df['offset_from_gmt'].map(lambda x:x in hourly_offset_strings).mean() * 100)
print("%2.3f%% of rows have timezones that we spike-correct for." % percent_rows_being_corrected)
assert percent_rows_being_corrected > 98 # almost all rows should fall in these timezones
end_datetime = datetime.datetime(all_dates[-1].year, all_dates[-1].month, all_dates[-1].day, 23)
# have to correct for each timezone separately.
for offset_string in sorted(hourly_offset_strings):
print('Correcting GMT%s...' % offset_string)
idxs = df['offset_from_gmt'] == offset_string
offset_int = int(offset_string.split(':')[0])
assert (-8 <= offset_int) and (offset_int <= -4)
for date in all_dates:
# not totally clear which hours are messed up - it's mainly one hour, but the surrounding ones
# look weird too - but this yields plots which look reasonable.
for hour_to_correct in [24 + offset_int - 1,
24 + offset_int,
24 + offset_int + 1]:
# interpolate using hours fairly far from hour_to_correct to avoid pollution.
dt_hour_to_correct = datetime.datetime(date.year, date.month, date.day, hour_to_correct)
start_hour = max(start_datetime, dt_hour_to_correct + datetime.timedelta(hours=-3))
end_hour = min(end_datetime, dt_hour_to_correct + datetime.timedelta(hours=3))
cols_to_use = [f'hourly_visits_%i.%i.%i.%i' % (dt.year, dt.month, dt.day, dt.hour) for dt in list_hours_in_range(start_hour, end_hour)]
assert all([col in df.columns for col in cols_to_use])
# this technically overlaps with earlier hours, but it should be okay because they will
# already have been corrected.
df.loc[idxs, 'hourly_visits_%i.%i.%i.%i' % (date.year, date.month, date.day, hour_to_correct)] = df.loc[idxs, cols_to_use].mean(axis=1)
non_required_cols = [col for col in df.columns if not(col in cols_to_keep or col.startswith('hourly_visits_'))]
df = df.drop(columns=non_required_cols)
df = df.set_index('safegraph_place_id')
te = time.time()
print("%i rows loaded for week %s [total time = %.2fs]" % (len(df), start_day_string, te-ts))
return df
def load_core_places_footprint_data(cols_to_keep):
area_csv = os.path.join(CURRENT_DATA_DIR, 'core_places_footprint/August2020Release/SafeGraphPlacesGeoSupplementSquareFeet.csv.gz')
print('Loading', area_csv)
df = load_csv_possibly_with_dask(area_csv, usecols=cols_to_keep, use_dask=True)
df = df.set_index('safegraph_place_id')
print('Loaded core places footprint data for %d POIs' % len(df))
return df
def load_core_places_data(cols_to_keep):
core_dir = os.path.join(CURRENT_DATA_DIR, 'core_places/2020/10/') # use the most recent core info
dfs = []
for filename in sorted(os.listdir(core_dir)):
if filename.startswith('core_poi-part'):
path_to_csv = os.path.join(core_dir, filename)
print('Loading', path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, usecols=cols_to_keep, use_dask=True)
dfs.append(df)
df = pd.concat(dfs, axis=0)
df = df.set_index('safegraph_place_id')
print('Loading core places info for %d POIs' % len(df))
return df
def load_google_mobility_data(only_US=True):
df = pd.read_csv(PATH_TO_GOOGLE_DATA)
if only_US:
df = df[df['country_region_code'] == 'US']
return df
def list_datetimes_in_range(min_day, max_day):
"""
Return a list of datetimes in a range from min_day to max_day, inclusive. Increment is one day.
"""
assert(min_day <= max_day)
days = []
while min_day <= max_day:
days.append(min_day)
min_day = min_day + datetime.timedelta(days=1)
return days
def list_hours_in_range(min_hour, max_hour):
"""
Return a list of datetimes in a range from min_hour to max_hour, inclusive. Increment is one hour.
"""
assert(min_hour <= max_hour)
hours = []
while min_hour <= max_hour:
hours.append(min_hour)
min_hour = min_hour + datetime.timedelta(hours=1)
return hours
def normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints(old_dict):
"""
Self-explanatory; used by aggregate_visitor_home_cbgs_over_months.
"""
new_dict = {}
value_sum = 1.*sum(old_dict.values())
if len(old_dict) > 0:
assert value_sum > 0
for k in old_dict:
new_dict[int(k)] = old_dict[k] / value_sum
return new_dict
def cast_keys_to_ints(old_dict):
new_dict = {}
for k in old_dict:
new_dict[int(k)] = old_dict[k]
return new_dict
def aggregate_visitor_home_cbgs_over_months(d, cutoff_year=2019, population_df=None, periods_to_include=None):
"""
Aggregate visitor_home_cbgs across months and produce a normalized aggregate field.
Usage: d = aggregate_visitor_home_cbgs_over_months(d).
cutoff = the earliest time (could be year or year.month) to aggregate data from
population_df = the DataFrame loaded by load_dataframe_to_correct_for_population_size
"""
t0 = time.time()
if periods_to_include is not None:
cols = ['%s.visitor_home_cbgs' % period for period in periods_to_include]
assert cutoff_year is None
else:
# Not using CBG data from weekly files for now because of concerns that it's inconsistently
# processed - they change how they do the privacy filtering.
assert cutoff_year is not None
weekly_cols_to_exclude = ['%s.visitor_home_cbgs' % a for a in ALL_WEEKLY_STRINGS]
cols = [a for a in d.columns if (a.endswith('.visitor_home_cbgs') and (a >= str(cutoff_year)) and (a not in weekly_cols_to_exclude))]
print('Aggregating data from: %s' % cols)
assert all([a in d.columns for a in cols])
# Helper variables to use if visitor_home_cbgs counts need adjusting for differential sampling across CBGs.
adjusted_cols = []
if population_df is not None:
int_cbgs = [int(cbg) for cbg in population_df.census_block_group]
for k in cols:
if type(d.iloc[0][k]) != Counter:
print('Filling %s with Counter objects' % k)
d[k] = d[k].fillna('{}').map(lambda x:Counter(cast_keys_to_ints(json.loads(x)))) # map strings to counters.
if population_df is not None:
sub_t0 = time.time()
new_col = '%s_adjusted' % k
assert new_col not in d.columns
total_population = population_df.total_cbg_population.to_numpy()
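            # note: str.strip removes a set of characters rather than a suffix; it works here because the date prefix (e.g. "2019.1") begins and ends with digits, which are not in the stripped character set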
time_period = k.strip('.visitor_home_cbgs')
population_col = 'number_devices_residing_%s' % time_period
assert(population_col in population_df.columns)
num_devices = population_df[population_col].to_numpy()
assert np.isnan(num_devices).sum() == 0
assert np.isnan(total_population).sum() == 0
cbg_coverage = num_devices / total_population
median_coverage = np.nanmedian(cbg_coverage)
cbg_coverage = dict(zip(int_cbgs, cbg_coverage))
assert ~np.isnan(median_coverage)
assert ~np.isinf(median_coverage)
assert median_coverage > 0.001
# want to make sure we aren't missing data for too many CBGs, so a small hack - have
# adjust_home_cbg_counts_for_coverage return two arguments, where the second argument
# tells us if we had to clip or fill in the missing coverage number.
d[new_col] = d[k].map(lambda x:adjust_home_cbg_counts_for_coverage(x, cbg_coverage, median_coverage=median_coverage))
print('Finished adjusting home CBG counts for %s [time=%.3fs] had to fill in or clip coverage for %2.6f%% of rows; in those cases used median coverage %2.3f' %
(time_period, time.time() - sub_t0, 100 * d[new_col].map(lambda x:x[1]).mean(), median_coverage))
d[new_col] = d[new_col].map(lambda x:x[0]) # remove the second argument of adjust_home_cbg_counts_for_coverage, we don't need it anymore.
adjusted_cols.append(new_col)
# make sure there are no NAs anywhere.
assert d[k].map(lambda x:len([a for a in x.values() if np.isnan(a)])).sum() == 0
assert d[new_col].map(lambda x:len([a for a in x.values() if np.isnan(a)])).sum() == 0
# add counters together across months.
d['aggregated_visitor_home_cbgs'] = d[cols].aggregate(func=sum, axis=1)
# normalize each counter so its values sum to 1.
d['aggregated_visitor_home_cbgs'] = d['aggregated_visitor_home_cbgs'].map(normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints)
if len(adjusted_cols) > 0:
d['aggregated_cbg_population_adjusted_visitor_home_cbgs'] = d[adjusted_cols].aggregate(func=sum, axis=1)
d['aggregated_cbg_population_adjusted_visitor_home_cbgs'] = d['aggregated_cbg_population_adjusted_visitor_home_cbgs'].map(normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints)
d = d.drop(columns=adjusted_cols)
for k in ['aggregated_cbg_population_adjusted_visitor_home_cbgs',
'aggregated_visitor_home_cbgs']:
y = d.loc[d[k].map(lambda x:len(x) > 0), k]
y = y.map(lambda x:sum(x.values()))
assert np.allclose(y, 1)
print("Aggregating CBG visitors over %i time periods took %2.3f seconds" % (len(cols), time.time() - t0))
print("Fraction %2.3f of POIs have CBG visitor data" % (d['aggregated_visitor_home_cbgs'].map(lambda x:len(x) != 0).mean()))
return d
def adjust_home_cbg_counts_for_coverage(cbg_counter, cbg_coverage, median_coverage, max_upweighting_factor=100):
"""
Adjusts the POI-CBG counts from SafeGraph to estimate the true count, based on the
coverage that SafeGraph has for this CBG.
cbg_counter: a Counter object mapping CBG to the original count
cbg_coverage: a dictionary where keys are CBGs and each data point represents SafeGraph's coverage: num_devices / total_population
This should be between 0 and 1 for the vast majority of cases, although for some weird CBGs it may not be.
Returns the adjusted dictionary and a Bool flag had_to_guess_coverage_value which tells us whether we had to adjust the coverage value.
"""
had_to_guess_coverage_value = False
if len(cbg_counter) == 0:
return cbg_counter, had_to_guess_coverage_value
new_counter = Counter()
for cbg in cbg_counter:
# cover some special cases which should happen very rarely.
if cbg not in cbg_coverage:
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
elif np.isnan(cbg_coverage[cbg]): # not sure this case ever actually happens, but just in case.
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
else:
assert cbg_coverage[cbg] >= 0
upweighting_factor = 1 / cbg_coverage[cbg] # need to invert coverage
if upweighting_factor > max_upweighting_factor:
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
new_counter[cbg] = cbg_counter[cbg] * upweighting_factor
return new_counter, had_to_guess_coverage_value
def compute_weighted_mean_of_cbg_visitors(cbg_visitor_fracs, cbg_values):
"""
Given a dictionary cbg_visitor_fracs which gives the fraction of people from a CBG which visit a POI
and a dictionary cbg_values which maps CBGs to values, compute the weighted mean for the POI.
"""
if len(cbg_visitor_fracs) == 0:
return None
else:
numerator = 0.
denominator = 0.
for cbg in cbg_visitor_fracs:
if cbg not in cbg_values:
continue
numerator += cbg_visitor_fracs[cbg] * cbg_values[cbg]
denominator += cbg_visitor_fracs[cbg]
if denominator == 0:
return None
return numerator/denominator
def load_dataframe_for_individual_msa(MSA_name, nrows=None):
"""
This loads all the POI info for a single MSA.
"""
t0 = time.time()
filename = os.path.join(STRATIFIED_BY_AREA_DIR, '%s.csv' % MSA_name)
d = pd.read_csv(filename, nrows=nrows)
for k in (['aggregated_cbg_population_adjusted_visitor_home_cbgs', 'aggregated_visitor_home_cbgs']):
d[k] = d[k].map(lambda x:cast_keys_to_ints(json.loads(x)))
for k in ['%s.visitor_home_cbgs' % a for a in ALL_WEEKLY_STRINGS]:
d[k] = d[k].fillna('{}')
d[k] = d[k].map(lambda x:cast_keys_to_ints(json.loads(x)))
print("Loaded %i rows for %s in %2.3f seconds" % (len(d), MSA_name, time.time() - t0))
return d
def load_dataframe_to_correct_for_population_size(just_load_census_data=False):
"""
Load in a dataframe with rows for the 2018 ACS Census population code in each CBG
and the SafeGraph population count in each CBG (from home-panel-summary.csv).
The correlation is not actually that good, likely because individual CBG counts are noisy.
    Definition of num_devices_residing: Number of distinct devices observed with a primary nighttime location in the specified census block group.
"""
acs_data = pd.read_csv(PATH_TO_ACS_1YR_DATA,
encoding='cp1252',
usecols=['STATEA', 'COUNTYA', 'TRACTA', 'BLKGRPA','AJWBE001'],
dtype={'STATEA':str,
'COUNTYA':str,
'BLKGRPA':str,
'TRACTA':str})
# https://www.census.gov/programs-surveys/geography/guidance/geo-identifiers.html
# FULL BLOCK GROUP CODE = STATE+COUNTY+TRACT+BLOCK GROUP
assert (acs_data['STATEA'].map(len) == 2).all()
assert (acs_data['COUNTYA'].map(len) == 3).all()
assert (acs_data['TRACTA'].map(len) == 6).all()
assert (acs_data['BLKGRPA'].map(len) == 1).all()
acs_data['census_block_group'] = (acs_data['STATEA'] +
acs_data['COUNTYA'] +
acs_data['TRACTA'] +
acs_data['BLKGRPA'])
acs_data['census_block_group'] = acs_data['census_block_group'].astype(int)
assert len(set(acs_data['census_block_group'])) == len(acs_data)
acs_data['county_code'] = (acs_data['STATEA'] + acs_data['COUNTYA']).astype(int)
acs_data = acs_data[['census_block_group', 'AJWBE001', 'STATEA', 'county_code']]
acs_data = acs_data.rename(mapper={'AJWBE001':'total_cbg_population',
'STATEA':'state_code'}, axis=1)
print("%i rows of 2018 1-year ACS data read" % len(acs_data))
if just_load_census_data:
return acs_data
combined_data = acs_data
# now read in safegraph data to use as normalizer. Months and years first.
all_filenames = []
all_date_strings = []
for month, year in [(1, 2017),(2, 2017),(3, 2017),(4, 2017),(5, 2017),(6, 2017),(7, 2017),(8, 2017),(9, 2017),(10, 2017),(11, 2017),(12, 2017),
(1, 2018),(2, 2018),(3, 2018),(4, 2018),(5, 2018),(6, 2018),(7, 2018),(8, 2018),(9, 2018),(10, 2018),(11, 2018),(12, 2018),
(1, 2019),(2, 2019),(3, 2019),(4, 2019),(5, 2019),(6, 2019),(7, 2019),(8, 2019),(9, 2019),(10, 2019),(11, 2019),(12, 2019),
(1, 2020),(2, 2020)]:
if (year == 2019 and month == 12) or (year == 2020 and month in [1, 2]):
upload_date_string = '2020-03-16' # we downloaded files in two groups; load them in the same way.
else:
upload_date_string = '2019-12-12'
month_and_year_string = '%i_%02d-%s' % (year, month, upload_date_string)
filename = os.path.join(UNZIPPED_DATA_DIR,
'SearchofAllRecords-CORE_POI-GEOMETRY-PATTERNS-%s' % month_and_year_string,
'home_panel_summary.csv')
all_filenames.append(filename)
all_date_strings.append('%i.%i' % (year, month))
# now weeks
for date_string in ALL_WEEKLY_STRINGS:
all_filenames.append(os.path.join(PATH_TO_HOME_PANEL_SUMMARY, '%s-home-panel-summary.csv' % date_string))
all_date_strings.append(date_string)
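    # track which CBGs ever have more SafeGraph devices than Census population (ratio > 1) in any period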
cbgs_with_ratio_above_one = np.array([False for a in range(len(acs_data))])
for filename_idx, filename in enumerate(all_filenames):
date_string = all_date_strings[filename_idx]
print("\n*************")
safegraph_counts = pd.read_csv(filename, dtype={'census_block_group':str})
print("%s: %i devices read from %i rows" % (
date_string, safegraph_counts['number_devices_residing'].sum(), len(safegraph_counts)))
safegraph_counts = safegraph_counts[['census_block_group', 'number_devices_residing']]
col_name = 'number_devices_residing_%s' % date_string
safegraph_counts.columns = ['census_block_group', col_name]
safegraph_counts['census_block_group'] = safegraph_counts['census_block_group'].map(int)
assert len(safegraph_counts['census_block_group'].dropna()) == len(safegraph_counts)
print("Number of unique Census blocks: %i; unique blocks %i: WARNING: DROPPING NON-UNIQUE ROWS" %
(len(safegraph_counts['census_block_group'].drop_duplicates(keep=False)), len(safegraph_counts)))
safegraph_counts = safegraph_counts.drop_duplicates(subset=['census_block_group'], keep=False)
combined_data = pd.merge(combined_data,
safegraph_counts,
how='left',
validate='one_to_one',
on='census_block_group')
missing_data_idxs = pd.isnull(combined_data[col_name])
print("Missing data for %i rows; filling with zeros" % missing_data_idxs.sum())
combined_data.loc[missing_data_idxs, col_name] = 0
r, p = pearsonr(combined_data['total_cbg_population'], combined_data[col_name])
combined_data['ratio'] = combined_data[col_name]/combined_data['total_cbg_population']
cbgs_with_ratio_above_one = cbgs_with_ratio_above_one | (combined_data['ratio'].values > 1)
combined_data.loc[combined_data['total_cbg_population'] == 0, 'ratio'] = None
print("Ratio of SafeGraph count to Census count")
print(combined_data['ratio'].describe(percentiles=[.25, .5, .75, .9, .99, .999]))
print("Correlation between SafeGraph and Census counts: %2.3f" % (r))
print("Warning: %i CBGs with a ratio greater than 1 in at least one month" % cbgs_with_ratio_above_one.sum())
del combined_data['ratio']
combined_data.index = range(len(combined_data))
assert len(combined_data.dropna()) == len(combined_data)
return combined_data
def load_and_reconcile_multiple_acs_data():
"""
Because we use Census data from two data sources, load a single dataframe that combines both.
"""
acs_1_year_d = load_dataframe_to_correct_for_population_size(just_load_census_data=True)
column_rename = {'total_cbg_population':'total_cbg_population_2018_1YR'}
acs_1_year_d = acs_1_year_d.rename(mapper=column_rename, axis=1)
acs_1_year_d['state_name'] = acs_1_year_d['state_code'].map(lambda x:FIPS_CODES_FOR_50_STATES_PLUS_DC[str(x)] if str(x) in FIPS_CODES_FOR_50_STATES_PLUS_DC else np.nan)
acs_5_year_d = pd.read_csv(PATH_TO_ACS_5YR_DATA)
print('%i rows of 2017 5-year ACS data read' % len(acs_5_year_d))
acs_5_year_d['census_block_group'] = acs_5_year_d['GEOID'].map(lambda x:x.split("US")[1]).astype(int)
# rename dynamic attributes to indicate that they are from ACS 2017 5-year
dynamic_attributes = ['p_black', 'p_white', 'p_asian', 'median_household_income',
'block_group_area_in_square_miles', 'people_per_mile']
column_rename = {attr:'%s_2017_5YR' % attr for attr in dynamic_attributes}
acs_5_year_d = acs_5_year_d.rename(mapper=column_rename, axis=1)
# repetitive with 'state_code' and 'county_code' column from acs_1_year_d
acs_5_year_d = acs_5_year_d.drop(['Unnamed: 0', 'STATEFP', 'COUNTYFP'], axis=1)
combined_d = pd.merge(acs_1_year_d, acs_5_year_d, on='census_block_group', how='outer', validate='one_to_one')
combined_d['people_per_mile_hybrid'] = combined_d['total_cbg_population_2018_1YR'] / combined_d['block_group_area_in_square_miles_2017_5YR']
return combined_d
def compute_cbg_day_prop_out(sdm_of_interest, cbgs_of_interest=None):
'''
Computes the proportion of people leaving a CBG on each day.
It returns a new DataFrame, with one row per CBG representing proportions for each day in sdm_of_interest.
sdm_of_interest: a Social Distancing Metrics dataframe, data for the time period of interest
cbgs_of_interest: a list, the CBGs for which to compute reweighting; if None, then
reweighting is computed for all CBGs in sdm_of_interest
---------------------------------------
Sample usage:
sdm_sq = helper.load_social_distancing_metrics(status_quo_days)
days_of_interest = helper.list_datetimes_in_range(datetime.datetime(2020, 3, 1), datetime.datetime(2020, 4, 1))
sdm_of_interest = helper.load_social_distancing_metrics(days_of_interest)
    reweightings_df = helper.compute_cbg_day_prop_out(sdm_of_interest)
'''
# Process SDM of interest dataframe
orig_len = len(sdm_of_interest)
interest_num_home_cols = [col for col in sdm_of_interest.columns if col.endswith('completely_home_device_count')]
interest_device_count_cols = [col for col in sdm_of_interest.columns if col.endswith('device_count') and col not in interest_num_home_cols]
sdm_of_interest = sdm_of_interest.dropna(subset=interest_device_count_cols + interest_num_home_cols)
assert sdm_of_interest['census_block_group'].duplicated().sum() == 0
sdm_of_interest.set_index(sdm_of_interest['census_block_group'].values, inplace=True)
print('Kept %i / %i CBGs with non-NaN SDM for days of interest' % (len(sdm_of_interest), orig_len))
if cbgs_of_interest is None:
cbgs_of_interest = sdm_of_interest.census_block_group.unique()
# Find CBGs in common between SDM dataframe and CBGs of interest
cbgs_with_data = set(cbgs_of_interest).intersection(sdm_of_interest.index)
print('Found SDM data for %i / %i CBGs of interest' % (len(cbgs_with_data), len(cbgs_of_interest)))
# Get proportion of population that goes out during days of interest
sub_sdm_int = sdm_of_interest[sdm_of_interest['census_block_group'].isin(cbgs_with_data)]
assert(len(sub_sdm_int) == len(cbgs_with_data))
sub_sdm_int = sub_sdm_int.sort_values(by='census_block_group')
assert list(sub_sdm_int['census_block_group']) == sorted(cbgs_with_data)
int_num_out = sub_sdm_int[interest_device_count_cols].values - sub_sdm_int[interest_num_home_cols].values
int_prop_out = int_num_out / sub_sdm_int[interest_device_count_cols].values
int_prop_out = np.clip(int_prop_out, 1e-10, None) # so that the reweighting is not zero
N, T = int_prop_out.shape
dates = [col.strip('_device_count') for col in interest_device_count_cols]
dates2 = [col.strip('_completely_home_device_count') for col in interest_num_home_cols]
assert dates == dates2
sorted_cbgs_with_data = sorted(cbgs_with_data)
prop_df = pd.DataFrame(int_prop_out, columns=dates)
prop_df['census_block_group'] = sorted_cbgs_with_data
# If we could not compute reweighting for a CBG, use median reweighting for that day
if len(cbgs_with_data) < len(cbgs_of_interest):
missing_cbgs = set(cbgs_of_interest) - cbgs_with_data
print('Filling %d CBGs with median props' % len(missing_cbgs))
median_prop = np.median(int_prop_out, axis=0)
missing_props = np.broadcast_to(median_prop, (len(missing_cbgs), T))
missing_props_df = pd.DataFrame(missing_props, columns=dates)
missing_props_df['census_block_group'] = list(missing_cbgs)
prop_df = pd.concat((prop_df, missing_props_df))
return prop_df
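# Illustrative note (added, not part of the original source): prop_df has one row per CBG and one
# column per day plus 'census_block_group', roughly
#   census_block_group  2020-03-01  2020-03-02  ...
#   10010201001         0.63        0.58        ...   (hypothetical CBG id and values)
# where each entry is (device_count - completely_home_device_count) / device_count, clipped away
# from zero above so it can safely be used as a multiplicative reweighting.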
def write_out_acs_5_year_data():
cbg_mapper = CensusBlockGroups(base_directory=PATH_FOR_CBG_MAPPER, gdb_files=None)
geometry_cols = ['STATEFP',
'COUNTYFP',
'TRACTCE',
'Metropolitan/Micropolitan Statistical Area',
'CBSA Title',
'State Name']
block_group_cols = ['GEOID',
'p_black',
'p_white',
'p_asian',
'median_household_income',
'block_group_area_in_square_miles',
'people_per_mile']
for k in geometry_cols:
cbg_mapper.block_group_d[k] = cbg_mapper.geometry_d[k].values
df_to_write_out = cbg_mapper.block_group_d[block_group_cols + geometry_cols]
print("Total rows: %i" % len(df_to_write_out))
print("Missing data")
print(pd.isnull(df_to_write_out).mean())
df_to_write_out.to_csv(PATH_TO_ACS_5YR_DATA)
class CensusBlockGroups:
"""
A class for loading geographic and demographic data from the ACS.
A census block group is a relatively small area.
Less good than houses but still pretty granular. https://en.wikipedia.org/wiki/Census_block_group
Data was downloaded from https://www.census.gov/geographies/mapping-files/time-series/geo/tiger-data.html
We use the most recent ACS 5-year estimates: 2013-2017, eg:
wget https://www2.census.gov/geo/tiger/TIGER_DP/2017ACS/ACS_2017_5YR_BG.gdb.zip
These files are convenient because they combine both geographic boundaries + demographic data, leading to a cleaner join.
The main method for data access is get_demographic_stats_of_point. Sample usage:
x = CensusBlockGroups(gdb_files=['ACS_2017_5YR_BG_51_VIRGINIA.gdb'])
x.get_demographic_stats_of_points(latitudes=[38.8816], longitudes=[-77.0910], desired_cols=['p_black', 'p_white', 'mean_household_income'])
"""
def __init__(self, base_directory=PATH_TO_CENSUS_BLOCK_GROUP_DATA,
gdb_files=None,
county_to_msa_mapping_filepath=PATH_TO_COUNTY_TO_MSA_MAPPING):
self.base_directory = base_directory
if gdb_files is None:
self.gdb_files = ['ACS_2017_5YR_BG.gdb']
else:
self.gdb_files = gdb_files
self.crs_to_use = WGS_84_CRS # https://epsg.io/4326, WGS84 - World Geodetic System 1984, used in GPS.
self.county_to_msa_mapping_filepath = county_to_msa_mapping_filepath
self.load_raw_dataframes() # Load in raw geometry and demographic dataframes.
# annotate demographic data with more useful columns.
self.annotate_with_race()
self.annotate_with_income()
self.annotate_with_counties_to_msa_mapping()
self.annotate_with_area_and_pop_density()
def annotate_with_area_and_pop_density(self):
# https://gis.stackexchange.com/questions/218450/getting-polygon-areas-using-geopandas.
# See comments about using cea projection.
gdf = self.geometry_d[['geometry']].copy().to_crs({'proj':'cea'})
area_in_square_meters = gdf['geometry'].area.values
self.block_group_d['block_group_area_in_square_miles'] = area_in_square_meters / (1609.34 ** 2)
self.block_group_d['people_per_mile'] = (self.block_group_d['B03002e1'] /
self.block_group_d['block_group_area_in_square_miles'])
print(self.block_group_d[['block_group_area_in_square_miles', 'people_per_mile']].describe())
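    # Unit-conversion check (comment added for clarity): 1 mile = 1609.34 m, so
    # 1 square mile = 1609.34 ** 2 ~ 2.59e6 square meters, which is why the area in square
    # meters from the cea projection is divided by 1609.34 ** 2 above.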
def annotate_with_race(self):
"""
Analysis focuses on black and non-white population groups. Also annotate with p_asian because of possible anti-Asian discrimination.
B03002e1 HISPANIC OR LATINO ORIGIN BY RACE: Total: Total population -- (Estimate)
B03002e3 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: White alone: Total population -- (Estimate)
B03002e4 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: Black or African American alone: Total population -- (Estimate)
B03002e6 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: Asian alone: Total population -- (Estimate)
"""
print("annotating with race")
self.block_group_d['p_black'] = self.block_group_d['B03002e4'] / self.block_group_d['B03002e1']
self.block_group_d['p_white'] = self.block_group_d['B03002e3'] / self.block_group_d['B03002e1']
self.block_group_d['p_asian'] = self.block_group_d['B03002e6'] / self.block_group_d['B03002e1']
print(self.block_group_d[['p_black', 'p_white', 'p_asian']].describe())
def load_raw_dataframes(self):
"""
Read in the original demographic + geographic data.
"""
self.block_group_d = None
self.geometry_d = None
demographic_layer_names = ['X25_HOUSING_CHARACTERISTICS', 'X01_AGE_AND_SEX', 'X03_HISPANIC_OR_LATINO_ORIGIN', 'X19_INCOME']
for file in self.gdb_files:
# https://www.reddit.com/r/gis/comments/775imb/accessing_a_gdb_without_esri_arcgis/doj9zza
full_path = os.path.join(self.base_directory, file)
layer_list = fiona.listlayers(full_path)
print(file)
print(layer_list)
geographic_layer_name = [a for a in layer_list if a[:15] == 'ACS_2017_5YR_BG']
assert len(geographic_layer_name) == 1
geographic_layer_name = geographic_layer_name[0]
geographic_data = geopandas.read_file(full_path, layer=geographic_layer_name).to_crs(self.crs_to_use)
# by default when you use the read file command, the column containing spatial objects is named "geometry", and will be set as the active column.
print(geographic_data.columns)
geographic_data = geographic_data.sort_values(by='GEOID_Data')[['GEOID_Data', 'geometry', 'STATEFP', 'COUNTYFP', 'TRACTCE']]
for demographic_idx, demographic_layer_name in enumerate(demographic_layer_names):
assert demographic_layer_name in layer_list
if demographic_idx == 0:
demographic_data = geopandas.read_file(full_path, layer=demographic_layer_name)
else:
old_len = len(demographic_data)
new_df = geopandas.read_file(full_path, layer=demographic_layer_name)
assert sorted(new_df['GEOID']) == sorted(demographic_data['GEOID'])
demographic_data = demographic_data.merge(new_df, on='GEOID', how='inner')
assert old_len == len(demographic_data)
demographic_data = demographic_data.sort_values(by='GEOID')
shared_geoids = set(demographic_data['GEOID'].values).intersection(set(geographic_data['GEOID_Data'].values))
print("Length of demographic data: %i; geographic data %i; %i GEOIDs in both" % (len(demographic_data), len(geographic_data), len(shared_geoids)))
demographic_data = demographic_data.loc[demographic_data['GEOID'].map(lambda x:x in shared_geoids)]
geographic_data = geographic_data.loc[geographic_data['GEOID_Data'].map(lambda x:x in shared_geoids)]
demographic_data.index = range(len(demographic_data))
geographic_data.index = range(len(geographic_data))
assert (geographic_data['GEOID_Data'] == demographic_data['GEOID']).all()
assert len(geographic_data) == len(set(geographic_data['GEOID_Data']))
if self.block_group_d is None:
self.block_group_d = demographic_data
else:
self.block_group_d = pd.concat([self.block_group_d, demographic_data])
if self.geometry_d is None:
self.geometry_d = geographic_data
else:
self.geometry_d = pd.concat([self.geometry_d, geographic_data])
assert pd.isnull(self.geometry_d['STATEFP']).sum() == 0
good_idxs = self.geometry_d['STATEFP'].map(lambda x:x in FIPS_CODES_FOR_50_STATES_PLUS_DC).values
print("Warning: the following State FIPS codes are being filtered out")
print(self.geometry_d.loc[~good_idxs, 'STATEFP'].value_counts())
print("%i/%i Census Block Groups in total removed" % ((~good_idxs).sum(), len(good_idxs)))
self.geometry_d = self.geometry_d.loc[good_idxs]
self.block_group_d = self.block_group_d.loc[good_idxs]
self.geometry_d.index = self.geometry_d['GEOID_Data'].values
self.block_group_d.index = self.block_group_d['GEOID'].values
def annotate_with_income(self):
"""
We want a single income number for each block group. This method computes that.
"""
print("Computing household income")
# copy-pasted column definitions right out of the codebook.
codebook_string = """
B19001e2 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): Less than $10,000: Households -- (Estimate)
B19001e3 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $10,000 to $14,999: Households -- (Estimate)
B19001e4 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $15,000 to $19,999: Households -- (Estimate)
B19001e5 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $20,000 to $24,999: Households -- (Estimate)
B19001e6 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $25,000 to $29,999: Households -- (Estimate)
B19001e7 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $30,000 to $34,999: Households -- (Estimate)
B19001e8 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $35,000 to $39,999: Households -- (Estimate)
B19001e9 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $40,000 to $44,999: Households -- (Estimate)
B19001e10 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $45,000 to $49,999: Households -- (Estimate)
B19001e11 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $50,000 to $59,999: Households -- (Estimate)
B19001e12 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $60,000 to $74,999: Households -- (Estimate)
B19001e13 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $75,000 to $99,999: Households -- (Estimate)
B19001e14 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $100,000 to $124,999: Households -- (Estimate)
B19001e15 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $125,000 to $149,999: Households -- (Estimate)
B19001e16 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $150,000 to $199,999: Households -- (Estimate)
B19001e17 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $200,000 or more: Households -- (Estimate)
"""
self.income_bin_edges = [0] + list(range(10000, 50000, 5000)) + [50000, 60000, 75000, 100000, 125000, 150000, 200000]
income_column_names_to_vals = {}
column_codes = codebook_string.split('\n')
for f in column_codes:
if len(f.strip()) == 0:
continue
col_name = f.split('HOUSEHOLD INCOME')[0].strip()
if col_name == 'B19001e2':
val = 10000
elif col_name == 'B19001e17':
val = 200000
else:
lower_bound = float(f.split('$')[1].split()[0].replace(',', ''))
upper_bound = float(f.split('$')[2].split(':')[0].replace(',', ''))
val = (lower_bound + upper_bound) / 2
income_column_names_to_vals[col_name] = val
print("The value for column %s is %2.1f" % (col_name, val))
# each column gives the count of households with that income. So we need to take a weighted sum to compute the average income.
self.block_group_d['total_household_income'] = 0.
self.block_group_d['total_households'] = 0.
for col in income_column_names_to_vals:
self.block_group_d['total_household_income'] += self.block_group_d[col] * income_column_names_to_vals[col]
self.block_group_d['total_households'] += self.block_group_d[col]
self.block_group_d['mean_household_income'] = 1.*self.block_group_d['total_household_income'] / self.block_group_d['total_households']
self.block_group_d['median_household_income'] = self.block_group_d['B19013e1'] # MEDIAN HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): Median household income in the past 12 months (in 2017 inflation-adjusted dollars): Households -- (Estimate)
assert (self.block_group_d['total_households'] == self.block_group_d['B19001e1']).all() # sanity check: our count should agree with theirs.
assert (pd.isnull(self.block_group_d['mean_household_income']) == (self.block_group_d['B19001e1'] == 0)).all()
print("Warning: missing income data for %2.1f%% of census blocks with 0 households" % (pd.isnull(self.block_group_d['mean_household_income']).mean() * 100))
self.income_column_names_to_vals = income_column_names_to_vals
assert len(self.income_bin_edges) == len(self.income_column_names_to_vals)
print(self.block_group_d[['mean_household_income', 'total_households']].describe())
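    # Worked toy example (added; the counts are hypothetical): a block group with 10 households
    # in the $10,000 to $14,999 bin (midpoint 12,500) and 5 households in the $200,000-or-more
    # bin (coded as 200,000) gets
    #   total_household_income = 10 * 12,500 + 5 * 200,000 = 1,125,000
    #   total_households       = 15
    #   mean_household_income  = 1,125,000 / 15 = 75,000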
def annotate_with_counties_to_msa_mapping(self):
"""
Annotate with metropolitan area info for consistency with Experienced Segregation paper.
# https://www2.census.gov/programs-surveys/metro-micro/geographies/reference-files/2017/delineation-files/list1.xls
"""
print("Loading county to MSA mapping")
self.counties_to_msa_df = pd.read_csv(self.county_to_msa_mapping_filepath, skiprows=2, dtype={'FIPS State Code':str, 'FIPS County Code':str})
print("%i rows read" % len(self.counties_to_msa_df))
self.counties_to_msa_df = self.counties_to_msa_df[['CBSA Title',
'Metropolitan/Micropolitan Statistical Area',
'State Name',
'FIPS State Code',
'FIPS County Code']]
self.counties_to_msa_df.columns = ['CBSA Title',
'Metropolitan/Micropolitan Statistical Area',
'State Name',
'STATEFP',
'COUNTYFP']
self.counties_to_msa_df = self.counties_to_msa_df.dropna(how='all') # remove a couple blank rows.
assert self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'].map(lambda x:x in ['Metropolitan Statistical Area', 'Micropolitan Statistical Area']).all()
print("Number of unique Metropolitan statistical areas: %i" %
len(set(self.counties_to_msa_df.loc[self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'] == 'Metropolitan Statistical Area', 'CBSA Title'])))
print("Number of unique Micropolitan statistical areas: %i" %
len(set(self.counties_to_msa_df.loc[self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'] == 'Micropolitan Statistical Area', 'CBSA Title'])))
old_len = len(self.geometry_d)
assert len(self.counties_to_msa_df.drop_duplicates(['STATEFP', 'COUNTYFP'])) == len(self.counties_to_msa_df)
self.geometry_d = self.geometry_d.merge(self.counties_to_msa_df,
on=['STATEFP', 'COUNTYFP'],
how='left')
# For some reason the index gets reset here. Annoying, not sure why.
self.geometry_d.index = self.geometry_d['GEOID_Data'].values
assert len(self.geometry_d) == old_len
assert (self.geometry_d.index == self.block_group_d.index).all()
def get_demographic_stats_of_points(self, latitudes, longitudes, desired_cols):
"""
Given a list or array of latitudes and longitudes, matches to Census Block Group.
Returns a dictionary which includes the state and county FIPS code, along with any columns in desired_cols.
This method assumes the latitudes and longitudes are in https://epsg.io/4326, which is what I think is used for Android/iOS -> SafeGraph coordinates.
"""
def dtype_pandas_series(obj):
return str(type(obj)) == "<class 'pandas.core.series.Series'>"
assert not dtype_pandas_series(latitudes)
assert not dtype_pandas_series(longitudes)
assert len(latitudes) == len(longitudes)
t0 = time.time()
# we have to match stuff a million rows at a time because otherwise we get weird memory warnings.
start_idx = 0
end_idx = start_idx + int(1e6)
merged = []
while start_idx < len(longitudes):
print("Doing spatial join on points with indices from %i-%i" % (start_idx, min(end_idx, len(longitudes))))
points = geopandas.GeoDataFrame(pd.DataFrame({'placeholder':np.array(range(start_idx, min(end_idx, len(longitudes))))}), # this column doesn't matter. We just have to create a geo data frame.
geometry=geopandas.points_from_xy(longitudes[start_idx:end_idx], latitudes[start_idx:end_idx]),
crs=self.crs_to_use)
# see eg gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude)). http://geopandas.org/gallery/create_geopandas_from_pandas.html
merged.append(sjoin(points, self.geometry_d[['geometry']], how='left', op='within'))
assert len(merged[-1]) == len(points)
start_idx += int(1e6)
end_idx += int(1e6)
merged = pd.concat(merged)
merged.index = range(len(merged))
assert list(merged.index) == list(merged['placeholder'])
could_not_match = pd.isnull(merged['index_right']).values
print("Cannot match to a CBG for a fraction %2.3f of points" % could_not_match.mean())
results = {}
for k in desired_cols + ['state_fips_code', 'county_fips_code', 'Metropolitan/Micropolitan Statistical Area', 'CBSA Title', 'GEOID_Data', 'TRACTCE']:
results[k] = [None] * len(latitudes)
results = pd.DataFrame(results)
matched_geoids = merged['index_right'].values[~could_not_match]
for c in desired_cols:
results.loc[~could_not_match, c] = self.block_group_d.loc[matched_geoids, c].values
if c in ['p_white', 'p_black', 'mean_household_income', 'median_household_income', 'new_census_monthly_rent_to_annual_income_multiplier', 'new_census_median_monthly_rent_to_annual_income_multiplier']:
results[c] = results[c].astype('float')
results.loc[~could_not_match, 'state_fips_code'] = self.geometry_d.loc[matched_geoids, 'STATEFP'].values
results.loc[~could_not_match, 'county_fips_code'] = self.geometry_d.loc[matched_geoids, 'COUNTYFP'].values
results.loc[~could_not_match, 'Metropolitan/Micropolitan Statistical Area'] = self.geometry_d.loc[matched_geoids,'Metropolitan/Micropolitan Statistical Area'].values
results.loc[~could_not_match, 'CBSA Title'] = self.geometry_d.loc[matched_geoids, 'CBSA Title'].values
results.loc[~could_not_match, 'GEOID_Data'] = self.geometry_d.loc[matched_geoids, 'GEOID_Data'].values
results.loc[~could_not_match, 'TRACTCE'] = self.geometry_d.loc[matched_geoids, 'TRACTCE'].values
print("Total query time is %2.3f" % (time.time() - t0))
return results
|
snap-stanford/covid-mobility
|
helper_methods_for_aggregate_data_analysis.py
|
helper_methods_for_aggregate_data_analysis.py
|
py
| 68,047 |
python
|
en
|
code
| 146 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 392,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 550,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 657,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 716,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 724,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.pearsonr",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 941,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 941,
"usage_type": "attribute"
},
{
"api_name": "fiona.listlayers",
"line_number": 942,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 949,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 956,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 959,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 1108,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 1117,
"usage_type": "call"
},
{
"api_name": "geopandas.points_from_xy",
"line_number": 1118,
"usage_type": "call"
},
{
"api_name": "geopandas.tools.sjoin",
"line_number": 1121,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 1149,
"usage_type": "call"
}
] |
29942141352
|
import functools
import typing as tp
import shapely.geometry
import torch
import torchmetrics
from torch.nn.utils.rnn import PackedSequence
def _multiarange(counts: torch.Tensor) -> torch.Tensor:
"""Returns a sequence of aranges concatenated along the first dimension.
>>> counts = torch.tensor([1, 3, 2])
>>> _multiarange(counts)
torch.tensor([0, 0, 1, 2, 0, 1])
"""
counts1 = counts[:-1]
reset_index = counts1.cumsum(0)
incr = torch.ones(int(counts.sum()), dtype=torch.int64)
incr[0] = 0
incr[reset_index] = 1 - counts1
out: torch.Tensor = incr.cumsum(0)
return out
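# Added note: paired with torch.repeat_interleave, _multiarange builds the (row, col) indices of
# every valid (non-padded) position, which is how `update` below selects only real target tokens.
# For lens = torch.tensor([2, 3]):
#   torch.repeat_interleave(lens) -> tensor([0, 0, 1, 1, 1])
#   _multiarange(lens)            -> tensor([0, 1, 0, 1, 2])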
class TokenAccuracy(torchmetrics.Metric):
higher_is_better = True
def __init__(self) -> None:
super().__init__()
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, prediction: PackedSequence, target: PackedSequence) -> None:
if prediction.data.ndim == 2:
prediction = prediction._replace(data=prediction.data.argmax(1))
# prediction and target should be padded to the same length so the shapes match
pad_length = max(len(prediction.batch_sizes), len(target.batch_sizes))
prediction_padded, prediction_lens = torch.nn.utils.rnn.pad_packed_sequence(
prediction, batch_first=True, total_length=pad_length
)
target_padded, target_lens = torch.nn.utils.rnn.pad_packed_sequence(
target, batch_first=True, total_length=pad_length
)
        # count correct tokens only over target positions; if the prediction is longer,
        # the extra predicted tokens are ignored
selection = (torch.repeat_interleave(target_lens), _multiarange(target_lens))
self.correct += torch.sum(
prediction_padded[selection] == target_padded[selection]
)
self.total += torch.sum(target_lens)
def compute(self) -> torch.Tensor:
if self.correct == 0:
return torch.tensor(0.0)
return self.correct / self.total # type:ignore[operator]
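# Minimal usage sketch (added; values are illustrative, not from the original tests):
#   from torch.nn.utils.rnn import pack_sequence
#   metric = TokenAccuracy()
#   pred = pack_sequence([torch.tensor([1, 2, 3])])
#   target = pack_sequence([torch.tensor([1, 2, 0])])
#   metric.update(pred, target)
#   metric.compute()  # 2 of 3 target tokens match -> tensor(0.6667)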
class SequenceAccuracy(torchmetrics.Metric):
higher_is_better = True
def __init__(self) -> None:
super().__init__()
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, prediction: PackedSequence, target: PackedSequence) -> None:
if prediction.data.ndim == 2:
prediction = prediction._replace(data=prediction.data.argmax(1))
# prediction and target should be padded to the same length so the shapes match
pad_length = max(len(prediction.batch_sizes), len(target.batch_sizes))
prediction_padded, prediction_lens = torch.nn.utils.rnn.pad_packed_sequence(
prediction, batch_first=True, total_length=pad_length
)
target_padded, target_lens = torch.nn.utils.rnn.pad_packed_sequence(
target, batch_first=True, total_length=pad_length
)
batch_size = target_padded.shape[0]
self.correct += torch.sum(torch.all(prediction_padded == target_padded, dim=1))
self.total += batch_size # type:ignore[operator]
def compute(self) -> torch.Tensor:
if self.correct == 0:
return torch.tensor(0.0)
return self.correct / self.total # type:ignore[operator]
class PolygonAccuracy(torchmetrics.Metric):
correct: torch.Tensor
total: torch.Tensor
def __init__(self) -> None:
super().__init__()
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(
self,
point_sets: PackedSequence,
predictions: PackedSequence,
targets: PackedSequence,
) -> None:
pad = functools.partial(
torch.nn.utils.rnn.pad_packed_sequence, batch_first=True
)
if predictions.data.ndim == 2:
predictions = predictions._replace(data=predictions.data.argmax(1))
correct, total = 0, 0
for i, (
point_set,
point_set_len,
prediction,
prediction_len,
target,
target_len,
) in enumerate(zip(*pad(point_sets), *pad(predictions), *pad(targets))):
target_polygon = shapely.geometry.Polygon(
point_set[target[: target_len - 1] - 1].tolist()
)
predicted_polygon = shapely.geometry.Polygon(
point_set[prediction[: prediction_len - 1] - 1].tolist()
)
correct += target_polygon.equals(predicted_polygon)
total += 1
self.correct += correct
self.total += total
def compute(self) -> torch.Tensor:
if self.correct == 0:
return torch.tensor(0.0)
return self.correct / self.total
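# Added clarification (inferred from the indexing above, so treat as an assumption): decoder
# tokens appear to be 1-based indices into the padded point set terminated by a 0 token, hence
# the `[: length - 1]` slice to drop the terminator and the `- 1` shift back to 0-based point
# indices before building the shapely polygons.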
class AverageAreaCoverage(torchmetrics.Metric):
coverages: tp.List[torch.Tensor]
is_valid: tp.List[torch.Tensor]
def __init__(self, is_valid_threshold: float = 0.1) -> None:
super().__init__()
self.is_valid_threshold = is_valid_threshold
self.add_state("coverages", default=[], dist_reduce_fx="cat")
self.add_state("is_valid", default=[], dist_reduce_fx="cat")
def update(
self,
point_sets: PackedSequence,
predictions: PackedSequence,
targets: PackedSequence,
) -> None:
pad = functools.partial(
torch.nn.utils.rnn.pad_packed_sequence, batch_first=True
)
coverages: tp.List[float] = []
is_valid: tp.List[bool] = []
for (
point_set,
point_set_len,
prediction,
prediction_len,
target,
target_len,
) in zip(*pad(point_sets), *pad(predictions), *pad(targets)):
target_polygon = shapely.geometry.Polygon(
point_set[target[: target_len - 1] - 1].tolist()
)
predicted_polygon = shapely.geometry.Polygon(
point_set[prediction[: prediction_len - 1] - 1].tolist()
)
coverages.append(predicted_polygon.area / target_polygon.area)
is_valid.append(predicted_polygon.is_simple)
self.coverages.append(torch.tensor(coverages))
self.is_valid.append(torch.tensor(is_valid))
def compute(self) -> torch.Tensor:
is_valid = torch.cat(self.is_valid, dim=0)
coverages = torch.cat(self.coverages, dim=0)
if torch.sum(~is_valid) > self.is_valid_threshold * len(is_valid):
return torch.tensor(-1.0)
return torch.mean(coverages[is_valid])
class TourDistance(torchmetrics.Metric):
tour_distances: tp.List[torch.Tensor]
def __init__(self) -> None:
super().__init__()
self.add_state("tour_distances", default=[], dist_reduce_fx="cat")
def update(self, point_sets: PackedSequence, prediction: PackedSequence) -> None:
batch_size = point_sets.batch_sizes[0]
device = point_sets.data.device
point_sets_padded, npoints = torch.nn.utils.rnn.pad_packed_sequence(
point_sets, batch_first=True
)
prediction_padded, prediction_lens = torch.nn.utils.rnn.pad_packed_sequence(
prediction, batch_first=True
)
max_pred_len = prediction_padded.shape[1]
batch_arange = torch.arange(batch_size, device=device)
assert torch.all(
prediction_padded[batch_arange, prediction_lens - 1] == 0
), "all prediction should finish with a 0"
assert torch.all(
prediction_padded[batch_arange, prediction_lens - 2]
== prediction_padded[:, 0]
), "all tours should end where they start"
# pad with the first value, so that summing distances after closing
# tour doesn't increase the tour distance
prediction_padded += (
torch.arange(max_pred_len, device=device).expand_as(prediction_padded)
>= (prediction_lens.to(device) - 1)[:, None]
) * prediction_padded[:, 0:1]
# NOTE: i just trust from decoding that there are no repeated points
# and all points are visited
curr = point_sets_padded[batch_arange[:, None], prediction_padded[:, :-1] - 1]
next_ = point_sets_padded[batch_arange[:, None], prediction_padded[:, 1:] - 1]
tour_distances = torch.sum(
torch.sqrt(torch.sum((next_ - curr) ** 2, dim=2)), dim=1
)
self.tour_distances.append(tour_distances)
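    # Toy example of the padding trick above (added; coordinates are made up): with points
    # (0,0), (1,0), (1,1) and prediction tokens [1, 2, 3, 1, 0] (closed tour plus end token),
    # the in-place fix turns the tokens into [1, 2, 3, 1, 1], so the summed legs are
    # 1 + 1 + sqrt(2) + 0 = 2 + sqrt(2) ~ 3.414, and trailing padding never adds distance.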
def compute(self) -> torch.Tensor:
all_tour_distances = torch.cat(self.tour_distances)
return all_tour_distances.mean()
|
gchaperon/pointer-networks
|
ptrnets/metrics.py
|
metrics.py
|
py
| 8,846 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "torch.Tensor",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torchmetrics.Metric",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.repeat_interleave",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torchmetrics.Metric",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "torch.sum",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.all",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "torchmetrics.Metric",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry.geometry.Polygon",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.geometry",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "shapely.geometry.geometry.Polygon",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.geometry",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "torchmetrics.Metric",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry.geometry.Polygon",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.geometry",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "shapely.geometry.geometry.Polygon",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.geometry",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "torchmetrics.Metric",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.PackedSequence",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.all",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.all",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 238,
"usage_type": "attribute"
}
] |
35616526877
|
# https://adventofcode.com/2022/day/15
from dataclasses import dataclass
from aoctk.data import Range, weighted_union_size
from aoctk.input import get_lines
from aoctk.metric import manhattan2d as md
@dataclass
class Sensor:
pos: complex
beacon: complex
distance: int
def __init__(self, desc):
self.pos, self.beacon = eval(
desc.replace("Sensor at x=", "complex(")
.replace("y=", "")
.replace(": closest beacon is at x=", "), complex(")
+ ")"
)
self.distance = md(self.beacon, self.pos)
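# Worked parse example (added; uses the well-known AoC sample line, treat as illustrative):
# "Sensor at x=2, y=18: closest beacon is at x=-2, y=15" is rewritten by the replaces above into
# "complex(2, 18), complex(-2, 15)", so pos = (2+18j), beacon = (-2+15j) and the Manhattan
# distance is |2 - (-2)| + |18 - 15| = 7.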
def get_intervals(y, sensors):
beacons = tuple({int(_.beacon.real) for _ in sensors if _.beacon.imag == y})
intervals = []
for s in sensors:
left = s.distance - int(abs(s.pos.imag - y))
if left >= 0:
intervals.extend(
Range(int(s.pos.real - left), int(s.pos.real + left)).split(*beacons)
)
return Range.weighted_union(intervals)
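# Geometry note with a small example (added): a sensor at (px, py) with radius d covers, on row y,
# the x-range [px - left, px + left] where left = d - |py - y|; with the sample sensor above
# (pos 2+18j, d = 7), row y=16 gives left = 7 - 2 = 5, i.e. x in [-3, 7], while row y=26 gives
# left = -1 and contributes nothing.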
def part_one(data="input.txt", y=2000000):
return weighted_union_size(get_intervals(y, [Sensor(_) for _ in get_lines(data)]))
def part_two(data="input.txt", y=2000000, lo=0, hi=4000000):
sensors = [Sensor(_) for _ in get_lines(data)]
beacons = {_.beacon for _ in sensors}
v_max = hi - lo + 1
for cy in (
_ for p in zip(range(y - 1, lo - 1, -1), range(y + 1, hi + 1)) for _ in p
):
intervals = get_intervals(cy, sensors)
for i, _ in intervals:
i.clip(lo, hi)
if weighted_union_size(intervals) < v_max:
(x,) = set(range(lo, hi + 1)) - set.union(
*(set(i) for i, w in intervals if w > 0)
)
if complex(x, cy) not in beacons:
return x * 4000000 + cy
def test():
assert part_one("test.txt", 10) == 26
assert part_two("test.txt", 10, 0, 20) == 56000011
|
P403n1x87/aoc
|
2022/15/code.py
|
code.py
|
py
| 1,883 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "aoctk.metric.manhattan2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "aoctk.data.Range",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "aoctk.data.Range.weighted_union",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "aoctk.data.Range",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "aoctk.data.weighted_union_size",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "aoctk.input.get_lines",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "aoctk.input.get_lines",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "aoctk.data.weighted_union_size",
"line_number": 56,
"usage_type": "call"
}
] |
23936105649
|
import numpy as np
from scipy.io import loadmat
from variables import *
def load_mat_file(mat_file):
mat_data = loadmat(mat_file)
x, y = mat_data['X'], mat_data['y']
x = x.reshape(
-1,
input_shape[0],
input_shape[1],
input_shape[2]
)
y = y.reshape(-1,)
return x, y
def load_data():
X, Y = load_mat_file(train_dir)
Xtest, Ytest = load_mat_file(test_dir)
X = X/rescale
Xtest = Xtest/rescale
return X, Y, Xtest, Ytest
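# Usage sketch (added; assumes variables.py defines train_dir, test_dir, input_shape and rescale,
# e.g. input_shape = (32, 32, 3) and rescale = 255.0 for the SVHN .mat files):
#   X, Y, Xtest, Ytest = load_data()
#   X.shape -> (num_train, 32, 32, 3), with pixel values scaled into [0, 1]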
|
1zuu/SVHN-Image-Classification
|
util.py
|
util.py
|
py
| 543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scipy.io.loadmat",
"line_number": 7,
"usage_type": "call"
}
] |
70720136829
|
import re
from zipfile import ZipFile
nothing = 90052
nothings = []
with ZipFile('channel.zip', 'r') as myzip:
def get_path(nothing):
return '{0}.txt'.format(nothing)
def get_next_nothing(nothing):
data = myzip.read(get_path(nothing)).decode('utf-8')
        m = re.search(r'(\d*)$', data)
next_nothing = m.group(1)
return next_nothing
def get_comment(nothing):
return myzip.getinfo(get_path(nothing)).comment.decode('utf-8')
    while True:
try:
if nothing:
nothings.append(nothing)
nothing = get_next_nothing(nothing)
except:
break
print("".join([get_comment(n) for n in nothings]))
#http://www.pythonchallenge.com/pc/def/oxygen.html
|
akiran/pythonchallenge
|
challenge7.py
|
challenge7.py
|
py
| 758 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "zipfile.ZipFile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 12,
"usage_type": "call"
}
] |
40253497699
|
from django.urls import path
from . import views
app_name = 'chat'
urlpatterns = [
path('', views.index, name='index'),
path('create_room/', views.create_room, name='create_room'),
path('my_rooms/', views.rooms_list, name='rooms_list'),
path('<str:room_name>/', views.room, name='room'),
]
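# Added note (illustrative, assuming the app is included under some project-level prefix):
# with app_name = 'chat', these routes can be reversed by namespace, e.g.
#   from django.urls import reverse
#   reverse('chat:room', kwargs={'room_name': 'lobby'})  # -> '<prefix>/lobby/'
#   reverse('chat:create_room')                           # -> '<prefix>/create_room/'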
|
michalr45/django-chat
|
chat/urls.py
|
urls.py
|
py
| 308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
}
] |
71997536508
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import *
from .models import *
# from my side...
@login_required(login_url='/useraccount/common_login')
def business_location_list(request):
if request.method == 'POST':
form = BusinessLocationForm(request.POST)
if form.is_valid():
form_save = form.save(commit=False)
form_save.save()
            if form_save.location_id == '' or form_save.location_id is None:
form_save.location_id = 'LOC-1000' + str(form_save.id)
form_save.save()
messages.success(request, 'added a business location ' + str(form_save.name))
return redirect('business-location')
else:
form = BusinessLocationForm()
list_locations = BusinessLocation.objects.filter(status=True).order_by('-id')
return render(request, 'divmart_dashboard/business_location_list.html', {'lists':list_locations,'form':form})
@login_required(login_url='/useraccount/common_login')
def business_location_update(request, id):
loc_obj = BusinessLocation.objects.get(id=id)
if request.method == 'POST':
form = BusinessLocationForm(request.POST, instance=loc_obj)
if form.is_valid():
form.save()
messages.success(request, str(loc_obj.name) +' update success...')
return redirect('business-location')
else:
form = BusinessLocationForm(instance=loc_obj)
return render(request, 'divmart_dashboard/business_location_edit.html', {'form':form, 'loc_obj':loc_obj})
@login_required(login_url='/useraccount/common_login')
def business_location_delete(request, id):
loc_obj = BusinessLocation.objects.get(id=id)
if loc_obj:
loc_obj.status = False
loc_obj.save()
messages.info(request, str(loc_obj.name) + ' remove success..')
return redirect('business-location')
@login_required(login_url='/useraccount/common_login')
def add_tax_rate(request):
if request.method == 'POST':
form = TaxRateForm(request.POST)
if form.is_valid():
form_save = form.save(commit=False)
form_save.status = True
form_save.save()
return redirect('tax-rate')
else:
form = TaxRateForm()
tax_rates = TaxRate.objects.filter(status=True)
return render(request, 'divmart_dashboard/tax_rate.html', {'rates':tax_rates})
@login_required(login_url='/useraccount/common_login')
def edit_tax_rate(request, id):
tax_rate_obj = TaxRate.objects.get(id=id, status=True)
if request.method == 'POST':
form = TaxRateForm(request.POST, instance = tax_rate_obj)
if form.is_valid():
form.save()
return redirect('tax-rate')
else:
form = TaxRateForm(instance=tax_rate_obj)
return render(request, 'divmart_dashboard/tax_rate_edit.html', {'obj':tax_rate_obj})
@login_required(login_url='/useraccount/common_login')
def delete_tax_rate(request, id):
tax_rate_obj = TaxRate.objects.get(id=id, status=True)
tax_rate_obj.status = False
tax_rate_obj.save()
return redirect('tax-rate')
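# Hypothetical URL wiring implied by the redirect() names used above (added as an assumption,
# not taken from this project's actual urls.py):
#   urlpatterns = [
#       path('business-locations/', views.business_location_list, name='business-location'),
#       path('tax-rates/', views.add_tax_rate, name='tax-rate'),
#   ]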
|
chaitphani/New-DivMart
|
div_settings/views.py
|
views.py
|
py
| 3,238 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.messages.success",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 86,
"usage_type": "call"
}
] |
71969681149
|
"""This module is useful for generating yaml files for the withParams tests and for running unformal
compiler tests during development."""
import time
from kfp.compiler import compiler
from kfp import dsl
from kfp.dsl import _for_loop
class Coder:
def __init__(self, ):
self._code_id = 0
def get_code(self, ):
self._code_id += 1
return '{code:0{num_chars:}d}'.format(code=self._code_id, num_chars=_for_loop.LoopArguments.NUM_CODE_CHARS)
dsl.ParallelFor._get_unique_id_code = Coder().get_code
if __name__ == '__main__':
do_output = True
params = {}
if do_output:
@dsl.pipeline(name='my-pipeline')
def pipeline():
op0 = dsl.ContainerOp(
name="my-out-cop0",
image='python:alpine3.6',
command=["sh", "-c"],
arguments=['python -c "import json; import sys; json.dump([{\'a\': 1, \'b\': 2}, {\'a\': 10, \'b\': 20}], open(\'/tmp/out.json\', \'w\'))"'],
file_outputs={'out': '/tmp/out.json'},
)
with dsl.ParallelFor(op0.output) as item:
op1 = dsl.ContainerOp(
name="my-in-cop1",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo do output op1 item.a: %s" % item.a],
)
op_out = dsl.ContainerOp(
name="my-out-cop2",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo do output op2, outp: %s" % op0.output],
)
job_name = f'do-output=TRUE-passed-{time.time()}'
else:
@dsl.pipeline(name='my-pipeline')
def pipeline(loopidy_doop=[{'a': 1, 'b': 2}, {'a': 10, 'b': 20}]):
op0 = dsl.ContainerOp(
name="my-out-cop0",
image='python:alpine3.6',
command=["sh", "-c"],
arguments=['python -c "import json; import sys; json.dump([i for i in range(20, 31)], open(\'/tmp/out.json\', \'w\'))"'],
file_outputs={'out': '/tmp/out.json'},
)
with dsl.ParallelFor(loopidy_doop) as item:
op1 = dsl.ContainerOp(
name="my-in-cop1",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op1, item: %s" % item.a],
).after(op0)
op_out = dsl.ContainerOp(
name="my-out-cop2",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op2, outp: %s" % op0.output],
)
job_name = f'do-output=FALSE-global-{time.time()}'
yaml_text = compiler.Compiler().compile(pipeline, None)
print(yaml_text)
import kfp
import time
client = kfp.Client(host='127.0.0.1:8080/pipeline')
print(client.list_experiments())
pkg_path = '/tmp/witest_pkg.tar.gz'
compiler.Compiler().compile(pipeline, package_path=pkg_path)
exp = client.create_experiment('withparams_exp')
client.run_pipeline(
experiment_id=exp.id,
job_name=job_name,
pipeline_package_path=pkg_path,
params=params,
)
|
kubeflow/kfp-tekton-backend
|
sdk/python/tests/compiler/compiler_withparams_test_helper.py
|
compiler_withparams_test_helper.py
|
py
| 3,332 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "kfp.dsl._for_loop.LoopArguments",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "kfp.dsl._for_loop",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ParallelFor",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "kfp.dsl",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ParallelFor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.pipeline",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ParallelFor",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.ContainerOp",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "kfp.dsl.pipeline",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "kfp.dsl",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "kfp.compiler.compiler.Compiler",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "kfp.compiler.compiler",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "kfp.Client",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "kfp.compiler.compiler.Compiler",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "kfp.compiler.compiler",
"line_number": 90,
"usage_type": "name"
}
] |
6969799516
|
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
from torch.jit.annotations import List
#added
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from load_utils import load_state_dict_from_url
from cub_voc import CUB_VOC
import os
from tqdm import tqdm
import shutil
import numpy as np
from newPad2d import newPad2d
#from torch.autograd import Variable
MEMORY_EFFICIENT = True
IS_TRAIN = 0 # 0/1
IS_MULTI = 0 # 0/1
LAYERS = '121'
DATANAME = 'bird' # bird/cat/.../cub/helen/voc_multi
NUM_CLASSES =6 if IS_MULTI else 2
cub_file = '/data/sw/dataset/frac_dataset'
voc_file = '/data/sw/dataset/VOCdevkit/VOC2010/voc2010_crop'
log_path = '/data/fjq/iccnn/densenet/' # for model
save_path = '/data/fjq/iccnn/basic_fmap/densenet/' # for get_feature
acc_path = '/data/fjq/iccnn/basic_fmap/densenet/acc/'
dataset = '%s_densenet_%s_ori' % (LAYERS, DATANAME)
log_path = log_path + dataset + '/'
pretrain_model = log_path + 'model_2000.pth'
BATCHSIZE = 1
LR = 0.00001
EPOCH = 1000
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=MEMORY_EFFICIENT):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=0, #new padding
bias=False)),
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
self.pad2d_1 = newPad2d(1) #nn.ReplicationPad2d(1)#new padding
def bn_function(self, inputs):
# type: (List[Tensor]) -> Tensor
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, input):
# type: (List[Tensor]) -> bool
for tensor in input:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, input):
# type: (List[Tensor]) -> Tensor
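        # Gradient checkpointing trades compute for memory: the concatenation/bottleneck
        # activations inside the closure are not stored during the forward pass and are
        # recomputed during backward instead.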
def closure(*inputs):
return self.bn_function(inputs)
return cp.checkpoint(closure, *input)
@torch.jit._overload_method # noqa: F811
def forward(self, input):
# type: (List[Tensor]) -> (Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, input):
# type: (Tensor) -> (Tensor)
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, input): # noqa: F811
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.pad2d_1(self.relu2(self.norm2(bottleneck_output))))#new padding
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=MEMORY_EFFICIENT):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=2, memory_efficient=MEMORY_EFFICIENT):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
padding=0, bias=False)), # new padding
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)), # new padding
]))
self.pad2d_1 = newPad2d(1)#nn.ZeroPad2d(1) #new padding
self.pad2d_3 = newPad2d(3)#nn.ZeroPad2d(3) #new padding
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
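        # conv0 and pool0 were built with padding=0 above, so the replacement padding layers
        # (pad2d_3 before conv0 at i == 0, pad2d_1 before pool0 at i == 3) are applied here.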
for i, layer in enumerate(self.features):
if i == 0:
x = self.pad2d_3(x) # new padding
if i == 3:
x = self.pad2d_1(x) # new padding
x = layer(x)
out = F.relu(x, inplace=True)
f_map = out.detach() # get_feature
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out, f_map #out
def _load_state_dict(model, model_url, progress):
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
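    # For illustration, a checkpoint key such as
    #   'features.denseblock1.denselayer1.norm.1.weight'
    # matches with group(1) = '...denselayer1.norm' and group(2) = '1.weight',
    # so the rebuilt key below becomes 'features.denseblock1.denselayer1.norm1.weight'.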
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = load_state_dict_from_url(model_url, progress=progress)
for key in list(state_dict.keys()):
# print(key)
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
pretrained_dict = {k: v for k, v in state_dict.items() if 'classifier' not in k}
model_dict = model.state_dict()
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict, strict=False)
def _densenet(arch, growth_rate, block_config, num_init_features, num_class, pretrained, progress,
**kwargs):
model = DenseNet(growth_rate, block_config, num_init_features, num_classes=num_class, **kwargs)
if pretrained:
_load_state_dict(model, model_urls[arch], progress)
else:
if pretrain_model is not None:
device = torch.device("cuda")
model = nn.DataParallel(model).to(device)
model.load_state_dict(torch.load(pretrain_model))
else:
print('Error: pretrain_model == None')
return model
def densenet121(num_class, pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet121', 32, (6, 12, 24, 16), 64, num_class, pretrained, progress,
**kwargs)
def densenet161(num_class, pretrained=False, progress=True, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, num_class, pretrained, progress,
**kwargs)
def densenet169(num_class, pretrained=False, progress=True, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, num_class, pretrained, progress,
**kwargs)
def densenet201(num_class, pretrained=False, progress=True, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, num_class, pretrained, progress,
**kwargs)
def get_Data(is_train, dataset_name, batch_size):
transform = transforms.Compose([
transforms.RandomResizedCrop((224, 224), scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
val_transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
voc_helen = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'helen', 'voc_multi']
##cub dataset###
label = None if is_train else 0
if not is_train:
batch_size = 1
if dataset_name == 'cub':
trainset = CUB_VOC(cub_file, dataset_name, 'ori', train=True, transform=transform, is_frac=label)
testset = CUB_VOC(cub_file, dataset_name, 'ori', train=False, transform=val_transform, is_frac=label)
###cropped voc dataset###
elif dataset_name in voc_helen:
trainset = CUB_VOC(voc_file, dataset_name, 'ori', train=True, transform=transform, is_frac=label)
testset = CUB_VOC(voc_file, dataset_name, 'ori', train=False, transform=val_transform, is_frac=label)
###celeb dataset###
#elif dataset_name == 'celeb':
# trainset = Celeb(training = True, transform=None)
# testset = Celeb(training = False, transform=None)
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False)
return train_loader, test_loader
def net_train():
trainset_loader, testset_loader = get_Data(IS_TRAIN, DATANAME, BATCHSIZE)
if os.path.exists(log_path):
shutil.rmtree(log_path);os.makedirs(log_path)
else:
os.makedirs(log_path)
device = torch.device("cuda")
net = None
if LAYERS == '121':
net = densenet121(num_class=NUM_CLASSES, pretrained=True)
if LAYERS == '161':
net = densenet161(num_class=NUM_CLASSES, pretrained=True)
net = nn.DataParallel(net).to(device)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.module.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.6)
# Train the model
best_acc = 0.0; save_loss = []; test_loss = []; train_acc = []; test_acc = [];
for epoch in range(EPOCH+1):
scheduler.step()
net.train()
total_loss = 0.0; correct = .0; total = .0;
for batch_step, input_data in tqdm(enumerate(trainset_loader,0),total=len(trainset_loader),smoothing=0.9):
inputs, labels = input_data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
output, _ = net(inputs)
#print(output)
_, predicted = torch.max(output.data, 1)
correct += (predicted == labels).sum()
total += labels.size(0)
loss = criterion(output, labels)
#print(module.features.conv0.weight)
loss.backward()
#if batch_step>0:
# return
#for name, parms in net.named_parameters():
# print('after* name:', name, 'grad_value:',parms.grad)
optimizer.step()
total_loss += loss.item()
total_loss = float(total_loss) / (batch_step+1)
correct = float(correct) / total
testacc, testloss = test(net, testset_loader)
save_loss.append(total_loss); train_acc.append(correct);
test_loss.append(testloss); test_acc.append(testacc);
np.savez(log_path+'loss.npz', train_loss=np.array(save_loss), test_loss=np.array(test_loss),\
train_acc=np.array(train_acc), test_acc=np.array(test_acc))
print('Epoch', epoch, 'train loss: %.4f' % total_loss, 'train accuracy:%.4f' % correct, \
'test loss: %.4f' % testloss, 'test accuracy:%.4f' % testacc)
if epoch % 50 == 0:
torch.save(net.state_dict(), log_path+'model_%.3d.pth' % epoch)
if epoch % 1 == 0:
if testacc > best_acc:
best_acc = testacc
torch.save(net.state_dict(), log_path+'model_%.3d_%.4f.pth' % (epoch, best_acc))
print('Finished Training')
return net
def get_feature():
print('pretrain_model:', pretrain_model)
_, testset_test = get_Data(True, DATANAME, BATCHSIZE)
_, testset_feature = get_Data(False, DATANAME, BATCHSIZE)
device = torch.device("cuda")
net = None
if LAYERS == '121':
net = densenet121(num_class=NUM_CLASSES, pretrained=False)
if LAYERS == '161':
net = densenet161(num_class=NUM_CLASSES, pretrained=False)
net = nn.DataParallel(net).to(device)
# Test the model
acc, _ = test(net, testset_test)
f = open(acc_path+dataset+'_test.txt', 'w+')
f.write('%s\n' % dataset)
f.write('acc:%f\n' % acc)
print('test acc:', acc)
all_feature = []
testset = testset_test if DATANAME == 'voc_multi' else testset_feature
for batch_step, input_data in tqdm(enumerate(testset,0),total=len(testset),smoothing=0.9):
inputs, labels = input_data
inputs, labels = inputs.to(device), labels.to(device)
net.eval()
output, f_map = net(inputs)
all_feature.append(f_map.cpu().numpy())
all_feature = np.concatenate(all_feature,axis=0)
f.write('sample num:%d' % (all_feature.shape[0]))
f.close()
print(all_feature.shape)
np.savez_compressed(save_path+LAYERS+'_densenet_'+DATANAME+'_ori.npz', f_map=all_feature[...])
print('Finished Operation!')
return net
def test(net, testdata):
criterion = nn.CrossEntropyLoss()
correct, total = .0, .0
total_loss = .0
for batch_step, input_data in tqdm(enumerate(testdata,0),total=len(testdata),smoothing=0.9):
inputs, labels = input_data
inputs, labels = inputs.cuda(), labels.cuda()
net.eval()
outputs, _ = net(inputs)
loss = criterion(outputs, labels)
total_loss += loss.item()
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
total_loss = float(total_loss)/(batch_step+1)
return float(correct)/total, total_loss
def densenet_ori_train():
if IS_TRAIN == 1:
net = net_train()
elif IS_TRAIN == 0:
net = get_feature()
|
ada-shen/icCNN
|
densenet_ori_train.py
|
densenet_ori_train.py
|
py
| 20,335 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "newPad2d.newPad2d",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.utils.checkpoint.checkpoint",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.utils.checkpoint",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "torch.jit",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "torch.jit",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "torch.jit",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "torch.jit.is_scripting",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.dropout",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleDict",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "newPad2d.newPad2d",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "newPad2d.newPad2d",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.kaiming_normal_",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.adaptive_avg_pool2d",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "torch.flatten",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "load_utils.load_state_dict_from_url",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "cub_voc.CUB_VOC",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "cub_voc.CUB_VOC",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "cub_voc.CUB_VOC",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "cub_voc.CUB_VOC",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "numpy.savez",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.savez_compressed",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 471,
"usage_type": "call"
}
] |
36724497528
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Count
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse_lazy, reverse
from django.utils import timezone
from django.views.generic import UpdateView, ListView, DeleteView
from .forms import NewTopicForm, NewPostForm
from .models import (
Board,
Topic,
Post
)
class BoardListView(ListView):
model = Board
context_object_name = 'boards'
template_name = 'home.html'
class TopicListView(ListView):
model = Topic
context_object_name = 'topics'
paginate_by = 20
def get_context_data(self, **kwargs):
kwargs['board'] = self.board
return super().get_context_data(**kwargs)
def get_queryset(self):
self.board = get_object_or_404(Board, pk=self.kwargs.get('pk'))
queryset = self.board.topics.order_by('-last_update').annotate(
replies=Count('posts') - 1)
return queryset
@login_required
def topic_new(request, pk):
board = get_object_or_404(Board, pk=pk)
if request.method == 'POST':
form = NewTopicForm(request.POST)
if form.is_valid():
user = request.user
topic = form.save(commit=False)
topic.board = board
topic.starter = user
topic.save()
message = form.cleaned_data.get('message')
Post.objects.create(
message=message,
topic=topic,
created_by=user,
)
return redirect('boards:topic-posts', pk=pk, topic_pk=topic.pk)
else:
form = NewTopicForm()
context = {
'board': board,
'form': form
}
return render(request, 'boards/topic_new.html', context)
class PostListView(ListView):
model = Post
context_object_name = 'posts'
paginate_by = 20
def get_context_data(self, **kwargs):
session_key = f'viewed_topic_{self.topic.id}'
if not self.request.session.get(session_key, False):
self.topic.views += 1
self.topic.save()
self.request.session[session_key] = True
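        # The session flag (e.g. 'viewed_topic_42') ensures the view counter is
        # incremented at most once per browser session.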
kwargs['topic'] = self.topic
return super().get_context_data(**kwargs)
def get_queryset(self):
self.topic = get_object_or_404(Topic,
board__pk=self.kwargs.get('pk'),
pk=self.kwargs.get('topic_pk'))
queryset = self.topic.posts.order_by('created_at')
return queryset
@login_required
def topic_reply(request, pk, topic_pk):
topic = get_object_or_404(Topic, board__pk=pk, pk=topic_pk)
if request.method == "POST":
form = NewPostForm(request.POST)
if form.is_valid():
user = request.user
post = form.save(commit=False)
post.topic = topic
post.created_by = user
post.save()
topic.last_update = timezone.now()
topic.save()
topic_url = reverse('boards:topic-posts',
kwargs={
'pk': topic.board.pk,
'topic_pk': topic.pk
})
topic_post_url = f'{topic_url}?page={topic.get_page_count()}#{post.pk}'
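            # The redirect lands on the last page of the thread, anchored to the new post,
            # e.g. (hypothetical URL) '/boards/1/topics/5/?page=3#42'.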
return redirect(topic_post_url)
else:
form = NewPostForm()
context = {
'form': form,
'topic': topic
}
return render(request, 'boards/topic_reply.html', context)
class PostEditView(LoginRequiredMixin, UpdateView):
model = Post
fields = ('message',)
template_name = 'boards/post_edit.html'
pk_url_kwarg = 'post_pk'
context_object_name = 'post'
def get_queryset(self):
queryset = super().get_queryset()
if not self.request.user.is_staff:
queryset = queryset.filter(created_by=self.request.user)
return queryset
def form_valid(self, form):
post = form.save(commit=False)
post.updated_by = self.request.user
post.save()
return redirect('boards:topic-posts', pk=post.topic.board.pk,
topic_pk=post.topic.pk)
class TopicDeleteView(DeleteView):
def get_object(self, queryset=None):
user = self.request.user
board_pk = self.kwargs.get('pk')
topic_pk = self.kwargs.get('topic_pk')
topic = get_object_or_404(Topic, board__pk=board_pk, pk=topic_pk)
if not topic.starter == user and not user.is_staff:
raise Http404
return topic
def get_success_url(self):
board_pk = self.kwargs.get('pk')
return reverse_lazy('boards:topic-list', kwargs={'pk': board_pk})
|
zawi99/web-boards
|
boards/views.py
|
views.py
|
py
| 4,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.ListView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.Board",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "models.Topic",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.Board",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "django.db.models.Count",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Board",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "forms.NewTopicForm",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.create",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "forms.NewTopicForm",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.Topic",
"line_number": 85,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "models.Topic",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "forms.NewPostForm",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "forms.NewPostForm",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "django.views.generic.DeleteView",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "models.Topic",
"line_number": 151,
"usage_type": "argument"
},
{
"api_name": "django.http.Http404",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 158,
"usage_type": "call"
}
] |
73008021309
|
#Lesson / Exercise 23 my code, sort the customer total amount
from pyspark import SparkConf, SparkContext #boilerplate
conf = SparkConf().setMaster("local").setAppName("TotalAmountOrdered")
sc = SparkContext(conf = conf)
def parseLine(line):
fields = line.split(',')
return (int(fields[0]), float(fields[2]))
#return (float(fields[2]), int(fields[1])) # ALTERNATE OPTION I think
lines = sc.textFile("file:///C:/Users/cenzo/SparkCourse/CSV/customer-orders.csv") #read from correct file
rdd = lines.map(parseLine)
totalAmount = rdd.reduceByKey(lambda x, y: x+y)
totalSortedAmount = totalAmount.map(lambda x: (x[1], x[0])).sortByKey()
#to sort this, we want to order by total spent so we can see who the biggest spender is. To do this we need to swap the key and values so the amount spent becomes the key
results = totalSortedAmount.collect()
for result in results:
    print(str(result[1]) + "\t\t" + str(result[0])) #need to change how output is printed, we do not want the total amount to be printed first. We also have to cast the result to a string
|
CenzOh/Python_Spark
|
MyCode/customerTotalAmountSorted.py
|
customerTotalAmountSorted.py
|
py
| 1,078 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.SparkConf",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 5,
"usage_type": "call"
}
] |
28749398084
|
import urllib2
import json
import mraa
import threading
import sys
import time
moveSensor = mraa.Gpio(20)
moveSensor.dir(mraa.DIR_IN)
soundSensor = mraa.Gpio(21)
soundSensor.dir(mraa.DIR_IN)
fotoSensor = mraa.Gpio(43)
fotoSensor.dir(mraa.DIR_IN)
gasSensor = mraa.Gpio(17)
gasSensor.dir(mraa.DIR_IN)
def update():
threading.Timer(3.0, update).start()
moveVal = moveSensor.read()
if (moveVal == 1):
moveValue = True
else:
moveValue = False
gasVal = gasSensor.read()
if (gasVal == 0):
gasValue = True
else:
gasValue = False
fotoVal = fotoSensor.read()
if (fotoVal == 1):
fotoValue = True
else:
fotoValue = False
soundVal = soundSensor.read()
if (soundVal == 1):
soundValue = True
else:
soundValue = False
url = 'http://%s:5000/api/rooms/0' % host
data = json.dumps({'movement': moveValue, 'gas': gasValue, 'light': fotoValue, 'noise': soundValue, 'timestamp': time.time()})
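    # Illustrative payload (values depend on the sensor readings at call time):
    # {"movement": true, "gas": false, "light": true, "noise": false, "timestamp": 1514764800.0}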
req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
f = urllib2.urlopen(req)
response = f.read()
f.close()
host = sys.argv[1]
update();
|
Jasiu0/SmartGlassIoT
|
client-linkit/rest_client.py
|
rest_client.py
|
py
| 1,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mraa.Gpio",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mraa.DIR_IN",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mraa.Gpio",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mraa.DIR_IN",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mraa.Gpio",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mraa.DIR_IN",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "mraa.Gpio",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mraa.DIR_IN",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "urllib2.Request",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 49,
"usage_type": "attribute"
}
] |
22458997174
|
import flask
from flask import Flask,request,jsonify
import json
from sqlib import cek_data_user, input_data,input_dataa, show_data, node1_suhu, node1_kelembapanudara, node1_kelembapantanah, node1_keltanah_konversi, node1_intensitascahaya, node1_curahhujan, node1_curahhujan_konversi, node2_suhu, node2_kelembapanudara, node2_kelembapantanah, node2_keltanah_konversi, node2_curahhujan, node2_curahhujan_konversi ,node2_intensitascahaya, show_dataa,input_user,cek_username,update_ip
app = Flask(__name__)
@app.route('/monitor/node1', methods=['POST'])
def node1():
json_data = flask.request.json
if json_data ==None:
result = {"pesan":"data not found"}
resp = jsonify(result)
return resp,404
else :
if 'Suhu' not in json_data or 'Kelembapan_udara' not in json_data or 'Intensitas_cahaya' not in json_data or 'Curah_hujan' not in json_data or 'Kelembapan_tanah' not in json_data :
result = {"pesan": "bad request"}
resp = jsonify(result)
return resp,403
else :
Suhu = json_data ['Suhu']
Kelembapan_udara = json_data ['Kelembapan_udara']
Intensitas_cahaya = json_data ['Intensitas_cahaya']
Curah_hujan = json_data ['Curah_hujan']
Kelembapan_tanah = json_data ['Kelembapan_tanah']
input_data(Suhu,Kelembapan_udara,Intensitas_cahaya,Curah_hujan,Kelembapan_tanah)
result = {"pesan" : " input berhasil"}
resp= jsonify(result)
return resp, 200
@app.route('/monitor/node2', methods=['POST'])
def node2():
json_data = flask.request.json
if json_data ==None:
result = {"pesan":"data not found"}
resp = jsonify(result)
return resp,404
else :
if 'Suhu' not in json_data or 'Kelembapan_udara' not in json_data or 'Intensitas_cahaya' not in json_data or 'Curah_hujan' not in json_data or 'Kelembapan_tanah' not in json_data :
result = {"pesan": "bad request"}
resp = jsonify(result)
return resp,403
else :
Suhu = json_data ['Suhu']
Kelembapan_udara = json_data ['Kelembapan_udara']
Intensitas_cahaya = json_data ['Intensitas_cahaya']
Curah_hujan = json_data ['Curah_hujan']
Kelembapan_tanah = json_data ['Kelembapan_tanah']
input_dataa (Suhu,Kelembapan_udara,Intensitas_cahaya,Curah_hujan,Kelembapan_tanah)
result = {"pesan" : " input berhasil"}
resp= jsonify(result)
return resp, 200
@app.route('/monitor/node1', methods=['GET'])
def monitor_node1() :
resp = show_data()
return resp,200
@app.route('/monitor/suhu', methods=['GET'])
def monitor_suhu() :
resp = node1_suhu()
return resp,200
@app.route('/monitor/udara', methods=['GET'])
def monitor_udara() :
resp = node1_kelembapanudara()
return resp,200
@app.route('/monitor/tanah', methods=['GET'])
def monitor_tanah() :
resp = node1_kelembapantanah()
return resp,200
@app.route('/monitor/tanahkonversi', methods=['GET'])
def monitor_tanahkonversi() :
resp = node1_keltanah_konversi()
return resp,200
@app.route('/monitor/cahaya', methods=['GET'])
def monitor_cahaya() :
resp = node1_intensitascahaya()
return resp,200
@app.route('/monitor/hujan', methods=['GET'])
def monitor_hujan() :
resp = node1_curahhujan()
return resp,200
@app.route('/monitor/hujankonversi', methods=['GET'])
def monitor_hujankonversi() :
resp = node1_curahhujan_konversi()
return resp,200
@app.route('/monitor/node2', methods=['GET'])
def monitor_node2() :
resp = show_dataa()
return resp,200
@app.route('/monitor/suhu2', methods=['GET'])
def monitor_suhu2() :
resp = node2_suhu()
return resp,200
@app.route('/monitor/udara2', methods=['GET'])
def monitor_udara2() :
resp = node2_kelembapanudara()
return resp,200
@app.route('/monitor/tanah2', methods=['GET'])
def monitor_tanah2() :
resp = node2_kelembapantanah()
return resp,200
@app.route('/monitor/tanahkonversi2', methods=['GET'])
def monitor_tanahkonversi2() :
resp = node2_keltanah_konversi()
return resp,200
@app.route('/monitor/cahaya2', methods=['GET'])
def monitor_cahaya2() :
resp = node2_intensitascahaya()
return resp,200
@app.route('/monitor/hujan2', methods=['GET'])
def monitor_hujan2() :
resp = node2_curahhujan()
return resp,200
@app.route('/monitor/hujankonversi2', methods=['GET'])
def monitor_hujankonversi2() :
resp = node2_curahhujan_konversi()
return resp,200
@app.route('/monitor/register/user',methods=['POST'])
def user_register():
json_data = request.json
if json_data==None:
result = {"pesan":"data not found"}
resp = jsonify(result)
return resp,404
else:
if 'Username' not in json_data or 'Password' not in json_data or 'IP_Address' not in json_data:
result = {"pesan": "bad request"}
resp = jsonify(result)
return resp,403
else:
Username = json_data['Username']
Password = json_data['Password']
IP_Address = json_data['IP_Address']
cek = cek_username(Username)
if cek == False:
result = {"pesan" : " User Already Existed"}
resp= jsonify(result)
return resp, 208
else:
input_user(Username,Password,IP_Address)
result = {"pesan" : " input berhasil"}
resp= jsonify(result)
return resp, 200
@app.route('/monitor/login/user',methods=['POST'])
def user_login():
json_data = request.json
if json_data==None:
result = {"pesan":"data not found"}
resp = jsonify(result)
return resp,404
else:
if 'Username' not in json_data or 'Password' not in json_data or 'IP_Address' not in json_data:
result = {"pesan": "bad request"}
resp = jsonify(result)
return resp,403
else:
Username = json_data['Username']
Password = json_data['Password']
IP_Address = json_data['IP_Address']
cek = cek_data_user(Username,Password)
if cek==False:
result = {"pesan": "Forbidden"}
resp = jsonify(result)
return resp,203
else:
update_ip(IP_Address,Username,Password)
result = {"pesan" : " Selamat Datang "+Username}
resp= jsonify(result)
return resp, 200
if __name__ == "__main__" :
#serve(app, host="0.0.0.0", port=4001)
app.run(port=4001, debug=True)
|
triani16/Aplikasi-Monitoring-Tanaman
|
penerima.py
|
penerima.py
|
py
| 6,735 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlib.input_data",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlib.input_dataa",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlib.show_data",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_suhu",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_kelembapanudara",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_kelembapantanah",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_keltanah_konversi",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_intensitascahaya",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_curahhujan",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sqlib.node1_curahhujan_konversi",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sqlib.show_dataa",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_suhu",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_kelembapanudara",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_kelembapantanah",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_keltanah_konversi",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_intensitascahaya",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_curahhujan",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sqlib.node2_curahhujan_konversi",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sqlib.cek_username",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "sqlib.input_user",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sqlib.cek_data_user",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "sqlib.update_ip",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 185,
"usage_type": "call"
}
] |
18454402127
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import sys
import argparse
import logging.config
from pathlib import Path
sys.path.append(str(Path().absolute()))
from mx_crm.main import run_completing
from mx_crm.settings import LOGGING
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Complete/Update data')
parser.add_argument('--all', action='store_true', help='Run all completers')
    parser.add_argument('--websites', action='store_true', help='Complete missing websites')
parser.add_argument('--update-wiki', action='store_true', help='Update already parsed wiki data by existing url')
parser.add_argument('--update-xing', action='store_true', help='Update already parsed xing data by existing url')
    parser.add_argument('--parse-wiki', action='store_true', help='Parse wiki data that has not been parsed/found yet')
    parser.add_argument('--parse-xing', action='store_true', help='Parse xing data that has not been parsed/found yet')
    parser.add_argument('--force-update', action='store_true', help='Force update')
    parser.add_argument('--google-evaluation', action='store_true', help='Parse google evaluation data that has not been parsed yet')
args = parser.parse_args()
logger.info("""
Arguments:
--all={all}
--websites={websites}
--update-wiki={update_wiki}
--update-xing={update_xing}
--parse-wiki={parse_wiki}
--parse-xing={parse_xing}
        --force-update={force_update}
        --google-evaluation={google_evaluation}
    """.format(
all=args.all,
websites=args.websites,
update_wiki=args.update_wiki,
update_xing=args.update_xing,
parse_wiki=args.parse_wiki,
parse_xing=args.parse_xing,
force_update=args.force_update,
google_evaluation=args.google_evaluation,
))
try:
run_completing(
force_update=args.force_update,
c_all=args.all,
c_websites=args.websites,
c_update_wiki=args.update_wiki,
c_update_xing=args.update_xing,
c_parse_wiki=args.parse_wiki,
c_parse_xing=args.parse_xing,
c_google_evaluation=args.google_evaluation
)
except IOError as e:
logger.error(e)
|
alexpinkevichwork/squirrel
|
complete_data.py
|
complete_data.py
|
py
| 2,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.config.config.dictConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mx_crm.settings.LOGGING",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "logging.config.config",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.config",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.config.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mx_crm.main.run_completing",
"line_number": 51,
"usage_type": "call"
}
] |
26425792111
|
import pandas as pd
import optuna
import numpy as np
from pathlib import Path
import datetime
import lightgbm
import pickle
from functools import partial
import logging
import argparse
from clearml import Task
WORK_DIR = Path(".")
STUDY_PATH = WORK_DIR.joinpath(
f'total_dataset_study_{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
)
kvant = {7105: 10, 7103: 10, 7115: 5, 7101: 3, 7175: 3, 517: 10}
price_df = pd.DataFrame({'game_code': [7105, 7103, 7115, 7101, 7175],
'price': [100, 100, 75, 50, 75]})
price_df = price_df.set_index('game_code')
utilization_coefficients = {7105: 30,
7103: 35,
7115: 50,
7101: 50,
7175: 50,
517: 30}
utilization_coefficients = {int(game): 100 / (100 - int(utilization_coefficients[game])) for game in
list(utilization_coefficients.keys())}
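# Worked example of the conversion above: game 7105 has a 30% utilization target, so its
# coefficient is 100 / (100 - 30), about 1.43; later in score() a forecast is multiplied by
# this factor and rounded up to the game's kvant (10 for 7105) before distribution.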
def mse(real, forecast):
real_array = np.array(real)
forecast_array = np.array(forecast)
return np.mean(np.power((real_array - forecast_array), 2))
def smape(A, F):
return 100 / len(A) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F)))
def score(test, predict, active_ops=False, compare_with_real_sales=True):
# return pandas df
df = test.copy()
df['predict'] = predict
df.predict = df.predict.astype(int)
if compare_with_real_sales:
s = sales[sales.game_code.isin(list(test.game_code.unique()))]
s = s[s.ds.isin(list(test.ds.unique()))]
if 'value' in df.columns:
df = df.drop(['value'], 1)
df = df.merge(s[['game_code', 'ds', 'value', 'ops_id']], on=['game_code', 'ds', 'ops_id'], how='left') # outer
df.value = df.value.fillna(0)
df.predict = df.predict.fillna(0)
# df = df.merge(sales)
if active_ops:
f = prod[prod.ds.isin(list(test.ds.values))]
f = f[f.game_code.isin(list(test.game_code.values))]
df = df.merge(f[['ops_id', 'ds', 'base_forecast', 'game_code']], on=['ops_id', 'ds', 'game_code'], how='outer')
df['value'] = df.value.fillna(0)
# business processing (add utilization and round to kvant)
df['distributing'] = df.apply(
lambda x: max(np.ceil(
x.predict * utilization_coefficients[x.game_code] / kvant[x.game_code]
), 1) * kvant[x.game_code]
, 1)
df['plan_transfer'] = df.apply(
lambda x: max(np.ceil(
x.value * utilization_coefficients[x.game_code] / kvant[x.game_code]
), 1) * kvant[x.game_code]
, 1)
if active_ops:
df['distributing'].loc[df.base_forecast.isna()] = 0
df['distributing'].loc[df.predict.isna()] = df.loc[df.predict.isna()].game_code.map(kvant)
df['predict'].loc[df.base_forecast.isna()] = 0
df['predict'].loc[df.predict.isna()] = df.loc[df.predict.isna()].game_code.map(kvant)
score_result = pd.concat(
[
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.value)], columns=['sales'])
),
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.predict)], columns=['origin_predict'])
),
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.distributing)], columns=['predict'])
),
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame([len(x.predict)], columns=['ops_count'])
),
df[df.value < df.predict].groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.predict - x.value)], columns=['origin_over_sales'])
),
df[df.value > df.predict].groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.value - x.predict)], columns=['origin_lost_sales'])
),
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame(
[
sum(
x[x.value > x.distributing].value - x[x.value > x.distributing].distributing
) / sum(x.value) * 100
], columns=['lost_percent'])
),
df.groupby(['game_code']).apply(
lambda x: pd.DataFrame([100 - sum(x.value) / sum(x.distributing) * 100], columns=['util_coef'])
),
df[df.value < df.distributing].groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.distributing - x.value)], columns=['over_sales'])
),
df[df.plan_transfer < df.distributing].groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.distributing - x.plan_transfer)], columns=['over_plan_sales'])
),
df[df.value > df.distributing].groupby(['game_code']).apply(
lambda x: pd.DataFrame([sum(x.value - x.distributing)], columns=['lost_sales'])
)
], 1
)
# score_result = score_result.set_index('game_code')
score_result = score_result.join(price_df, on='game_code', how='left')
score_result['over_plan_losses'] = score_result['lost_sales'] * score_result['price'] + score_result[
'over_plan_sales'] * 5
score_result['lost_sales_losses'] = score_result['lost_sales'] * score_result['price']
score_result['losses'] = score_result['lost_sales'] * score_result['price'] + score_result['over_sales'] * 5
return score_result
def train_model(args, X, Y, params):
"""Train LightGBM model"""
train_params = {key: value for key, value in params.items() if key != "max_bin"}
if args and args.use_gpu:
train_params["device"] = "gpu"
train_params["gpu_device_id"] = 2
train_params["gpu_platform_id"] = 1
train_params["num_threads"] = 10
dataset = lightgbm.Dataset(X, Y, params={"max_bin": params["max_bin"]})
model = lightgbm.train(train_params, dataset)
return model
def scoring(
trial: object,
dss,
args
) -> float:
    # Objective function for the LightGBM demand-forecasting model.
    # Trains a model with the sampled parameters on every train/test split in `dss`
    # and returns the summed business losses on the hold-out weeks.
    # Args:
    #     trial (object): a process of evaluating an objective function
    #     dss (list): list of (train, test) DataFrame pairs, one per validation split
    #     args (argparse.Namespace): CLI arguments (e.g. the --use-gpu flag)
    # Returns:
    #     float: total losses over all splits (lower is better).
trial_params = {
"seed": 424242,
"verbosity": -1,
"num_gpu": 2,
"n_estimators": trial.suggest_int("n_estimators", 100, 3500, step=100), #
"max_depth": trial.suggest_int("max_depth", -1, 12),
"max_bin": trial.suggest_int("max_bin", 63, 255, step=10),
"learning_rate": trial.suggest_loguniform("learning_rate", 1e-3, 1e-1),
"num_leaves": trial.suggest_int("num_leaves", 7, 100, step=10),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.2, 1.0, step=0.1),
"colsample_bynode": trial.suggest_float("colsample_bynode", 0.2, 1.0, step=0.1),
"lambda_l1": trial.suggest_float("lambda_l1", 0, 10, step=0.1), #
"max_delta_step": trial.suggest_float("max_delta_step", 0, 10, step=0.1), #
"subsample_freq": trial.suggest_int("subsample_freq", 0, 50, step=1),
"min_child_samples": trial.suggest_int("min_child_samples", 1, 1000, step=10),
"subsample": trial.suggest_float("subsample", 0.5, 1.0, step=0.05),
"cegb_penalty_split": trial.suggest_float("cegb_penalty_split", 0.0, 3.0, step=0.1),
# 'extra_trees': trial.suggest_categorical('extra_trees', [False, True]),
}
dates = ["2020-01-12", '2020-01-26', '2020-03-01', '2020-03-08', '2020-04-05']
drop_columns = ['ds', 'game_code', 'ops_id', 'value']
scores = []
    losses_list = []
for j, ds in enumerate(dss):
X_train = ds[0].drop(drop_columns, 1)
Y_train = ds[0]['value']
drop_test = ds[1][drop_columns].copy()
X_valid = ds[1].drop(drop_columns, 1)
y_valid = ds[1]['value']
# rgr.fit(X_train, Y_train)
model = train_model(args, X_train, Y_train, trial_params)
y_predict = model.predict(X_valid)
X_valid['base_forecast'] = y_predict.astype('int')
X_valid['base_forecast'] = X_valid['base_forecast'].fillna(0)
X_valid[['ds', 'game_code', 'ops_id', 'value']] = drop_test[['ds', 'game_code', 'ops_id', 'value']]
score_table = score(X_valid.drop(['base_forecast'], 1), X_valid['base_forecast'], active_ops=False,
compare_with_real_sales=True)
        losses = score_table["losses"].sum()
        losses_list.append(losses)
        scores.append(score_table.assign(test_date=dates[j]))
    # losses_sum = np.sum(losses_list)
all_scores = pd.concat(scores)
i = trial.number
all_scores.to_csv(f"./scores_optuna_2/optuna_scores_{i}.csv")
    losses_sum = all_scores['losses'].sum()
lost_sales = all_scores['lost_sales'].sum()
over_sales = all_scores['over_sales'].sum() * 5
lost_sales_losses = all_scores['lost_sales_losses'].sum()
    logger.report_text(f"iteration: {i}", iteration=i)
logger.report_text(f"params: {trial_params}", iteration=i)
    logger.report_scalar("losses", "base", value=losses_sum, iteration=i)
logger.report_scalar("lost_sales_losses", "base", value=lost_sales_losses, iteration=i)
logger.report_scalar("over_sales", "base", value=over_sales, iteration=i)
logger.report_table("scores", f"scores_{i}", table_plot=all_scores, iteration=i)
    return losses_sum
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--study-name", type=str, dest="study_name")
parser.add_argument("--use-gpu", dest="use_gpu", action="store_true")
parser.add_argument("--ntrials", type=int, dest="n_trials")
args = parser.parse_args()
LOG_PATH = Path(WORK_DIR / "tuneparams.log")
logging.basicConfig(
filename=LOG_PATH,
filemode="a",
level=logging.INFO,
)
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.info("Loading data")
train0 = pd.read_parquet('./optuna_splits/train_14features_0.parquet')
test0 = pd.read_parquet('./optuna_splits/test_14features_0.parquet')
train1 = pd.read_parquet('./optuna_splits/train_14features_1.parquet')
test1 = pd.read_parquet('./optuna_splits/test_14features_1.parquet')
train2 = pd.read_parquet('./optuna_splits/train_14features_2.parquet')
test2 = pd.read_parquet('./optuna_splits/test_14features_2.parquet')
train3 = pd.read_parquet('./optuna_splits/train_14features_3.parquet')
test3 = pd.read_parquet('./optuna_splits/test_14features_3.parquet')
train4 = pd.read_parquet('./optuna_splits/train_14features_4.parquet')
test4 = pd.read_parquet('./optuna_splits/test_14features_4.parquet')
dss = [(train0, test0), (train1, test1), (train2, test2), (train3, test3), (train4, test4)]
read_data = []
for i, p in enumerate(
Path("./sales.parquet").iterdir()
):
if "parquet" in p.name:
df = pd.read_parquet(p)
read_data.append(df)
sales = pd.concat(read_data)
sales.ds = pd.to_datetime(sales.ds)
sales.game_code = sales.game_code.astype(int)
sales.value = sales.value.astype(int)
    log.info("sales shape: %s" % (sales.shape,))
# Run optuna study
log.info("Starting optuna study")
log.info(
f'Starting optuna study_{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
)
# init ClearML
model_name = 'lightgbm'
target = 'stoloto'
task = Task.init(
project_name="stoloto",
task_name="optuna_14f",
tags=['opt_params']
)
logger = task.get_logger()
study_name = (
args.study_name if args.study_name else "stoloto-optuna-study"
) # Unique identifier of the study.
storage_name = "sqlite:///{}.db".format(study_name)
total_study = optuna.create_study(
direction="minimize",
study_name=study_name,
storage=storage_name,
load_if_exists=True,
)
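    # With SQLite storage and load_if_exists=True the study survives restarts:
    # rerunning with the same --study-name resumes from the stored trials.
    # Illustrative resume call (hypothetical invocation, same DB file is picked up):
    #   python run_optuna_5_dates.py --study-name stoloto-optuna-study --ntrials 50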
first_trial_params = {'colsample_bynode': 0.6,
'colsample_bytree': 0.5,
'lambda_l1': 6.4,
'learning_rate': 0.06878228501024089,
'max_bin': 163,
'max_depth': 7,
'min_child_samples': 13,
'n_estimators': 1400,
'num_leaves': 87,
'subsample': 0.8,
'subsample_freq': 14,
'max_delta_step': 0.0,
'cegb_penalty_split': 0.0
}
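    # Warm start: enqueue a known-good configuration so the first trial evaluates a
    # strong baseline before Optuna's sampler takes over.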
total_study.enqueue_trial(params=first_trial_params)
scoring = partial(
scoring,
dss=dss,
args=args,
)
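    # functools.partial fixes the data splits and CLI args, so Optuna only has to pass
    # the trial object to the objective.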
try:
total_study.optimize(scoring, n_trials=args.n_trials, show_progress_bar=True)
except KeyboardInterrupt:
pass
with open(STUDY_PATH, "wb") as f:
pickle.dump(total_study, f)
log.info("Save study at studypath")
log.info("Best LightGBM parameters")
log.info(total_study.best_params)
log.info(
f'Save study results_{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
)
with open(STUDY_PATH, "wb") as fs:
pickle.dump(total_study, fs)
task.mark_completed()
|
Anaksibia/Ticket_distribution_system
|
scripts/run_optuna_5_dates.py
|
run_optuna_5_dates.py
|
py
| 13,803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "lightgbm.Dataset",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "lightgbm.train",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "clearml.Task.init",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "clearml.Task",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "optuna.create_study",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 342,
"usage_type": "call"
}
] |
39634585253
|
# supervised training
import argparse
import os
import numpy as np
import math
import itertools
import datetime
import time
import sys
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from adaptive_conv_models import *
from discriminator import *
import torch.nn as nn
import torch.nn.functional as F
import torch
from h5topng.data import transforms as T
from h5topng.common import subsample
from Vtils.pytorch_msssim_master.pytorch_msssim import MS_SSIM, gaussian_filter
from adaptive_conv_models.vtils import Random_Rotate, Random_Flip, Random_Translate
class To_h_space:
def __init__(self, mask=None, center_fractions=[0.04], accelerations=[8], seed=None):
self.mask = mask
self.seed = seed
        if mask is None:
self.mask_func = subsample.MaskFunc(center_fractions, accelerations)
def __call__(self, data):
device = data.device
# to complex data (B,1,H,W,2)
data = data.unsqueeze(dim=-1).transpose(1,-1)
        # to fft domain
data = T.fft2(data)
# apply mask
        if self.mask is None:
data, _ = T.apply_mask(data, self.mask_func, seed=self.seed)
else:
self.mask = self.mask.to(device)
data = torch.where(self.mask == 0, torch.Tensor([0.]).to(device), data)
# to image domain
data = T.ifft2(data)
return data.transpose(1,-1).squeeze(dim=-1)
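# NOTE: To_h_space returns the zero-filled image-domain reconstruction of the masked
# k-space, while To_k_space (below) returns the masked k-space measurement itself.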
class To_k_space:
def __init__(self, mask=None, center_fractions=[0.04], accelerations=[8], seed=None):
self.mask = mask
self.seed = seed
        if mask is None:
self.mask_func = subsample.MaskFunc(center_fractions, accelerations)
def __call__(self, data):
device = data.device
# to complex data (B,1,H,W,2)
data = data.unsqueeze(dim=-1).transpose(1,-1)
        # to fft domain
data = T.fft2(data)
# apply mask
        if self.mask is None:
data, _ = T.apply_mask(data, self.mask_func, seed=self.seed)
else:
self.mask = self.mask.to(device)
data = torch.where(self.mask == 0, torch.Tensor([0.]).to(device), data)
# to (B,2,H,W)
return data.transpose(1,-1).squeeze(dim=-1)
from utils import torch_fft, torch_ifft, sigtoimage, HLoss, normalize2d
class Soft_Data_Consistency(nn.Module):
'''mask: (B=1, C=1, H, W)'''
def __init__(self, mask):
super().__init__()
self.mask = mask
self.mask_c = torch.ones_like(mask) - mask # complementary of support
# def __call__(self, data, data_u):
def forward(self, data, data_u):
'''input: (B,2,H,W)'''
device = data.device
self.mask = self.mask.to(device)
self.mask_c = self.mask_c.to(device)
# # to complex data (B,1,H,W,2)
# data = data.unsqueeze(dim=-1).transpose(1,-1)
# data_u = data_u.unsqueeze(dim=-1).transpose(1,-1)
# # to fft domian
# data = T.fft2(data)
# data_u = T.fft2(data_u)
data = torch_fft(data)
data_u = torch_fft(data_u)
# DC operation
data_dc = data*self.mask_c + data_u*self.mask
# to image domain
data_dc = torch_ifft(data_dc)
# return data_dc.transpose(1,-1).squeeze(dim=-1)
return data_dc
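# Soft data consistency in k-space, as implemented above:
#   K_out = (1 - M) * F(prediction) + M * F(zero_filled input)
# i.e. measured frequencies are kept from the input, unmeasured ones come from the network.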
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="NYU_MRI", help="name of the dataset")
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--mask', default=None, help='path to dataset')
parser.add_argument("--batch_size", type=int, default=8, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_depth", type=int, default=1, help="size of image depth, e.g. coils")
parser.add_argument("--img_height", type=int, default=256, help="size of image height")
parser.add_argument("--img_width", type=int, default=256, help="size of image width")
parser.add_argument("--channels", type=int, default=2, help="number of image channels")
parser.add_argument("--repeat_dim", type=int, default=1, help="number of random samples in test")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between saving generator samples")
parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints")
parser.add_argument("--lambda_adv", type=float, default=1., help="pixelwise loss weight")
parser.add_argument("--lambda_pixel", type=float, default=10, help="pixelwise reconstruction loss weight")
parser.add_argument("--lambda_latent", type=float, default=0.5, help="latent loss weight")
parser.add_argument("--lambda_vgg", type=float, default=1., help="perceptual loss weight")
parser.add_argument("--lambda_grad", type=float, default=10., help="gradient penalty")
parser.add_argument("--mphn", default=False, action='store_true', help="mphn model")
parser.add_argument("--not_ML_dense", default=False, action='store_true', help="multi-level dense architecture")
parser.add_argument("--not_plus", default=False, action='store_true', help="no feature repeation to balance the model parameter size")
parser.add_argument("--dense", default=False, action='store_true', help="dense connections")
parser.add_argument("--stasm", default=False, action='store_true', help="add STASM modules")
parser.add_argument("--stasm_groups", type=int, default=1)
parser.add_argument("--data_consistency", default=False, action='store_true', help="interleaved data consistency")
opt = parser.parse_args()
# print(opt)
os.makedirs("images/%s" % opt.dataset_name, exist_ok=True)
os.makedirs("saved_models/%s" % opt.dataset_name, exist_ok=True)
cuda = True if torch.cuda.is_available() else False
input_shape = (opt.channels, opt.img_depth, opt.img_height, opt.img_width)
# mean square normalize
def mean_square_normalize(data, thresh=0.05, ratio=0.1, dilate=1.0):
data[data.abs()<thresh] = 0.0 # threshold
shape = data.shape
mean_square = (data**2).sum(1).sqrt().mean((-2,-1))
mean_square = mean_square.view((shape[0],1,1,1)).repeat((1,shape[1],shape[2],shape[3]))
# normalize
data = data/mean_square*ratio
data = torch.tanh(data*dilate)
return data
def sample_images(epoch, i):
"""Saves a generated sample rom the validation set"""
generator.eval()
# imgs = next(iter(val_dataloader))
img_samples = None
attention_samples = []
for img_A, img_B in zip(to_cyc(val_dataset.type(Tensor)), val_dataset.type(Tensor)):
# for img_A, img_B in zip(To_h_space(mask=None)(val_dataset.type(Tensor)), val_dataset.type(Tensor)):
img_A = img_A.unsqueeze(dim=0) # (1, C, H W)
img_B = img_B.unsqueeze(dim=0)
# Repeat input image by number of desired columns
repeat_dim = opt.repeat_dim
real_A = img_A.repeat(repeat_dim, 1, 1, 1)
real_A = Variable(real_A)
# Generate samples
with torch.no_grad():
fake_B, _ = generator(real_A.contiguous().unsqueeze(dim=2), zero_filled=real_A.clone(), csm=None, dc_operator=multi_coil_dc)
fake_B = fake_B.contiguous().squeeze(dim=2)
'''compute magnitude maps'''
# (B,2,H,W) to (B,2,H,W,1), B=1
img_A = img_A.unsqueeze(-1)
img_B = img_B.unsqueeze(-1)
fake_B = fake_B.unsqueeze(-1)
# to complex format (B,1,H,W,2)
img_A = img_A.transpose(1,-1)
img_B = img_B.transpose(1,-1)
fake_B = fake_B.transpose(1,-1)
# to magnitude in (B,1,H,W)
img_A = T.complex_abs(img_A)
img_B = T.complex_abs(img_B)
fake_B = T.complex_abs(fake_B)
# diff
diff = (fake_B-img_B).abs()
        # Concatenate samples horizontally
fake_B = torch.cat([x for x in fake_B], -1) # (C, H, 2*N*W)
diff = torch.cat([x for x in diff], -1) # (C, H, 2*N*W)
img_sample = torch.cat((img_A.squeeze(dim=0), fake_B, img_B.squeeze(dim=0), diff), -1) # (C, H, (N+2)*W)
img_sample = img_sample.view(1, *img_sample.shape) # (1, C, H, (N+2)*W)
# Concatenate with previous samples vertically
img_samples = img_sample if img_samples is None else torch.cat([img_samples, img_sample], -2) # (1, C, M*H, (N+2)*W)
# print(img_samples.shape, img_sample.shape)
save_image(img_samples, "images/%s/Adap_GAN_epoch_%d_%d.png" % (opt.dataset_name, epoch, i), nrow=8, normalize=False)
generator.train()
# measurement method to produce real_A from real_B: (1 ,1, 1, 256, 1)
if opt.mask is None:
mask = opt.mask
else:
mask = torch.load(opt.mask)
to_cyc = To_h_space(mask=mask)
to_k = To_k_space(mask=mask)
# to_cyc = To_h_space(mask=None, center_fractions=[0.04], accelerations=[8]) # sampling pattern diversity
# to_k = To_k_space(mask=None, center_fractions=[0.04], accelerations=[8])
soft_dc = Soft_Data_Consistency(mask=mask.squeeze(dim=-1)) # DC operator
def multi_coil_dc(inputs, zero_filled, CSM=None):
outputs = soft_dc(inputs, zero_filled) # data consistency
return outputs
# Loss functions
# mae_loss = torch.nn.MSELoss()
mae_loss = torch.nn.L1Loss()
eps = 1e-12
Smooth_L1 = lambda output, target: torch.sqrt((output - target)**2+eps).mean()
ms_ssim = MS_SSIM(data_range=1, channel=2, K=(0.01, 0.03)) # Try a larger K2 constant (e.g. 0.4)
win = ms_ssim.win
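# The MS-SSIM Gaussian window is reused below (gaussian_filter(L1_loss, win)) to locally
# weight the per-pixel L1 residual of the generator loss.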
# Initialize generator, encoder and discriminators
# generator = AdapGenerator(input_shape)
# D_VAE = RA_MultiDiscriminator([input_shape[0], *input_shape[2:]]) # as we often distinguish among single-coil views
if opt.not_ML_dense:
generator = Sequential_Dense_Network(img_shape=(2,256,256), out_channel=2, scaler_c=2, dense_dilation=False, stages=3, dense=opt.dense, no_plus = opt.not_plus)
else:
generator = Multi_Level_Dense_Network(img_shape=(2,256,256), out_channel=2, scaler_c=2, dense_dilation=False, stages=3, stasm=opt.stasm, groups=opt.stasm_groups, data_consistency=opt.data_consistency)
D_VAE = RA_MultiDiscriminator_CBAM([input_shape[0], *input_shape[2:]], p=0.1)
# D_VAE = RA_MultiDiscriminator_Unet([input_shape[0], *input_shape[2:]])
# generator = Deep_Projection_Network(input_shape, mask=mask.squeeze(dim=-1))
vgg = models.vgg11_bn(pretrained=True).features[:19].cuda()
for param in vgg.parameters():
    param.requires_grad = False  # freeze VGG weights: gradients can still flow through the features, but these parameters are not updated, saving compute and memory
VGGList = nn.ModuleList()
VGGList.add_module('vgg_0', vgg[:9])
VGGList.add_module('vgg_1', vgg[9:12])
VGGList.add_module('vgg_2', vgg[12:16])
VGGList.add_module('vgg_3', vgg[16:])
from utils import Weight_init
if cuda:
generator = generator.cuda()
D_VAE = D_VAE.cuda()
mae_loss.cuda()
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D_VAE = torch.optim.Adam(D_VAE.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
if opt.epoch != 0:
# Load pretrained models
generator.load_state_dict(torch.load("saved_models/%s/generator_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'), strict=False)
D_VAE.load_state_dict(torch.load("saved_models/%s/D_VAE_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
optimizer_G.load_state_dict(torch.load("saved_models/%s/optimizer_G_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
optimizer_D_VAE.load_state_dict(torch.load("saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
# prepare dataset
dataset = torch.load(opt.dataroot) # complex MRI data (B,2,H,W)
start_ = 100
val_dataset = dataset[[10, 30, 35, 55, 75],:,start_:start_+256] # cropped validation samples, range(15,26,5)
# val_dataset = dataset[list(range(10,81,5)),:,start_:start_+256]
dataset = dataset[164:,:,list(range(start_, start_+256))] # cropped training samples
# create dataloaders for training and validation
dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
# ----------
# Training
# ----------
if __name__ == '__main__':
    # Adversarial ground-truth labels (real / fake)
valid = 1.
fake = 0.
prev_time = time.time()
for epoch in range(opt.epoch+1, opt.n_epochs+opt.epoch+1):
for i, batch in enumerate(dataloader):
'''data augmentation'''
            # reset generator gradients before the forward/backward pass
optimizer_G.zero_grad()
# Runs the forward pass with autocasting.
with torch.cuda.amp.autocast(enabled=False):
# Set model input
real_B = Variable(batch.type(Tensor))
real_A = Variable(to_cyc(batch.type(Tensor)).detach())
real_K = Variable(to_k(batch.type(Tensor)).detach())
# Produce output using real_A
fake_B, _ = generator(real_A, zero_filled=real_A.clone(), csm=None, dc_operator=multi_coil_dc)
'''non-uniform mean'''
# Pixelwise loss of translated image by VAE
alpha = 0.64 # 0.84
# L1_loss = torch.sqrt(nn.MSELoss()(fake_B, real_B))
# L1_loss = (fake_B - real_B).abs()
L1_loss = torch.sqrt((fake_B - real_B)**2 + eps)
L1_loss = gaussian_filter(L1_loss, win.to(L1_loss.device)).mean() # Gaussian coefficients indicating the contribution
# SSIM
MS_SSIM_Loss = 1. - ms_ssim((fake_B+1.)/2, (real_B+1.)/2)
# total pixel loss
loss_pixel = (1-alpha)*L1_loss + alpha*MS_SSIM_Loss
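                # Mixed loss as coded above: (1 - alpha) * Gaussian-weighted L1 + alpha * (1 - MS-SSIM);
                # this mirrors the L1/MS-SSIM mix popular for image restoration, where alpha = 0.84
                # is the commonly quoted value (0.64 is used here).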
# Adversarial loss
loss_VAE_GAN = D_VAE.compute_loss(real_B, fake_B, valid=fake, fake=valid, sg=False) # relativistic average
# loss_VAE_GAN = D_VAE.compute_loss(fake_B, None, fake=valid, sg=False)
# feature attention using a U-net-like D
loss_FA = torch.Tensor(1).fill_(0.).type(Tensor)
# loss_FA = torch.sqrt(((1.-relative_score.detach())*(fake_B - real_B))**2 + eps).mean()
# Total Loss (Generator + Encoder)
loss_GE = opt.lambda_adv*loss_VAE_GAN + opt.lambda_pixel * (loss_pixel + 0.5*loss_FA)
# ---------
# cLR-GAN
# ---------
loss_latent = opt.lambda_latent * Smooth_L1(to_k(fake_B), real_K)
# loss_latent = loss_latent.detach()
# VGG loss
content_loss = []
gram_loss = []
lambda_gram = 0.005
weight_list = [1., 1.5, 3., 4.5]
# VGG loss via vgg11_bn
real_content = sigtoimage(real_B).repeat(1,3,1,1)
fake_content = sigtoimage(fake_B).repeat(1,3,1,1)
for k, m in enumerate(VGGList):
real_content = m(real_content).detach()
fake_content = m(fake_content)
# real_vgg = norm(real_content) # instance normalize features
# fake_vgg = norm(fake_content)
real_vgg = real_content.clone()
fake_vgg = fake_content.clone()
# content_loss += [nn.L1Loss()(real_vgg, fake_vgg)]
content_loss += [Smooth_L1(real_vgg, fake_vgg)]
# content_loss += [5.*pdl_loss(real_vgg, fake_vgg, metric='charbonier', m=20)]
# gram matrices
gram_real = real_vgg.view(real_vgg.shape[0],real_vgg.shape[1],-1) @ real_vgg.view(real_vgg.shape[0],real_vgg.shape[1],-1).transpose(-2,-1)
gram_fake = fake_vgg.view(fake_vgg.shape[0],fake_vgg.shape[1],-1) @ fake_vgg.view(fake_vgg.shape[0],fake_vgg.shape[1],-1).transpose(-2,-1)
# gram_loss += [weight_list[k]*nn.L1Loss()(gram_real, gram_fake)]
gram_loss += [weight_list[k]*Smooth_L1(gram_real, gram_fake)]
loss_VGG = sum(content_loss) + lambda_gram*sum(gram_loss)
loss_VGG *= opt.lambda_vgg
loss_G = loss_GE + loss_latent + loss_VGG
# loss_G = loss_GE + loss_VGG # DC has been applied
loss_G.backward()
optimizer_G.step()
# optimizer_G_atasm.step()
# scaler_G.scale_G(loss_G).backward()
# scaler_G.step_G(optimizer_G)
# scaler_G.update()
# ----------------------------------
# Train Discriminator (cVAE-GAN)
# ----------------------------------
# if opt.epoch>0 and epoch == (opt.epoch+1) and i == 0:
# print('load optimizers here')
# print('load optimizers here')
# # Load pretrained models
# optimizer_D_VAE.load_state_dict(torch.load("saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, opt.epoch)))
# print('load optimizers here')
# print('load optimizers here')
optimizer_D_VAE.zero_grad()
clone_B = torch.ones(fake_B.shape).cuda() # avoid issues caused by .detach()
clone_B.copy_(fake_B)
# clone_B = fake_B.new_tensor(fake_B)
with torch.cuda.amp.autocast(enabled=False):
loss_D_VAE = D_VAE.compute_loss(real_B, clone_B.detach(), valid=valid, fake=fake, sg=True) # relativistic average
# loss_D_VAE = D_VAE.compute_loss(real_B, None, fake=valid, sg=False) + D_VAE.compute_loss(fake_B.detach(), None, fake=fake, sg=False)
loss_D_VAE *= opt.lambda_adv
# gradient penalty
loss_grad_VAE = 0.
loss_grad_VAE = 30.*D_VAE.compute_gradient_penalty(real_B, fake_B.detach()) # gradient penalty
loss_grad_VAE *= opt.lambda_adv
loss_D = loss_D_VAE + loss_grad_VAE
loss_D.backward()
optimizer_D_VAE.step()
# scaler_D.scale(loss_D).backward()
# scaler_D.step(optimizer_D_VAE)
# scaler_D.update()
# --------------
# Log Progress
# --------------
# Determine approximate time left
batches_done = epoch * len(dataloader) + i
batches_left = opt.n_epochs * len(dataloader) - batches_done
time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()
sys.stdout.write(
"\r[E %d/%d, %d/%d] [D: (%.3f, %.3f)] [G: (%.3f), pixel: (%.3f, %.3f, %.3f), LR: %.4f vgg: (%.3f, %.3f, %.3f), (%.3f, %.3f, %.3f)] ETA: %s"
% (
epoch,
opt.n_epochs,
i,
len(dataloader),
loss_D_VAE.item(),
loss_grad_VAE,
loss_GE.item()-opt.lambda_pixel * loss_pixel.item(),
opt.lambda_pixel*(1-alpha)*L1_loss.item(),
opt.lambda_pixel*alpha*MS_SSIM_Loss.item(),
opt.lambda_pixel*0.5*loss_FA.item(),
loss_latent.item(),
opt.lambda_vgg*content_loss[0],
opt.lambda_vgg*content_loss[1],
opt.lambda_vgg*content_loss[2],
opt.lambda_vgg*lambda_gram*gram_loss[0],
opt.lambda_vgg*lambda_gram*gram_loss[1],
opt.lambda_vgg*lambda_gram*gram_loss[2],
time_left,
)
)
if batches_done % opt.sample_interval == 0:
sample_images(epoch, i)
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
# Save model checkpoints
torch.save(generator.state_dict(), "saved_models/%s/generator_%d.pth" % (opt.dataset_name, epoch))
torch.save(D_VAE.state_dict(), "saved_models/%s/D_VAE_%d.pth" % (opt.dataset_name, epoch))
torch.save(optimizer_G.state_dict(), "saved_models/%s/optimizer_G_%d.pth" % (opt.dataset_name, epoch))
torch.save(optimizer_D_VAE.state_dict(), "saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, epoch))
# torch.save(optimizer_G_atasm.state_dict(), "saved_models/%s/optimizer_G_atasm_%d.pth" % (opt.dataset_name, epoch))
|
JingshuaiLiu/HFMRI
|
single_coil_dense_network.py
|
single_coil_dense_network.py
|
py
| 21,183 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "h5topng.common.subsample.MaskFunc",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "h5topng.common.subsample",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.fft2",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.apply_mask",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.where",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms.ifft2",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "h5topng.common.subsample.MaskFunc",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "h5topng.common.subsample",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.fft2",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.apply_mask",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.where",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.ones_like",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "utils.torch_fft",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "utils.torch_fft",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "utils.torch_ifft",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "torch.tanh",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms.complex_abs",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.complex_abs",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "h5topng.data.transforms.complex_abs",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "h5topng.data.transforms",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torch.nn.L1Loss",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "torch.sqrt",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "Vtils.pytorch_msssim_master.pytorch_msssim.MS_SSIM",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "torchvision.models.vgg11_bn",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "Vtils.pytorch_msssim_master.pytorch_msssim.gaussian_filter",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "utils.sigtoimage",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "utils.sigtoimage",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 497,
"usage_type": "call"
}
] |
37502345925
|
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from mask2former.modeling.pixel_decoder.msdeformattn import MSDeformAttnTransformerEncoderOnly
from mask2former.modeling.transformer_decoder.position_encoding import PositionEmbeddingSine
@SEM_SEG_HEADS_REGISTRY.register()
class MSSharePixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
# deformable transformer encoder args
transformer_in_features: List[str],
common_stride: int,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
conv_dims: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
transformer_input_shape = {
k: v for k, v in input_shape.items() if k in transformer_in_features
}
# this is the input shape of pixel decoder
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
self.feature_strides = [v.stride for k, v in input_shape]
self.feature_channels = [v.channels for k, v in input_shape]
        # this is the input shape of the transformer encoder (it may use fewer features than the pixel decoder)
transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride)
self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5"
transformer_in_channels = [v.channels for k, v in transformer_input_shape]
self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers
self.transformer_num_feature_levels = len(self.transformer_in_features)
if self.transformer_num_feature_levels > 1:
input_proj_list = []
# from low resolution to high resolution (res5 -> res2)
for in_channels in transformer_in_channels[::-1]:
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
))
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
)])
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
self.transformer = MSDeformAttnTransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
num_feature_levels=self.transformer_num_feature_levels,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
'''
self.mask_dim = mask_dim
# use 1x1 conv instead
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
weight_init.c2_xavier_fill(self.mask_features)
'''
self.maskformer_num_feature_levels = 3 # always use 3 scales
self.common_stride = common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
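        # Example: with transformer features at strides {8, 16, 32} and common_stride = 4,
        # num_fpn_levels = log2(8) - log2(4) = 1, so only the highest-resolution backbone
        # feature receives an extra lateral/output conv below (exact values depend on the config).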
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
'''
share_mask_branch = []
for idx in range(self.maskformer_num_feature_levels):
share_norm = get_norm(norm, mask_dim)
share_mask_conv = Conv2d(
conv_dim,
mask_dim,
kernel_size=3,
stride=2,
padding=1,
norm=share_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(share_mask_conv)
self.add_module("share_{}".format(idx + 1), share_mask_conv)
share_mask_branch.append(share_mask_conv)
self.share_mask_branch = share_mask_branch
'''
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
}
ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
# ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder
ret[
"transformer_enc_layers"
] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config
ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES
ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
return ret
@autocast(enabled=False)
def forward_features(self, features):
srcs = []
pos = []
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.transformer_in_features[::-1]):
x = features[f].float() # deformable detr does not support half precision
srcs.append(self.input_proj[idx](x))
pos.append(self.pe_layer(x))
y, spatial_shapes, level_start_index = self.transformer(srcs, pos)
bs = y.shape[0]
split_size_or_sections = [None] * self.transformer_num_feature_levels
for i in range(self.transformer_num_feature_levels):
if i < self.transformer_num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
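        # The encoder returns one flattened token sequence; split it back into per-level
        # chunks (length H_i * W_i per level) before reshaping them into 2D feature maps.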
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]):
x = features[f].float()
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(x)
            # FPN-style top-down pathway: upsample the coarser map (bilinear here) and add the lateral
y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
'''
mask_feats = []
feat = features['res2'].float()
for idx in range(self.maskformer_num_feature_levels):
feat = self.share_mask_branch[idx](feat)
mask_feats.append(feat)
feat = features['res2']
mask_feat = mask_feats[0]
for idx in range(self.maskformer_num_feature_levels-1, 0, -1):
mask_feat = mask_feats[idx] + F.interpolate(mask_feat, size=mask_feats[idx].shape[-2:], mode="bilinear", align_corners=False)
mask_feat = feat + F.interpolate(mask_feat, size=feat.shape[-2:], mode="bilinear", align_corners=False)
'''
return out[-1], out[0], multi_scale_features
#return self.mask_features(mask_feat), out[0], multi_scale_features
|
zfonemore/NewVIS
|
minvis/share_mask_fpn.py
|
share_mask_fpn.py
|
py
| 10,597 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "detectron2.layers.ShapeSpec",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.xavier_uniform_",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "mask2former.modeling.pixel_decoder.msdeformattn.MSDeformAttnTransformerEncoderOnly",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "mask2former.modeling.transformer_decoder.position_encoding.PositionEmbeddingSine",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "detectron2.layers.get_norm",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "detectron2.layers.get_norm",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "detectron2.layers.Conv2d",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "detectron2.layers.Conv2d",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "fvcore.nn.weight_init.c2_xavier_fill",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "fvcore.nn.weight_init",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "fvcore.nn.weight_init.c2_xavier_fill",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "fvcore.nn.weight_init",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "detectron2.config.configurable",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "detectron2.layers.ShapeSpec",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "torch.split",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "detectron2.modeling.SEM_SEG_HEADS_REGISTRY.register",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "detectron2.modeling.SEM_SEG_HEADS_REGISTRY",
"line_number": 18,
"usage_type": "name"
}
] |
73401806589
|
from torch import nn
class Mojmyr(nn.Module):
def __init__(self, input_shape, hidden_units, output_shape):
super().__init__()
# Copy TinyVGG structure, modify it slightly for this specific case
self.conv_block_1 = nn.Sequential(
nn.Conv2d(input_shape, hidden_units, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(hidden_units, hidden_units, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(2, 2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(hidden_units, hidden_units, 3, 1),
nn.ReLU(),
nn.Conv2d(hidden_units, hidden_units, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2)
)
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=hidden_units*14*14, out_features=output_shape)
)
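        # Note: in_features = hidden_units*14*14 assumes 64x64 inputs:
        # 64 -> 32 after block 1 (padding keeps size, pool halves), 32 -> 30 -> 28 -> 14 after block 2.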
    # Forward pass: run 'x' through both conv blocks and the classifier; the final Linear layer outputs raw logits.
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = self.classifier(x)
return x
|
PopeCorn/myr
|
code/model.py
|
model.py
|
py
| 1,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.Flatten",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
}
] |
650322467
|
#! /bin/python
import os
import sys
import json
from concurrent import futures
import numpy as np
import vigra
import luigi
import z5py
import nifty
import nifty.tools as nt
import nifty.distributed as ndist
from elf.segmentation.lifted_multicut import get_lifted_multicut_solver
from elf.segmentation.multicut import get_multicut_solver
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Lifted Multicut Tasks
#
class SolveLiftedSubproblemsBase(luigi.Task):
""" SolveLiftedSubproblems base class
"""
task_name = 'solve_lifted_subproblems'
src_file = os.path.abspath(__file__)
# input volumes and graph
problem_path = luigi.Parameter()
lifted_prefix = luigi.Parameter()
scale = luigi.IntParameter()
#
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
@staticmethod
def default_task_config():
        # we use this to also get the common default config
config = LocalTask.default_task_config()
config.update({'agglomerator': 'kernighan-lin',
'time_limit_solver': None})
return config
def run_impl(self):
# get the global config and init configs
# shebang, block_shape, roi_begin, roi_end = self.global_config_values()
shebang, block_shape, roi_begin, roi_end, block_list_path\
= self.global_config_values(with_block_list_path=True)
self.init(shebang)
with vu.file_reader(self.problem_path, 'r') as f:
shape = tuple(f['s0/graph'].attrs['shape'])
factor = 2**self.scale
block_shape = tuple(bs * factor for bs in block_shape)
# update the config with input and graph paths and keys
# as well as block shape
config = self.get_task_config()
config.update({'problem_path': self.problem_path, 'scale': self.scale,
'block_shape': block_shape, 'lifted_prefix': self.lifted_prefix})
# make output datasets
out_key = 's%i/sub_results_lmc' % self.scale
with vu.file_reader(self.problem_path) as f:
out = f.require_group(out_key)
# NOTE, gzip may fail for very small inputs, so we use raw compression for now
# might be a good idea to give blosc a shot ...
out.require_dataset('cut_edge_ids', shape=shape, chunks=block_shape,
compression='raw', dtype='uint64')
out.require_dataset('node_result', shape=shape, chunks=block_shape,
compression='raw', dtype='uint64')
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end,
block_list_path)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
prefix = 's%i' % self.scale
self.prepare_jobs(n_jobs, block_list, config, prefix)
self.submit_jobs(n_jobs, prefix)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs, prefix)
# part of the luigi API
def output(self):
return luigi.LocalTarget(os.path.join(self.tmp_folder,
self.task_name + '_s%i.log' % self.scale))
class SolveLiftedSubproblemsLocal(SolveLiftedSubproblemsBase, LocalTask):
""" SolveLiftedSubproblems on local machine
"""
pass
class SolveLiftedSubproblemsSlurm(SolveLiftedSubproblemsBase, SlurmTask):
""" SolveLiftedSubproblems on slurm cluster
"""
pass
class SolveLiftedSubproblemsLSF(SolveLiftedSubproblemsBase, LSFTask):
""" SolveLiftedSubproblems on lsf cluster
"""
pass
#
# Implementation
#
def _find_lifted_edges(lifted_uv_ids, node_list):
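    """Return the indices of lifted edges whose endpoints are both contained in node_list."""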
lifted_indices = np.arange(len(lifted_uv_ids), dtype='uint64')
# find overlap of node_list with u-edges
inner_us = np.in1d(lifted_uv_ids[:, 0], node_list)
inner_indices = lifted_indices[inner_us]
inner_uvs = lifted_uv_ids[inner_us]
# find overlap of node_list with v-edges
inner_vs = np.in1d(inner_uvs[:, 1], node_list)
return inner_indices[inner_vs]
def _solve_block_problem(block_id, graph, uv_ids, ds_nodes,
costs, lifted_uvs, lifted_costs,
lifted_solver, solver,
ignore_label, blocking, out, time_limit):
fu.log("Start processing block %i" % block_id)
# load the nodes in this sub-block and map them
# to our current node-labeling
chunk_id = blocking.blockGridPosition(block_id)
nodes = ds_nodes.read_chunk(chunk_id)
if nodes is None:
fu.log_block_success(block_id)
return
# if we have an ignore label, remove zero from the nodes
# (nodes are sorted, so it will always be at pos 0)
if ignore_label and nodes[0] == 0:
nodes = nodes[1:]
removed_ignore_label = True
if len(nodes) == 0:
fu.log_block_success(block_id)
return
else:
removed_ignore_label = False
# we allow for invalid nodes here,
# which can occur for un-connected graphs resulting from bad masks ...
inner_edges, outer_edges = graph.extractSubgraphFromNodes(nodes, allowInvalidNodes=True)
# if we only have no inner edges, return
# the outer edges as cut edges
if len(inner_edges) == 0:
if len(nodes) > 1:
assert removed_ignore_label,\
"Can only have trivial sub-graphs for more than one node if we removed ignore label"
cut_edge_ids = outer_edges
sub_result = None
fu.log("Block %i: has no inner edges" % block_id)
# otherwise solve the multicut for this block
else:
# find the lifted uv-ids that correspond to the inner edges
inner_lifted_edges = _find_lifted_edges(lifted_uvs, nodes)
fu.log("Block %i: Solving sub-block with %i nodes, %i edges and %i lifted edges" % (block_id,
len(nodes),
len(inner_edges),
len(inner_lifted_edges)))
sub_uvs = uv_ids[inner_edges]
# relabel the sub-nodes and associated uv-ids for more efficient processing
nodes_relabeled, max_id, mapping = vigra.analysis.relabelConsecutive(nodes,
start_label=0,
keep_zeros=False)
sub_uvs = nt.takeDict(mapping, sub_uvs)
n_local_nodes = max_id + 1
sub_graph = nifty.graph.undirectedGraph(n_local_nodes)
sub_graph.insertEdges(sub_uvs)
sub_costs = costs[inner_edges]
assert len(sub_costs) == sub_graph.numberOfEdges
# we only need to run lifted multicut if we have lifted edges in
# the subgraph
if len(inner_lifted_edges) > 0:
fu.log("Block %i: have lifted edges and use lifted multicut solver" % block_id)
sub_lifted_uvs = nt.takeDict(mapping, lifted_uvs[inner_lifted_edges])
sub_lifted_costs = lifted_costs[inner_lifted_edges]
# solve multicut and relabel the result
sub_result = lifted_solver(sub_graph, sub_costs, sub_lifted_uvs, sub_lifted_costs,
time_limit=time_limit)
# otherwise we run normal multicut
else:
fu.log("Block %i: don't have lifted edges and use multicut solver")
# solve multicut and relabel the result
sub_result = solver(sub_graph, sub_costs, time_limit=time_limit)
assert len(sub_result) == len(nodes), "%i, %i" % (len(sub_result), len(nodes))
sub_edgeresult = sub_result[sub_uvs[:, 0]] != sub_result[sub_uvs[:, 1]]
assert len(sub_edgeresult) == len(inner_edges)
cut_edge_ids = inner_edges[sub_edgeresult]
cut_edge_ids = np.concatenate([cut_edge_ids, outer_edges])
_, res_max_id, _ = vigra.analysis.relabelConsecutive(sub_result, start_label=1,
keep_zeros=False,
out=sub_result)
fu.log("Block %i: Subresult has %i unique ids" % (block_id, res_max_id))
# IMPORTANT !!!
# we can only add back the ignore label after getting the edge-result !!!
if removed_ignore_label:
sub_result = np.concatenate((np.zeros(1, dtype='uint64'),
sub_result))
# get chunk id of this block
block = blocking.getBlock(block_id)
chunk_id = tuple(beg // sh for beg, sh in zip(block.begin, blocking.blockShape))
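    # e.g. (assumed toy values) a block starting at (512, 0, 256) with a block shape of
    # (256, 256, 256) corresponds to chunk id (2, 0, 1)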
# serialize the cut-edge-ids and the (local) node labeling
ds_edge_res = out['cut_edge_ids']
fu.log("Block %i: Serializing %i cut edges" % (block_id, len(cut_edge_ids)))
ds_edge_res.write_chunk(chunk_id, cut_edge_ids, True)
if sub_result is not None:
ds_node_res = out['node_result']
fu.log("Block %i: Serializing %i node results" % (block_id, len(sub_result)))
ds_node_res.write_chunk(chunk_id, sub_result, True)
fu.log_block_success(block_id)
def solve_lifted_subproblems(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
# get the config
with open(config_path) as f:
config = json.load(f)
# input configs
problem_path = config['problem_path']
scale = config['scale']
block_shape = config['block_shape']
block_list = config['block_list']
lifted_prefix = config['lifted_prefix']
agglomerator_key = config['agglomerator']
time_limit = config.get('time_limit_solver', None)
n_threads = config.get('threads_per_job', 1)
fu.log("reading problem from %s" % problem_path)
problem = z5py.N5File(problem_path)
# load the costs
# NOTE we use different cost identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the costs come from the CostsWorkflow and
# hence the identifier is identical
costs_key = 's%i/costs_lmc' % scale if scale > 0 else 's0/costs'
fu.log("reading costs from path in problem: %s" % costs_key)
ds = problem[costs_key]
ds.n_threads = n_threads
costs = ds[:]
# load the graph
# NOTE we use different graph identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the graph comes from the GraphWorkflow and
# hence the identifier is identical
graph_key = 's%i/graph_lmc' % scale if scale > 0 else 's0/graph'
shape = problem[graph_key].attrs['shape']
fu.log("reading graph from path in problem: %s" % graph_key)
graph = ndist.Graph(problem_path, graph_key, numberOfThreads=n_threads)
uv_ids = graph.uvIds()
# check if the problem has an ignore-label
ignore_label = problem[graph_key].attrs['ignore_label']
fu.log("ignore label is %s" % ('true' if ignore_label else 'false'))
fu.log("using agglomerator %s" % agglomerator_key)
lifted_solver = get_lifted_multicut_solver(agglomerator_key)
# TODO enable different multicut agglomerator
solver = get_multicut_solver(agglomerator_key)
# load the lifted edges and costs
nh_key = 's%i/lifted_nh_%s' % (scale, lifted_prefix)
lifted_costs_key = 's%i/lifted_costs_%s' % (scale, lifted_prefix)
ds = problem[nh_key]
fu.log("reading lifted uvs")
ds.n_threads = n_threads
lifted_uvs = ds[:]
fu.log("reading lifted costs")
ds = problem[lifted_costs_key]
ds.n_threads = n_threads
lifted_costs = ds[:]
# the output group
out = problem['s%i/sub_results_lmc' % scale]
# NOTE we use different sub-graph identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the sub-graphs come from the GraphWorkflow and
# are hence identical
sub_graph_identifier = 'sub_graphs' if scale == 0 else 'sub_graphs_lmc'
ds_nodes = problem['s%i/%s/nodes' % (scale, sub_graph_identifier)]
blocking = nt.blocking([0, 0, 0], shape, list(block_shape))
fu.log("start processsing %i blocks" % len(block_list))
with futures.ThreadPoolExecutor(n_threads) as tp:
tasks = [tp.submit(_solve_block_problem,
block_id, graph, uv_ids, ds_nodes,
costs, lifted_uvs, lifted_costs,
lifted_solver, solver, ignore_label,
blocking, out, time_limit)
for block_id in block_list]
[t.result() for t in tasks]
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
solve_lifted_subproblems(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/lifted_multicut/solve_lifted_subproblems.py
|
solve_lifted_subproblems.py
|
py
| 13,622 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "luigi.Task",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "luigi.IntParameter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "luigi.TaskParameter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask.default_task_config",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.blocks_in_volume",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "luigi.LocalTarget",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.SlurmTask",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LSFTask",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.in1d",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.in1d",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "vigra.analysis.relabelConsecutive",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "vigra.analysis",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "nifty.tools.takeDict",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "nifty.graph.undirectedGraph",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "nifty.graph",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "nifty.tools.takeDict",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "vigra.analysis.relabelConsecutive",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "vigra.analysis",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "z5py.N5File",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "nifty.distributed.Graph",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "nifty.distributed",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "elf.segmentation.lifted_multicut.get_lifted_multicut_solver",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "elf.segmentation.multicut.get_multicut_solver",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "nifty.tools.blocking",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "concurrent.futures",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_job_success",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 345,
"usage_type": "attribute"
}
] |
31153800734
|
import pygame
import random
import numpy as np
class Explosion(pygame.sprite.Sprite):
def __init__(self, frames, xcoord, ycoord, scale=1.5, update_n=1):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
self.frame = 0
self.frames = frames
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.x = xcoord
self.y = ycoord
self.scale = scale
self.update_n = update_n
self.update_counter = self.update_n
def update(self):
self.update_counter -= 1
if self.frame >= len(self.frames) - 1:
self.kill()
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.image = pygame.transform.scale(self.image, (int(self.rect.size[0] * self.scale),
int(self.rect.size[1] * self.scale)))
self.rect = self.image.get_rect()
self.rect.x = self.x
self.rect.y = self.y
if self.update_counter == 0:
self.frame += 1
self.update_counter = self.update_n
gameDisplay.blit(self.image, self.rect)
def update_moving(self, xspeedboss, yspeedboss):
if self.frame >= len(self.frames) - 1:
self.kill()
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.image = pygame.transform.scale(self.image, (int(self.rect.size[0] * 1.5), int(self.rect.size[1] * 1.5)))
self.rect = self.image.get_rect()
self.x += xspeedboss
self.y += yspeedboss
self.rect.x = self.x
self.rect.y = self.y
self.frame += 1
gameDisplay.blit(self.image, self.rect)
class Explosion2(pygame.sprite.Sprite):
def __init__(self, frames, xcoord, ycoord):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
self.frame = 0
self.frames = frames
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.x = xcoord
self.y = ycoord
self.expansion = 0.8
self.update_counter = 3
def update(self):
self.update_counter -= 1
if self.frame >= len(self.frames) - 1:
self.kill()
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.image = pygame.transform.scale(self.image, (int(self.rect.size[0] * self.expansion),
int(self.rect.size[1] * self.expansion)))
self.rect = self.image.get_rect()
self.mask = pygame.mask.from_surface(self.image)
self.rect.centerx = self.x
self.rect.centery = self.y
if self.update_counter == 0:
self.expansion += 0.045
self.frame += 1
self.update_counter = 4
gameDisplay.blit(self.image, self.rect)
class Ship(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
self.image = player_ship
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = (display_width - self.width) * 0.5
self.y = display_height - self.height * 1.2
self.speed = 0 # This variable changes with key presses
self.endspeed = 1
self.mask = pygame.mask.from_surface(self.image)
def update(self):
# Update variables of the game for next update
self.x += self.speed
if self.x > display_width - self.width:
self.x = display_width - self.width # boundaries for ship
elif self.x < 0:
self.x = 0 # boundaries for ship
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
def to_end_position(self, xcoord):
statement = False
self.speed = 0
if self.x < xcoord - 1:
self.x += self.endspeed
elif self.x > xcoord + 1:
self.x -= self.endspeed
else:
statement = True
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
return statement
class Meteor(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
meteor_choices = [meteor1, meteor2, meteor3, meteor4]
        self.image = meteor_choices[random.randrange(0, len(meteor_choices))]
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = random.randrange(0, display_width - self.width)
self.y = -200
self.mask = pygame.mask.from_surface(self.image)
self.speed = 7
def update(self):
self.y += self.speed
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
class Laser(pygame.sprite.Sprite):
def __init__(self, xcoord, ycoord):
pygame.sprite.Sprite.__init__(self)
self.image = laser_blue
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = xcoord - 0.5 * self.width # depends on ship location
self.y = ycoord # These will be set at spawn because it depends on ship location
self.speed = -20
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.y += self.speed
if self.y < 0 - self.height:
self.kill()
else:
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
class EnemyGoon(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
enemy_choices = [enemy1, enemy2, enemy3]
        self.image = enemy_choices[random.randrange(0, len(enemy_choices))]
self.rect = self.image.get_rect()
self.image = pygame.transform.scale(self.image, (int(self.rect.size[0] / 1.5), int(self.rect.size[1] / 1.5)))
self.rect = self.image.get_rect() # after transforming need to acquire new rect
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = random.choice(np.linspace(0, display_width - self.width, 10))
self.y = 100 + self.height
self.mask = pygame.mask.from_surface(self.image)
        self.update_timer = fps * 2  # fire every 120 frames (2 seconds at 60 fps)
self.x_speed = random.choice([-3, 3])
def update(self):
self.update_timer -= 1
if self.update_timer == 0:
self.fire()
self.update_timer = fps * 2
self.y = 100 * np.sin(timer / 500) + 100
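        # sinusoidal vertical motion: y oscillates between 0 and 200 px with a period
        # of 2 * pi * 500 ms (roughly 3.1 seconds)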
self.x += self.x_speed
if self.x > display_width - self.width:
self.x = display_width - self.width # boundaries for enemy
self.x_speed = -self.x_speed # flip speed so that enemy moves into opposite direction
elif self.x < 0:
self.x = 0 # boundaries for ship
self.x_speed = -self.x_speed # flip speed so that enemy moves into opposite direction
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
def fire(self):
enemy_lasers.add(EnemyLaser(self.x + 0.5 * self.width, self.y))
pygame.mixer.Channel(2).play(enemy_laser_sound)
class EnemyLaser(pygame.sprite.Sprite):
def __init__(self, xcoord, ycoord):
pygame.sprite.Sprite.__init__(self)
self.image = laser_red
self.image = pygame.transform.flip(self.image, 0, 1)
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = xcoord - 0.5 * self.width # depends on ship location
self.y = ycoord # These will be set at spawn because it depends on ship location
self.speed = 7
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.y += self.speed
if self.y > display_height:
self.kill()
else:
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
class ChooseFont(object):
def __init__(self, fonttype, fontsize, color):
self.font = pygame.font.Font(fonttype, fontsize)
self.color = color
def message(self, text, xcoord, ycoord, centered=False):
text_surface = self.font.render(text, True, self.color)
text_rect = text_surface.get_rect()
if centered is True:
text_rect.center = (xcoord, ycoord)
elif centered is False:
text_rect.x = xcoord
text_rect.y = ycoord
gameDisplay.blit(text_surface, text_rect)
class Boss(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # call Sprite initializer
self.image = boss_image
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = display_width * 0.5 - self.width * 0.5
self.y = 50
self.y_speed = 0
self.mask = pygame.mask.from_surface(self.image)
self.laser_timer_max = fps * 2 # update every 120 frames
self.laser_timer = fps * 2 # update every 120 frames
self.laser_list = [5, 10, 15, 20, 25]
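        # countdown frames at which the boss fires, i.e. a burst of five double-shots
        # spaced five frames apart towards the end of each laser cycle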
self.bomb_timer_max = fps * 4
self.bomb_timer = self.bomb_timer_max
self.x_speed = 3
self.hp = 100
self.maxhp = self.hp
self.dead_timer = 170
self.add_explosion_timer = 10
self.randx = None
self.randy = None
self.hp_50 = False
self.hp_25 = False
def update(self):
if self.hp > 0:
self.laser_timer -= 1
self.bomb_timer -= 1
if self.laser_timer in self.laser_list: # frames at which ship fires
self.fire_laser()
if self.laser_timer == 0:
self.laser_timer = self.laser_timer_max
if self.bomb_timer == 0:
self.bomb_timer = self.bomb_timer_max
self.fire_bomb()
if self.hp < self.maxhp * 0.5 and self.hp_50 is False:
if self.x_speed > 0:
self.x_speed = 5
elif self.x_speed < 0:
self.x_speed = -5
self.laser_timer_max = fps * 1.7
self.bomb_timer_max = fps * 3.5
self.hp_50 = True
if self.hp < self.maxhp * 0.25 and self.hp_25 is False:
if self.x_speed > 0:
self.x_speed = 7
elif self.x_speed < 0:
self.x_speed = -7
self.laser_timer_max = fps * 1.5
self.bomb_timer_max = fps * 3
self.hp_25 = True
elif self.dead_timer > 0:
self.x_speed = 1
self.add_explosion_timer -= 1
self.dead_timer -= 1
if self.add_explosion_timer == 0:
self.add_explosions()
self.add_explosion_timer = 10
for explosion in explosions_boss:
explosion.update_moving(self.x_speed, self.y_speed)
self.x += self.x_speed
if self.x > display_width - self.width:
self.x = display_width - self.width # boundaries for enemy
self.x_speed = -self.x_speed # flip speed so that enemy moves into opposite direction
elif self.x < 0:
self.x = 0 # boundaries for ship
self.x_speed = -self.x_speed # flip speed so that enemy moves into opposite direction
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
self.draw_health()
def fire_laser(self):
enemy_lasers.add(EnemyLaser(self.x + 0.35 * self.width, self.y + 0.8 * self.height))
enemy_lasers.add(EnemyLaser(self.x + 0.65 * self.width, self.y + 0.8 * self.height))
pygame.mixer.Channel(2).play(enemy_laser_sound)
def fire_bomb(self):
boss_bomb.add(BossBomb(self.x, self.y))
pygame.mixer.Channel(4).play(bomb_release_sound)
def draw_health(self):
color = red
width_hp = self.width * (self.hp / self.maxhp)
healthbar = pygame.Rect((self.x, self.y - 10, width_hp, 10))
pygame.draw.rect(gameDisplay, color, healthbar)
def add_explosions(self):
for i in range(2):
self.randx = random.randint(np.round(self.x) + 10 - 32, np.round(self.x) + self.width - 10 - 32)
self.randy = random.randint(np.round(self.y) + 10 - 64, np.round(self.y) + self.height - 10 - 64)
explosions_boss.add(Explosion(explosion1, self.randx, self.randy))
pygame.mixer.Channel(3).play(explosion_sound)
class BossBomb(pygame.sprite.Sprite):
def __init__(self, xcoord, ycoord):
pygame.sprite.Sprite.__init__(self)
self.image = missile
self.image = pygame.transform.flip(self.image, 0, 1)
self.rect = self.image.get_rect()
self.width = self.rect.size[0]
self.height = self.rect.size[1]
self.x = xcoord - 0.5 * self.width # depends on ship location
self.y = ycoord # These will be set at spawn because it depends on ship location
self.xspeed = 0
self.xspeedincr = 0.3
self.xspeedmax = 5
self.yspeed = 3
self.mask = pygame.mask.from_surface(self.image)
def update(self, xship, yship):
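        # simple homing behaviour: accelerate horizontally towards the ship's x position
        # (capped at +/- xspeedmax) while descending at a constant vertical speed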
if xship > self.x:
if self.xspeed < self.xspeedmax:
self.xspeed += self.xspeedincr
elif xship < self.x:
if self.xspeed > -self.xspeedmax:
self.xspeed -= self.xspeedincr
self.x += self.xspeed
self.y += self.yspeed
if self.y >= display_height - 200:
self.kill()
explosions.add(Explosion2(explosion2, self.x, self.y))
pygame.mixer.Channel(5).play(bomb_explosion_sound)
else:
self.rect.x = self.x
self.rect.y = self.y # set the rect (not just blit) or collision won't work!
gameDisplay.blit(self.image, self.rect)
def main_menu():
button_width = start_button.get_rect().size[0]
scheme_width = controlscheme.get_rect().size[0]
button_x_center = (display_width - button_width) * 0.5
scheme_x_center = (display_width - scheme_width) * 0.5
# End game when this becomes true
in_main_menu = True
# Play the soundtrack
pygame.mixer.Channel(0).play(game_music, loops=-1)
# This is the game loop where all game logic happens
while in_main_menu:
# This checks all events that happen (which are located in pygame.event.get()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pos = pygame.mouse.get_pos()
if startbutton.collidepoint(pos):
pygame.mixer.Channel(1).play(button_sound)
global time_since_startbutton
time_since_startbutton = pygame.time.get_ticks()
game_loop()
elif creditbutton.collidepoint(pos):
credit_loop()
elif quitbutton.collidepoint(pos):
pygame.mixer.Channel(1).play(button_sound)
pygame.quit()
quit()
# Update main menu
gameDisplay.blit(background, (0, 0))
startbutton = gameDisplay.blit(start_button, (button_x_center, display_height * 0.4))
creditbutton = gameDisplay.blit(credit_button, (button_x_center, display_height * 0.5))
quitbutton = gameDisplay.blit(quit_button, (button_x_center, display_height * 0.6))
gameDisplay.blit(controlscheme, (scheme_x_center, display_height * 0.7))
pygame.display.update()
def credit_loop():
credits = True
while credits:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
credits = False
# Update
gameDisplay.blit(credit_background, (0, 0))
pygame.display.update()
def game_loop():
# Instantiate Ship & Meteor and create a group for lasersprites
    global ship, ship_group, meteors, lasers, score_count, enemies, fps, timer, enemy_lasers
global boss_bomb, explosions, explosions_boss
ship_group = pygame.sprite.GroupSingle()
boss_group = pygame.sprite.GroupSingle()
boss_bomb = pygame.sprite.Group()
enemies = pygame.sprite.Group()
meteors = pygame.sprite.Group()
lasers = pygame.sprite.Group()
enemy_lasers = pygame.sprite.Group()
explosions = pygame.sprite.Group()
explosions_boss = pygame.sprite.Group()
# Set variables and events needed for meteor shower
add_meteor_event = pygame.USEREVENT + 1
    add_meteor_timer = 300  # add new meteor every 300 ms
ms_event = pygame.USEREVENT + 2
ms_start = 60000 # ms after which meteor shower arrives
ms_duration = 20000
pygame.time.set_timer(add_meteor_event, add_meteor_timer)
pygame.time.set_timer(ms_event, ms_start)
ms_passed = False
ms_announcement = False
# Set variables needed to spawn enemies
add_enemies_event = pygame.USEREVENT + 3
add_enemies_timer = 5000 # add new enemies every 5000 ms
pygame.time.set_timer(add_enemies_event, add_enemies_timer)
num_enemies = 3
enemies_meteors_spawning = True
# Set variables for boss battle
boss_battle = False
won = False
ship_centered = False
boss_announcement = False
    # Instantiate other variables
score_count = 0 # score
meteors_dodged = 0
enemies_killed = 0
bosses_killed = 0
fps = 60
ship = Ship()
ship_group.add(ship) # Add ship once before playing loop starts
# This is the game loop where all game logic happens
playing = True
while playing:
timer = pygame.time.get_ticks() - time_since_startbutton # ms that have passed since start
if 30000 < timer <= 40000:
num_enemies = 4
elif 40000 < timer <= 50000:
num_enemies = 5
elif 50000 < timer <= 60000:
num_enemies = 6
# Check for global events
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if not won:
# Check for user-inputted event
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
ship.speed = -10
elif event.key == pygame.K_RIGHT:
ship.speed = 10
elif event.key == pygame.K_SPACE:
lasers.add(Laser(ship.x + 0.5*ship.width, ship.y))
pygame.mixer.Channel(1).play(laser_sound)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
ship.speed = 0
if ship_centered is True:
# Button to return to main menu after defeating boss
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
playing = False
# Check for events that only happen within certain time range
if event.type == ms_event and ms_passed is False: # This only occurs once
ms_announcement_timer = timer
ms_announcement = True
enemies_meteors_spawning = False
if event.type == add_enemies_event and enemies_meteors_spawning:
for i in range(num_enemies):
enemies.add(EnemyGoon())
try:
if timer - ms_announcement_timer < 2000 and ms_announcement is True: # display message 2000 ms
continue
elif ms_announcement is True:
ms_announcement = False # This makes sure announcement doesn't return anymore
ms_start_timer = timer # Timestamp start of meteor shower
except UnboundLocalError:
continue
try:
if timer - ms_start_timer < ms_duration and ms_passed is False:
if event.type == add_meteor_event: # add a meteor every time event is in event queue
meteors.add(Meteor())
elif ms_passed is False:
                    ms_passed = True  # makes sure the meteor shower doesn't return after it has passed, even if the event is queued again
boss_announcement_timer = timer
boss_announcement = True
except UnboundLocalError:
continue
try:
if timer - boss_announcement_timer < 2000 and boss_announcement is True:
continue
elif boss_announcement is True:
boss_announcement = False
boss_battle = True
boss = Boss()
boss_group.add(boss)
except UnboundLocalError:
continue
# Update display and sprites
gameDisplay.blit(background, (0, 0))
ship.update()
if len(meteors) < 1 and enemies_meteors_spawning:
meteors.add(Meteor())
for meteor in meteors:
meteor.update()
if meteor.y > display_height:
meteor.kill()
meteors_dodged += 1
score_count += 10
for laser in lasers:
laser.update()
for enemy in enemies:
enemy.update()
for laser in enemy_lasers:
laser.update()
if boss_battle is True:
boss.update()
for bomb in boss_bomb:
bomb.update(ship.x + 0.5 * ship.width, ship.y + 0.5 * ship.height)
boss_hit = pygame.sprite.groupcollide(lasers, boss_group, 1, 0, pygame.sprite.collide_mask)
for sprite in boss_hit:
if boss_hit[sprite]:
explosions_boss.add(Explosion(explosion1, sprite.x - 32, sprite.y - 64)) # 64 is w/l of explosion
pygame.mixer.Channel(3).play(explosion_sound)
boss.hp -= 1
for explosion in explosions_boss:
explosion.update_moving(boss.x_speed, boss.y_speed)
if boss.dead_timer <= 0:
explosions.add(Explosion(explosion3, boss.x - boss.width*0.5, boss.y - boss.height*0.5, 3, 5))
del boss
boss_battle = False
won = True
score_count += 1000
bosses_killed += 1
for explosion in explosions:
explosion.update()
if boss_battle is True:
burned = pygame.sprite.groupcollide(ship_group, explosions, 0, 0, pygame.sprite.collide_mask)
if burned:
explosions.add(Explosion(explosion3, ship.x - ship.width * 0.5, ship.y - ship.height * 0.5, 2, 5))
crashed_text.message('you died. BUT DO NOT PANIC!',
display_width * 0.5, display_height * 0.5, centered=True)
pygame.display.update()
waiting = True
while waiting:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
waiting = False
playing = False
for explosion in explosions:
explosion.update()
performance_text.message('Return to main menu by pressing Enter and try again.',
display_width * 0.5, 500, centered=True)
pygame.display.update()
if boss_battle is False and won is True:
if ship_centered is False:
ship_centered = ship.to_end_position(display_width*0.5 - ship.width * 0.5)
# Check for collisions after new display if updated
crashed = pygame.sprite.groupcollide(ship_group, meteors, 0, 0, pygame.sprite.collide_mask)
hit = pygame.sprite.groupcollide(enemy_lasers, ship_group, 1, 0, pygame.sprite.collide_mask)
if crashed or hit:
explosions.add(Explosion(explosion3, ship.x - ship.width * 0.5, ship.y - ship.height * 0.5, 2, 5))
crashed_text.message('you died. BUT DO NOT PANIC!',
display_width * 0.5, display_height * 0.5, centered=True)
pygame.display.update()
waiting = True
while waiting:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
waiting = False
playing = False
for explosion in explosions:
explosion.update()
performance_text.message('Return to main menu by pressing Enter and try again.',
display_width * 0.5, 500, centered=True)
pygame.display.update()
# Kill sprites after collision
pygame.sprite.groupcollide(lasers, meteors, 1, 0, pygame.sprite.collide_mask)
pygame.sprite.groupcollide(enemy_lasers, meteors, 1, 0, pygame.sprite.collide_mask)
enemy_hit = pygame.sprite.groupcollide(enemies, lasers, 1, 1, pygame.sprite.collide_mask)
for sprite in enemy_hit:
if enemy_hit[sprite]:
explosions.add(Explosion(explosion1, sprite.x, sprite.y))
pygame.mixer.Channel(3).play(explosion_sound)
score_count += 100
enemies_killed += 1
# Lastly, show text
performance_text.message('score: ' + str(score_count), 5, 0)
performance_text.message('%i' % (timer/1000), display_width - 45, 0)
if ms_announcement:
shower_text.message('METEOR SHOWER INCOMING', display_width * 0.5, display_height * 0.5, centered=True)
if boss_announcement:
shower_text.message('FINAL BOSS INCOMING', display_width * 0.5, display_height * 0.5, centered=True)
if ship_centered is True:
performance_text.message('meteors dodged: %i' % meteors_dodged, display_width * 0.5, 360, centered=True)
            performance_text.message('enemies destroyed: %i' % enemies_killed, display_width * 0.5, 380, centered=True)
performance_text.message('bosses destroyed: %i' % bosses_killed, display_width * 0.5, 400, centered=True)
endgame_score_text.message('Final score: %i' % score_count, display_width * 0.5, 430, centered=True)
performance_text.message('press enter to return to main menu', display_width * 0.5, 500, centered=True)
pygame.display.update()
# Set FPS
clock.tick(fps)
# Here we initialize pygame, set variables and start the actual game
pygame.init()
# pygame.mouse.set_cursor(*pygame.cursors.diamond)
pygame.mouse.set_cursor(*pygame.cursors.broken_x)
# Define some colors
black = (0, 0, 0) # (R,G,B)
red = (255, 0, 0)
green = (0, 255, 0)
# Setup a window for the game
display_width = 800
display_height = 800
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('MyFirstGame') # Window Title
# -- Load sprites from spritesheets
spritesheet_explosion1 = pygame.image.load('Textures/explosions.png')
explosion1 = []
x_all = [628, 628, 628, 628, 576, 566, 562, 562, 562, 562, 924, 858, 792, 726, 660, 594, 924, 858, 792, 726, 660, 594,
924, 764]
y_all = [772, 706, 640, 574, 938, 872, 772, 706, 640, 574, 502, 496, 496, 496, 496, 496, 436, 430, 430, 430, 430, 430,
370, 826]
height = 64
width = 64
for i in range(24):
frame = str(i)
    if len(frame) == 1:
frame = '0' + frame
x = x_all[i]
y = y_all[i]
explosion1.append(spritesheet_explosion1.subsurface(pygame.Rect(x, y, width, height)))
explosion3 = []
x_all = [100, 100, 100, 100, 888, 790, 692, 594, 496, 398, 300, 202, 104, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 100]
y_all = [398, 300, 202, 104, 2, 2, 2, 2, 2, 2, 2, 2, 2, 884, 786, 688, 590, 492, 394, 296, 198, 100, 2, 496]
h = 96
w = 96
for i in range(24):
frame = str(i)
    if len(frame) == 1:
frame = '0' + frame
x = x_all[i]
y = y_all[i]
explosion3.append(spritesheet_explosion1.subsurface(pygame.Rect(x, y, h, w)))
spritesheet_explosion2 = pygame.image.load('Textures/particlefx_06.png')
height_exp = 128
width_exp = 128
explosion2 = []
for i in range(8):
for j in range(8):
explosion2.append(spritesheet_explosion2.subsurface(pygame.Rect(
i*height_exp, j*width_exp, height_exp, width_exp)))
spritesheetspace = pygame.image.load('Textures/spritesheet_space.png')
start_button = spritesheetspace.subsurface(pygame.Rect(0, 117, 222, 39))
credit_button = spritesheetspace.subsurface(pygame.Rect(0, 78, 222, 39))
quit_button = spritesheetspace.subsurface(pygame.Rect(0, 0, 222, 39))
enemy1 = spritesheetspace.subsurface(pygame.Rect(423, 728, 93, 84))
enemy2 = spritesheetspace.subsurface(pygame.Rect(120, 604, 104, 84))
enemy3 = spritesheetspace.subsurface(pygame.Rect(144, 156, 103, 84))
laser_blue = spritesheetspace.subsurface(pygame.Rect(856, 421, 9, 54))
laser_red = spritesheetspace.subsurface(pygame.Rect(858, 230, 9, 54))
meteor1 = spritesheetspace.subsurface(pygame.Rect(224, 664, 101, 84))
meteor2 = spritesheetspace.subsurface(pygame.Rect(0, 520, 120, 98))
meteor3 = spritesheetspace.subsurface(pygame.Rect(518, 810, 89, 82))
meteor4 = spritesheetspace.subsurface(pygame.Rect(327, 452, 98, 96))
player_ship = spritesheetspace.subsurface(pygame.Rect(224, 832, 99, 75))
spritesheetspace2 = pygame.image.load('Textures/spritesheet_space2.png')
missile = spritesheetspace2.subsurface(pygame.Rect(1093, 711, 19, 40))
boss_image = spritesheetspace2.subsurface(pygame.Rect(276, 0, 172, 151))
controlscheme = pygame.image.load('Textures/controlscheme.png')
background = pygame.image.load('Textures/space_background.png').convert()
credit_background = pygame.image.load('Textures/credits.png').convert()
# Load files used in the game
game_music = pygame.mixer.Sound('Sounds/desert-travel.ogg') # Channel 0
game_music.set_volume(0.5)
button_sound = pygame.mixer.Sound('Sounds/click_menu_sound.wav') # Channel 1
laser_sound = pygame.mixer.Sound('Sounds/laser5.wav') # Channel 1
enemy_laser_sound = pygame.mixer.Sound('Sounds/laser8.wav') # Channel 2
enemy_laser_sound.set_volume(0.5)
explosion_sound = pygame.mixer.Sound('Sounds/explodemini.wav') # Channel 3
bomb_release_sound = pygame.mixer.Sound('Sounds/weaponfire4.wav') # Channel 4
bomb_explosion_sound = pygame.mixer.Sound('Sounds/explosion2.wav') # Channel 5
# Load fonts to use in the game
performance_text = ChooseFont('Fonts/xirod.ttf', 15, green)
endgame_score_text = ChooseFont('Fonts/xirod.ttf', 30, green)
crashed_text = ChooseFont('Fonts/xirod.ttf', 30, red)
shower_text = ChooseFont('Fonts/xirod.ttf', 30, red)
# Define game clock to time things
clock = pygame.time.Clock()
main_menu()
|
Hiimbawb/Spacey
|
Spacey.py
|
Spacey.py
|
py
| 32,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.flip",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 338,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 348,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.flip",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 405,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RETURN",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.GroupSingle",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 449,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.GroupSingle",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 454,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"line_number": 471,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 506,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 511,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 518,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
"line_number": 520,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 521,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 521,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RETURN",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 597,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 601,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 619,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 627,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 628,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 631,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RETURN",
"line_number": 632,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 639,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 647,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 652,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 655,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 656,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 660,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RETURN",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 669,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 669,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 672,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 673,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 673,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 679,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 700,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 707,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 709,
"usage_type": "attribute"
},
{
"api_name": "pygame.cursors",
"line_number": 709,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 720,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 720,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 721,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 724,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 762,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 765,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 765,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 766,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 769,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 771,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 772,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 773,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 774,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 778,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 779,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 779,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 782,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 783,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 784,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 787,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 787,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 789,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 789,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 790,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 790,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 791,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 791,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 793,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 793,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 794,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 795,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 804,
"usage_type": "attribute"
}
] |
27284711802
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader as data
from sklearn.preprocessing import MinMaxScaler
# noinspection PyUnresolvedReferences
import silence_tensorflow.auto # for ignoring tensorflow info and warnings
from keras.layers import Dense, Dropout, LSTM
from keras.models import Sequential
from datetime import date
# start and end dates for the data frame
start = '2010-01-01'
end = date.today().strftime('%Y-%m-%d')
# data frame
df = data.DataReader('SBI', 'yahoo', start, end)
df = df.reset_index()
df = df.drop(['Date', 'Adj Close'], axis=1)
# splitting data into Training and Testing
data_training = pd.DataFrame(df['Close'][0:int(len(df) * 0.70)])
data_testing = pd.DataFrame(df['Close'][int(len(df) * 0.70): int(len(df))])
# scaling down the training data and converting it into an array
scale = MinMaxScaler(feature_range=(0, 1))
data_training_array = scale.fit_transform(data_training)
# splitting data into x_train and y_train
# x_train is the first 100 values and y_train is the 101st value
# the window then slides forward by one step: the oldest value drops out of x_train,
# y_train becomes the 102nd value, and so on until the last value in the training array
x_train = []
y_train = []
for i in range(100, data_training_array.shape[0]):
x_train.append(data_training_array[i - 100: i])
y_train.append(data_training_array[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
# Simple LSTM Model
model = Sequential()
# layer 1
model.add(LSTM(units=50, activation='relu', return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
# layer 2
model.add(LSTM(units=60, activation='relu', return_sequences=True))
model.add(Dropout(0.3))
# layer 3
model.add(LSTM(units=80, activation='relu', return_sequences=True))
model.add(Dropout(0.4))
# layer 4
model.add(LSTM(units=120, activation='relu'))
model.add(Dropout(0.5))
# dense layer
model.add(Dense(units=1))
# compile model with adam optimizer
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=50)
# saving model
model.save('keras_model.h5')
# predicting values for testing data
past_100_days = data_training.tail(100)
final_df = past_100_days.append(data_testing, ignore_index=True)
# scaling down the testing data and converting it into an array
input_data = scale.fit_transform(final_df)
# splitting data into x_test and y_test
x_test = []
y_test = []
for i in range(100, input_data.shape[0]):
x_test.append(input_data[i - 100: i])
y_test.append(input_data[i, 0])
x_test, y_test = np.array(x_test), np.array(y_test)
# Making Prediction
y_predicted = model.predict(x_test)
# scaling up the predicted data
scale_factor = 1/scale.scale_[0]
y_predicted = y_predicted * scale_factor
y_test = y_test * scale_factor
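# note: dividing by scale.scale_[0] undoes the scaler's slope but not its offset (min_),
# so the rescaled curves are shifted by a constant; scale.inverse_transform would recover the exact prices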
# plotting original vs predicted data
plt.figure(figsize=(12, 6))
plt.plot(y_test, 'b', label='Original Price')
plt.plot(y_predicted, 'r', label='Predicted Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
|
aashima1433/StockProject
|
LSTM_model.py
|
LSTM_model.py
|
py
| 3,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date.today",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pandas_datareader.DataReader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
}
] |
811063416
|
# Network Delay Time - https://leetcode.com/problems/network-delay-time/
'''There are N network nodes, labelled 1 to N.
Given times, a list of travel times as directed edges times[i] = (u, v, w), where u is the source node,
v is the target node, and w is the time it takes for a signal to travel from source to target.
Now, we send a signal from a certain node K. How long will it take for all nodes to receive the signal? If it is impossible, return -1.
Example 1:
Input: times = [[2,1,1],[2,3,1],[3,4,1]], N = 4, K = 2
Output: 2'''
# Dijkstra's Algorithm - O(n^2)
from collections import defaultdict
from typing import List
class Solution:
def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
def getMinDistance(distance, seen):
minNode = float('inf')
minIndex = -1
for i in range(1, N+1):
if distance[i] < minNode and not seen[i]:
minNode = distance[i]
minIndex = i
return minIndex
graph = defaultdict(list)
for u, v, w in times:
graph[u].append((v,w))
seen = [False] * (N+1)
distance = {node: float('inf') for node in range(1, N+1)}
distance[K] = 0
while True:
u = getMinDistance(distance, seen)
if u < 0:
break
seen[u] = True
for neighbour, time in graph[u]:
if distance[neighbour] > distance[u] + time:
distance[neighbour] = distance[u] + time
output = max(distance.values())
return output if output != float('inf') else -1
# Dijkstra's Algorithm - O(n log n)
from collections import defaultdict
import heapq
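# Lazy-deletion heap: pop the closest unsettled node; the first time a node is popped its
# distance is final, and stale heap entries are skipped by the "if node in distance" check.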
class Solution:
def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
graph = defaultdict(list)
for u, v, w in times:
graph[u].append((v,w))
distance = {}
q = [(0, K)]
while q:
time, node = heapq.heappop(q)
if node in distance:
continue
distance[node] = time
for neighbour, timeTravelled in graph[node]:
if neighbour not in distance:
heapq.heappush(q, (time+timeTravelled, neighbour))
return max(distance.values()) if len(distance) == N else -1
|
Saima-Chaity/Leetcode
|
Graph/networkDelayTime.py
|
networkDelayTime.py
|
py
| 2,428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 70,
"usage_type": "call"
}
] |
10356870047
|
import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
import argparse
import os
# pylint: disable=E1101, W0612
"""
# GPU CLUSTER
source = '/vol/gpudata/rar2417/src/model1' #path to code location
data_path = '/vol/gpudata/rar2417/Data' #path to the parent directory of 'audio'
output_path = '/vol/gpudata/rar2417/results/model1' #path to output the results
model_path = output_path + '/models/resnet_bgru_1.ckpt' #path to find pre-trained model
"""
# HOME SETUP
source = '/home/r2d9/Desktop/SpeechRecognitionProject' #path to code location
data_path = '/home/r2d9/Desktop/Data/train' #path to the parent directory of 'audio'
output_path = '/home/r2d9/Desktop/results' #path to output the results
model_path = output_path + '/models/resnet_bgru_1.ckpt' #path to find pre-trained model
parser = argparse.ArgumentParser()
parser.add_argument('-key', '--filekey', type = str, help='key for multiple trainings')
parser.add_argument('-lr', '--learning_rate', type = float, help='LEARNING_RATE')
parser.add_argument('-md', '--mode', type = int, help='1, 2 or 3')
args = parser.parse_args()
KEY = '' #provided for convenience, easy way to differenciate experiments
if args.filekey is not None:
KEY = args.filekey
MODE = 4 #default training mode, see the mode summary below
if args.mode is not None:
MODE = args.mode
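# Mode summary: 1 = train the resnet only, 2 = train the bgru on top of a pre-trained resnet (resnet frozen),
# 3 = fine-tune resnet + bgru from a pre-trained checkpoint, 4 = train everything from scratch in one go.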
os.chdir(source)
from dataset import dataset
from model_resnet_bgru import Network, accuracy
# Configuration
start = time.time()
torch.set_default_tensor_type('torch.FloatTensor')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparams
NUM_EPOCHS = 50
BATCH_SIZE = 20
LAMBDA = 0.87
LEARNING_RATE = 0.0003
if args.learning_rate is not None:
LEARNING_RATE = args.learning_rate
# Model & Dataset
data = dataset(data_path + '/training_list.txt', data_path + '/audio')
valset = dataset(data_path + '/validation_list.txt', data_path + '/audio')
testset = dataset(data_path + '/testing_list.txt', data_path + '/audio')
if MODE == 1: #training only the resnet
model = Network(mode=1).to(device)
if MODE == 2: #training only the bgru
model = Network().to(device)
model.load_state_dict(torch.load(model_path))
for name, param in model.named_parameters():
if 'gru' in name:
param.requires_grad = True
if 'resnet' in name:
param.requires_grad = False
if MODE == 3: #training resnet and bgru from pre-trained model
model = Network().to(device)
model.load_state_dict(torch.load(model_path))
for params in model.parameters():
params.requires_grad = True
if MODE == 4: #training everything in one go from scratch
model = Network().to(device)
for params in model.parameters():
params.requires_grad = True
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LEARNING_RATE)
scheduler = ExponentialLR(optimizer, LAMBDA) #learning rate decay, halved every 5 epochs
epoch, estop, maxval, maxind = 0, False, 0, 0
while epoch < NUM_EPOCHS and not estop: #early stopping
dataloader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True, drop_last=False)
if epoch > 4: #fixed learning rate for first 5 epochs
scheduler.step()
for i_batch, batch in enumerate(dataloader):
# Forward
optimizer.zero_grad()
outputs = model(batch['audio'])
loss = criterion(outputs, batch['label'].to(device))
# Backward and optimize
loss.backward()
optimizer.step()
# Save loss
with open(output_path +'/loss_'+KEY+'.txt', 'a') as myfile:
myfile.write(str(loss.item())+'\n')
# Save model, accuracy at each epoch
newval = accuracy(model, valset, output_path + '/val_'+KEY+'.txt', 4) #accuracy on validation set for early-stopping
    accuracy(model, data, output_path + '/train_'+KEY+'.txt', 4) #accuracy on training set to monitor overfitting
accuracy(model, testset, output_path + '/test_'+KEY+'.txt', 4) #accuracy on testing set
# Early stopping
if newval > maxval:
maxval = newval
maxind = epoch
if MODE == 1:
torch.save(model.state_dict(), output_path + '/models/resnet_'+KEY+'.ckpt')
if MODE == 2:
torch.save(model.state_dict(), output_path +'/models/bgru_'+KEY+'.ckpt')
if MODE == 3:
torch.save(model.state_dict(), output_path +'/models/resnet_bgru_3_'+KEY+'.ckpt')
if MODE == 4:
torch.save(model.state_dict(), output_path +'/models/resnet_bgru_'+KEY+'.ckpt')
if epoch > maxind + 4:
estop = True
epoch += 1
data.resample_unknown_class()
print('key ', KEY)
print('time ', time.time()-start)
print('epochs ', epoch)
|
remit0/SpeechRecognitionProject
|
legacy/training3.py
|
training3.py
|
py
| 4,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.set_default_tensor_type",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "dataset.dataset",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "dataset.dataset",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "dataset.dataset",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.Network",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.Network",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.Network",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.Network",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.ExponentialLR",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.accuracy",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "model_resnet_bgru.accuracy",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "dataset.dataset",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "model_resnet_bgru.accuracy",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 128,
"usage_type": "call"
}
] |
44530888336
|
import cv2
import numpy as np
from dlclive import DLCLive, Processor
from skimage.transform import (hough_line, hough_line_peaks)
folder = 'model/'
dlc_proc = Processor()
dlc_live = DLCLive(folder, processor=dlc_proc)
dlc_live.init_inference()
i = 0
while True:
# Load frame
i += 1
frame = cv2.imread('frames/ (' + str(i) + ').jpg')
frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
# Get poses
pose = dlc_live.get_pose(frame)
nose = (int(pose[0, 0]), int(pose[0, 1]))
head = (int(pose[1, 0]), int(pose[1, 1]))
body = (int(pose[2, 0]), int(pose[2, 1]))
# Draw lines on Stage for angle measurement
stage = np.zeros((frame.shape[0], frame.shape[1]), np.uint8) # Clear frame
stage = cv2.line(stage, nose, head, 255, 1)
# Perform Hough Transformation to detect lines
hspace, angles, distances = hough_line(stage)
# Find angle
angle = []
for _, a, distances in zip(*hough_line_peaks(hspace, angles, distances)):
angle.append(a)
# Obtain angle for each line
angles = [a * 180 / np.pi for a in angle]
# Get length of radius for angle visualization
radius = cv2.norm(head, nose)
axes = (int(radius), int(radius))
# Get 360 degree readout
degree = int(angles[0])
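    # hough returns the line angle in roughly the -90..90 degree range; the nose position relative
    # to the head selects the quadrant so `degree` spans a full 0-360 readout (0 appears to be nose straight up)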
if nose[0] > head[0] and degree < 0:
degree = 180 + degree
elif nose[0] < head[0] and degree < 0:
degree = 360 + degree
elif nose[0] < head[0] and degree > 0:
degree = 180 + degree
# Draw lines
frame = cv2.line(frame, nose, head, (255, 255, 0), 1, lineType=cv2.LINE_AA)
frame = cv2.line(frame, (head[0], int(head[1] - radius)), head, (255, 255, 0), 1, lineType=cv2.LINE_AA)
frame = cv2.putText(frame, str(degree), (head[0] - 50, head[1] - 50), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255),
lineType=cv2.LINE_AA)
# Draw arc of angle
if nose[0] >= head[0]:
frame = cv2.ellipse(frame, head, axes, -90, degree, 0, (255, 255, 0), lineType=cv2.LINE_AA)
else:
frame = cv2.ellipse(frame, head, axes, -90, 0, degree, (255, 255, 0), lineType=cv2.LINE_AA)
# Show video
cv2.imshow('Pose', frame)
cv2.imwrite("output/head_angle/" + str(i) + ".png", frame)
cv2.waitKey(1)
# Reset loop
if i == 969:
i = 0
|
nghess/dlc-live-test
|
head-angle-vf.py
|
head-angle-vf.py
|
py
| 2,324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dlclive.Processor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dlclive.DLCLive",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "skimage.transform.hough_line",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "skimage.transform.hough_line_peaks",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.norm",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "cv2.ellipse",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "cv2.ellipse",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 68,
"usage_type": "call"
}
] |
21480391270
|
from collections import namedtuple, defaultdict
import numpy as np
import bmesh
import bpy
from ..math import get_dist_sq
from ..log import log, logd
from ..helpers import get_context, get_modifier_mask
# shape_key_apply_modifiers TODO:
# - Specialcase more merging modifiers, solidify for example
# - Transfer vertex order. Is it still necessary if all merging modifiers are covered?
# Is it possible to identify which face went where without guessing?
class ShapeKeyInfo(namedtuple('ShapeKeyInfo', ['coords', 'interpolation', 'mute', 'name',
'slider_max', 'slider_min', 'value', 'vertex_group'])):
    """Helper to preserve shape key information."""
    __slots__ = ()
@classmethod
def from_shape_key_with_empty_data(cls, shape_key):
return cls(
coords=np.empty(0, dtype=np.single),
interpolation=shape_key.interpolation,
mute=shape_key.mute,
name=shape_key.name,
# relative_key=shape_key.relative_key.name,
slider_max=shape_key.slider_max,
slider_min=shape_key.slider_min,
value=shape_key.value,
vertex_group=shape_key.vertex_group,
)
@classmethod
def from_shape_key(cls, shape_key):
info = cls.from_shape_key_with_empty_data(shape_key)
info.get_coords_from(shape_key.data)
return info
def get_coords_from(self, vertices):
self.coords.resize(len(vertices) * 3, refcheck=False)
vertices.foreach_get('co', self.coords)
def put_coords_into(self, vertices):
vertices.foreach_set('co', self.coords)
def weld_mesh(mesh, weld_map):
"""Welds mesh vertices according to a source index to destination index weld map."""
bm = bmesh.new()
bm.from_mesh(mesh)
bm.verts.ensure_lookup_table()
targetmap = {bm.verts[src_idx]: bm.verts[dst_idx] for src_idx, dst_idx in weld_map.items()}
bmesh.ops.weld_verts(bm, targetmap=targetmap)
bm.to_mesh(mesh)
bm.free()
def apply_modifier(modifier):
try:
bpy.ops.object.modifier_apply(get_context(modifier.id_data), modifier=modifier.name)
except RuntimeError:
logd(f"Couldn't apply {modifier.type} modifier {modifier.name}")
class ModifierHandler:
"""Subclass this to define special behavior when applying different modifiers."""
modifier_type = None
modifier_name = None
def __init__(self, modifier):
self.modifier_name = modifier.name
@classmethod
def poll(cls, modifier):
return cls.modifier_type is None or modifier.type == cls.modifier_type
def apply(self, obj):
apply_modifier(obj.modifiers[self.modifier_name])
class MirrorModifierHandler(ModifierHandler):
modifier_type = 'MIRROR'
weld_map = None # Specifies vertex pairs to be welded
def __init__(self, modifier):
super().__init__(modifier)
self.merge_dist = modifier.merge_threshold
self.num_mirrors = sum(modifier.use_axis)
@classmethod
def poll(cls, modifier):
return super().poll(modifier) and modifier.use_mirror_merge and any(modifier.use_axis)
def apply(self, obj):
modifier = obj.modifiers[self.modifier_name]
modifier.use_mirror_merge = False
bpy.ops.object.modifier_apply(get_context(obj), modifier=modifier.name)
if not self.weld_map:
self.fill_weld_map(obj)
weld_mesh(obj.data, self.weld_map)
def fill_weld_map(self, obj):
mesh = obj.data
num_verts = len(mesh.vertices) // (2 ** self.num_mirrors) # Num of verts before mirroring
merge_dist_sq = self.merge_dist ** 2
# Only consider pairs of mirrored vertices for merging. Probably breaks if flip is enabled
welds = []
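        # This assumes the mirror modifier appends each mirrored copy as a contiguous block after the
        # originals, so vertex i pairs with i + num_part_verts; previously found welds are offset into
        # every new block so all mirrored generations are covered.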
for n in range(self.num_mirrors):
num_part_verts = num_verts * (2 ** n)
new_welds = []
for src_idx, dst_idx in welds:
new_welds.append((src_idx + num_part_verts, dst_idx + num_part_verts))
welds.extend(new_welds)
for vert_idx in range(num_part_verts):
vert = mesh.vertices[vert_idx]
other_vert_idx = vert_idx + num_part_verts
other_vert = mesh.vertices[other_vert_idx]
if get_dist_sq(vert.co, other_vert.co) <= merge_dist_sq:
welds.append((other_vert_idx, vert_idx))
# Resolve the welds into a single dict. Not too robust but weld_verts doesn't complain
self.weld_map = weld_map = {}
weld_map_reverse = defaultdict(list)
for src_idx, dst_idx in welds:
dst_idx = weld_map.get(dst_idx, dst_idx)
weld_map[src_idx] = dst_idx
old_idxs = weld_map_reverse.get(src_idx, [])
for old_idx in old_idxs:
weld_map[old_idx] = dst_idx
weld_map_reverse[dst_idx].append(old_idx)
weld_map_reverse[dst_idx].append(src_idx)
class WeldModifierHandler(ModifierHandler):
modifier_type = 'WELD'
weld_map = None # Specifies vertex pairs to be welded
def __init__(self, modifier):
super().__init__(modifier)
self.merge_dist = modifier.merge_threshold
self.vertex_group = modifier.vertex_group
self.invert_vertex_group = modifier.invert_vertex_group
@classmethod
def poll(cls, modifier):
return super().poll(modifier) and modifier.mode == 'ALL'
def apply(self, obj):
modifier = obj.modifiers[self.modifier_name]
bpy.ops.object.modifier_remove(get_context(obj), modifier=modifier.name)
if not self.weld_map:
self.fill_weld_map(obj)
weld_mesh(obj.data, self.weld_map)
def fill_weld_map(self, obj):
mesh = obj.data
vg = obj.vertex_groups.get(self.vertex_group)
invert = self.invert_vertex_group
bm = bmesh.new()
bm.from_mesh(mesh)
bm.verts.ensure_lookup_table()
deform_layer = bm.verts.layers.deform.active
if deform_layer and vg:
# Handle vertex group filtering
verts = [v for v in bm.verts if bool(v[deform_layer].get(vg.index, 0.0)) != invert]
else:
verts = bm.verts
targetmap = bmesh.ops.find_doubles(bm, verts=verts, dist=self.merge_dist)['targetmap']
self.weld_map = {src.index: dst.index for src, dst in targetmap.items()}
bm.free()
modifier_handler_classes = (
MirrorModifierHandler,
WeldModifierHandler,
ModifierHandler,
)
# Incomplete map of modifier type to icon
modifier_icons = {
'DATA_TRANSFER': 'MOD_DATA_TRANSFER',
'MESH_CACHE': 'MOD_MESHDEFORM',
'MESH_SEQUENCE_CACHE': 'MOD_MESHDEFORM',
'NORMAL_EDIT': 'MOD_NORMALEDIT',
'WEIGHTED_NORMAL': 'MOD_NORMALEDIT',
'UV_PROJECT': 'MOD_UVPROJECT',
'UV_WARP': 'MOD_UVPROJECT',
'VERTEX_WEIGHT_EDIT': 'MOD_VERTEX_WEIGHT',
'VERTEX_WEIGHT_MIX': 'MOD_VERTEX_WEIGHT',
'VERTEX_WEIGHT_PROXIMITY': 'MOD_VERTEX_WEIGHT',
'ARRAY': 'MOD_ARRAY',
'BEVEL': 'MOD_BEVEL',
'BOOLEAN': 'MOD_BOOLEAN',
'BUILD': 'MOD_BUILD',
'DECIMATE': 'MOD_DECIM',
'EDGE_SPLIT': 'MOD_EDGESPLIT',
'NODES': 'NODETREE',
'MASK': 'MOD_MASK',
'MIRROR': 'MOD_MIRROR',
'MULTIRES': 'MOD_MULTIRES',
'REMESH': 'MOD_REMESH',
'SCREW': 'MOD_SCREW',
'SKIN': 'MOD_SKIN',
'SOLIDIFY': 'MOD_SOLIDIFY',
'SUBSURF': 'MOD_SUBSURF',
'TRIANGULATE': 'MOD_TRIANGULATE',
'VOLUME_TO_MESH': 'VOLUME_DATA',
'WELD': 'AUTOMERGE_OFF',
'WIREFRAME': 'MOD_WIREFRAME',
'ARMATURE': 'MOD_ARMATURE',
'CAST': 'MOD_CAST',
'CURVE': 'MOD_CURVE',
'DISPLACE': 'MOD_DISPLACE',
'HOOK': 'HOOK',
'LAPLACIANDEFORM': 'MOD_MESHDEFORM',
'LATTICE': 'MOD_LATTICE',
'MESH_DEFORM': 'MOD_MESHDEFORM',
'SHRINKWRAP': 'MOD_SHRINKWRAP',
'SIMPLE_DEFORM': 'MOD_SIMPLEDEFORM',
'SMOOTH': 'MOD_SMOOTH',
'CORRECTIVE_SMOOTH': 'MOD_SMOOTH',
'LAPLACIANSMOOTH': 'MOD_SMOOTH',
'SURFACE_DEFORM': 'MOD_MESHDEFORM',
'WARP': 'MOD_WARP',
'WAVE': 'MOD_WAVE',
}
ignored_modifier_types = frozenset((
'CLOTH',
'COLLISION',
'DYNAMIC_PAINT',
'EXPLODE',
'FLUID',
'OCEAN',
'PARTICLE_INSTANCE',
'PARTICLE_SYSTEM',
'SOFT_BODY',
))
class GRET_OT_shape_key_apply_modifiers(bpy.types.Operator):
"""Applies viewport modifiers while preserving shape keys"""
bl_idname = "gret.shape_key_apply_modifiers"
bl_label = "Apply Modifiers with Shape Keys"
bl_context = "objectmode"
bl_options = {'REGISTER', 'UNDO'}
modifier_mask: bpy.props.BoolVectorProperty(
name="Apply Modifier",
description="Whether this modifier should be applied",
size=32, # Maximum allowed by Blender, will need some hack if more are required
default=[True] * 32,
)
modifier_info = [] # Only used to draw buttons when operator is invoked
@classmethod
def poll(cls, context):
return context.mode == 'OBJECT' and context.object and context.object.type == 'MESH'
def draw(self, context):
layout = self.layout
layout.ui_units_x = 10.0
obj = context.object
layout.label(text="Select modifiers to apply:")
col = layout.column(align=True)
for modifier_index, (modifier_type, modifier_name) in enumerate(self.modifier_info):
if modifier_type in ignored_modifier_types:
continue
icon = modifier_icons.get(modifier_type, 'BLANK1')
col.prop(self, 'modifier_mask', index=modifier_index, icon=icon, text=modifier_name)
def invoke(self, context, event):
obj = context.object
# Cache modifier info to be shown on panel. Otherwise redo_last won't work correctly
# Side note: the displayed icon for show_viewport is hardcoded to change when toggled on
def should_apply_modifier(mod):
return (mod.show_viewport
and mod.type not in ignored_modifier_types
and mod.type != 'ARMATURE') # Don't apply armatures by default
self.modifier_info = [(mod.type, mod.name) for mod in obj.modifiers]
self.modifier_mask = get_modifier_mask(obj, should_apply_modifier)
return context.window_manager.invoke_props_dialog(self)
def execute(self, context):
obj = context.active_object
if not any(self.modifier_mask[:len(obj.modifiers)]):
# There are no modifiers to apply
return {'FINISHED'}
if obj.data.users > 1:
# Make single user copy
obj.data = obj.data.copy()
num_shape_keys = len(obj.data.shape_keys.key_blocks) if obj.data.shape_keys else 0
if not num_shape_keys:
# No shape keys, just apply the modifiers
for modifier, mask in zip(obj.modifiers[:], self.modifier_mask):
if mask:
apply_modifier(modifier)
return {'FINISHED'}
print(f"Applying modifiers with {num_shape_keys} shape keys")
mesh_copy = obj.data.copy() # Copy for convenience, to be able to call from_existing(fcurve)
shape_keys = obj.data.shape_keys.key_blocks if obj.data.shape_keys else []
shape_key_infos = []
saved_active_shape_key_index = obj.active_shape_key_index
saved_show_only_shape_key = obj.show_only_shape_key
# Start by separating each shape key so modifiers can be applied one by one
shape_key_objs = []
for shape_key in shape_keys:
shape_key_info = ShapeKeyInfo.from_shape_key(shape_key)
shape_key_infos.append(shape_key_info)
new_obj = obj.copy()
new_obj.name = f"{obj.name}_{shape_key.name}"
new_obj.data = obj.data.copy()
shape_key_objs.append(new_obj)
# Handle modifiers accordingly. This means recording welded vertex pairs for mirrors and such
obj.shape_key_clear()
modifier_handlers = []
for modifier, mask in zip(obj.modifiers[:], self.modifier_mask):
if mask:
for modifier_handler_cls in modifier_handler_classes:
if modifier_handler_cls.poll(modifier):
modifier_handler = modifier_handler_cls(modifier)
modifier_handler.apply(obj)
modifier_handlers.append(modifier_handler)
break
# Store vertex coordinates of each shape key with modifiers applied
for sk_info, sk_obj in zip(shape_key_infos, shape_key_objs):
sk_mesh = sk_obj.data
sk_obj.shape_key_clear()
sk_info.put_coords_into(sk_mesh.vertices)
for modifier_handler in modifier_handlers:
modifier_handler.apply(sk_obj)
sk_info.get_coords_from(sk_mesh.vertices)
bpy.data.objects.remove(sk_obj)
bpy.data.meshes.remove(sk_mesh)
# Add the shape keys back
for shape_key_info in shape_key_infos:
shape_key = obj.shape_key_add()
shape_key.interpolation = shape_key_info.interpolation
shape_key.mute = shape_key_info.mute
shape_key.name = shape_key_info.name
shape_key.slider_max = shape_key_info.slider_max
shape_key.slider_min = shape_key_info.slider_min
shape_key.value = shape_key_info.value
shape_key.vertex_group = shape_key_info.vertex_group
if len(shape_key.data) * 3 != len(shape_key_info.coords):
self.report({'ERROR'}, f"Vertex count for {shape_key.name} did not match, "
"the shape key will be lost.")
continue
shape_key_info.put_coords_into(shape_key.data)
# Recreate drivers
if mesh_copy.shape_keys and mesh_copy.shape_keys.animation_data:
for fcurve in mesh_copy.shape_keys.animation_data.drivers:
if obj.data.shape_keys.animation_data is None:
obj.data.shape_keys.animation_data_create()
obj.data.shape_keys.animation_data.drivers.from_existing(src_driver=fcurve)
# Clean up
obj.show_only_shape_key = saved_show_only_shape_key
obj.active_shape_key_index = saved_active_shape_key_index
bpy.data.meshes.remove(mesh_copy)
return {'FINISHED'}
def draw_menu(self, context):
self.layout.operator(GRET_OT_shape_key_apply_modifiers.bl_idname, icon='CHECKMARK')
def register(settings, prefs):
bpy.utils.register_class(GRET_OT_shape_key_apply_modifiers)
bpy.types.MESH_MT_shape_key_context_menu.append(draw_menu)
def unregister():
bpy.types.MESH_MT_shape_key_context_menu.remove(draw_menu)
bpy.utils.unregister_class(GRET_OT_shape_key_apply_modifiers)
|
greisane/gret
|
mesh/shape_key_apply_modifiers.py
|
shape_key_apply_modifiers.py
|
py
| 15,203 |
python
|
en
|
code
| 298 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.single",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "bmesh.new",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "bmesh.ops.weld_verts",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bmesh.ops",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.modifier_apply",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "helpers.get_context",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "log.logd",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bpy.ops.object.modifier_apply",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "helpers.get_context",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "math.get_dist_sq",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "bpy.ops.object.modifier_remove",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "helpers.get_context",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "bmesh.new",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "bmesh.ops.find_doubles",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "bmesh.ops",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.BoolVectorProperty",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "bpy.props",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "helpers.get_modifier_mask",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "bpy.data.objects.remove",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "bpy.data.meshes.remove",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "bpy.data.meshes.remove",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.MESH_MT_shape_key_context_menu.append",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.MESH_MT_shape_key_context_menu.remove",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 398,
"usage_type": "attribute"
}
] |
28969795163
|
"""
Artifact module.
"""
from __future__ import annotations
import typing
from typing import Self
from sdk.entities.artifact.metadata import build_metadata
from sdk.entities.artifact.spec import build_spec
from sdk.entities.base.entity import Entity
from sdk.entities.utils.utils import get_uiid
from sdk.utils.api import DTO_ARTF, api_ctx_create, api_ctx_update
from sdk.utils.exceptions import EntityError
from sdk.utils.factories import get_context, get_default_store
from sdk.utils.file_utils import check_file, get_dir
from sdk.utils.uri_utils import get_name_from_uri, get_uri_scheme, rebuild_uri
if typing.TYPE_CHECKING:
from sdk.entities.artifact.metadata import ArtifactMetadata
from sdk.entities.artifact.spec import ArtifactSpec
class Artifact(Entity):
"""
    A class representing an artifact.
"""
def __init__(
self,
project: str,
name: str,
kind: str = None,
metadata: ArtifactMetadata = None,
spec: ArtifactSpec = None,
local: bool = False,
embedded: bool = False,
uuid: str = None,
**kwargs,
) -> None:
"""
Initialize the Artifact instance.
Parameters
----------
project : str
Name of the project.
name : str
Name of the artifact.
kind : str
Kind of the artifact
metadata : ArtifactMetadata
Metadata of the object.
spec : ArtifactSpec
Specification of the object.
local: bool
If True, run locally.
embedded: bool
If True embed object in backend.
**kwargs
Keyword arguments.
"""
super().__init__()
self.project = project
self.name = name
self.kind = kind if kind is not None else "artifact"
self.metadata = metadata if metadata is not None else build_metadata(name=name)
self.spec = spec if spec is not None else build_spec(self.kind, **{})
self.embedded = embedded
self.id = uuid if uuid is not None else get_uiid()
self._local = local
# Temporary local artifact path (see as_file())
self._temp_path = None
# Set new attributes
self._any_setter(**kwargs)
# Set context
self._context = get_context(self.project)
# Set key in spec store://<project>/artifacts/<kind>/<name>:<uuid>
self.spec.key = (
f"store://{self.project}/artifacts/{self.kind}/{self.name}:{self.id}"
)
#############################
# Save / Export
#############################
def save(self, uuid: str = None) -> dict:
"""
Save artifact into backend.
Parameters
----------
uuid : str
UUID.
Returns
-------
dict
Mapping representation of Artifact from backend.
"""
if self._local:
raise EntityError("Use .export() for local execution.")
obj = self.to_dict()
if uuid is None:
api = api_ctx_create(self.project, DTO_ARTF)
return self._context.create_object(obj, api)
self.id = uuid
api = api_ctx_update(self.project, DTO_ARTF, self.name, uuid)
return self._context.update_object(obj, api)
def export(self, filename: str = None) -> None:
"""
Export object as a YAML file.
Parameters
----------
filename : str
Name of the export YAML file. If not specified, the default value is used.
Returns
-------
None
"""
obj = self.to_dict()
filename = (
filename
if filename is not None
else f"artifact_{self.project}_{self.name}.yaml"
)
self._export_object(filename, obj)
#############################
# Artifacts Methods
#############################
def as_file(self, target: str = None) -> str:
"""
Get artifact as file. In the case of a local store, the store returns the current
path of the artifact. In the case of a remote store, the artifact is downloaded in
a temporary directory.
Parameters
----------
target : str
Target path is the remote path of the artifact where it is stored
Returns
-------
str
Temporary path of the artifact.
"""
# Get store
store = get_default_store()
# If local store, return local artifact path
if store.is_local():
self._check_src()
return self.spec.src_path
# Check if target path is specified
self._check_target(target)
# Check if target path is remote
self._check_remote()
# Download artifact and return path
self._temp_path = store.download(self.spec.target_path)
return self._temp_path
def download(
self, target: str = None, dst: str = None, overwrite: bool = False
) -> str:
"""
Download artifact from backend.
Parameters
----------
target : str
Target path is the remote path of the artifact
dst : str
Destination path as filename
overwrite : bool
Specify if overwrite an existing file
Returns
-------
str
Path of the downloaded artifact.
"""
# Check if target path is specified
self._check_target(target)
# Check if target path is remote
self._check_remote()
# Check if download destination path is specified and rebuild it if necessary
dst = self._rebuild_dst(dst)
# Check if destination path exists for overwrite
self._check_overwrite(dst, overwrite)
# Get store
store = get_default_store()
# Download artifact and return path
return store.download(self.spec.target_path, dst)
def upload(self, source: str = None, target: str = None) -> str:
"""
Upload artifact to backend.
Parameters
----------
source : str
Source path is the local path of the artifact
target : str
Target path is the remote path of the artifact
Returns
-------
str
Path of the uploaded artifact.
"""
# Check if source path is provided.
self._check_src(source)
# Check if source path is local
self._check_local()
# Check if target path is provided.
self._check_target(target, upload=True)
# Check if target path is remote
self._check_remote()
# Get store
store = get_default_store()
# Upload artifact and return remote path
return store.upload(self.spec.src_path, self.spec.target_path)
#############################
# Private Helpers
#############################
def _check_target(self, target: str = None, upload: bool = False) -> None:
"""
Check if target path is specified.
Parameters
----------
target : str
Target path is the remote path of the artifact
upload : bool
Specify if target path is for upload
Returns
-------
None
"""
if self.spec.target_path is None:
if target is None:
if not upload:
raise EntityError("Target path is not specified.")
path = get_dir(self.spec.src_path)
filename = get_name_from_uri(self.spec.src_path)
target_path = rebuild_uri(f"{path}/{filename}")
self.spec.target_path = target_path
return
self.spec.target_path = target
return
def _check_src(self, src: str = None) -> None:
"""
Check if source path is specified.
Parameters
----------
src : str
Source path is the local path of the artifact
Returns
-------
None
Raises
------
Exception
If source path is not specified.
"""
if self.spec.src_path is None:
if src is None:
raise EntityError("Source path is not specified.")
self.spec.src_path = src
def _check_remote(self) -> None:
"""
Check if target path is remote.
Returns
-------
None
Raises
------
Exception
If target path is not remote.
"""
if self.spec.target_path is None:
return
if get_uri_scheme(self.spec.target_path) in ["", "file"]:
raise EntityError("Only remote source URIs are supported for target paths")
def _check_local(self) -> None:
"""
Check if source path is local.
Returns
-------
None
Raises
------
Exception
If source path is not local.
"""
if get_uri_scheme(self.spec.src_path) not in ["", "file"]:
raise EntityError("Only local paths are supported for source paths.")
def _rebuild_dst(self, dst: str = None) -> None:
"""
Check if destination path is specified.
Parameters
----------
dst : str
Destination path as filename
Returns
-------
str
Destination path as filename.
"""
if dst is None:
dst = f"./{get_name_from_uri(self.spec.target_path)}"
return dst
@staticmethod
def _check_overwrite(dst: str, overwrite: bool) -> None:
"""
Check if destination path exists for overwrite.
Parameters
----------
dst : str
Destination path as filename.
overwrite : bool
Specify if overwrite an existing file.
Raises
------
Exception
If destination path exists and overwrite is False.
"""
if check_file(dst) and not overwrite:
raise EntityError(f"File {dst} already exists.")
#############################
# Getters and Setters
#############################
@property
def local(self) -> bool:
"""
Get local flag.
"""
return self._local
@property
def temp_path(self) -> str:
"""
Get temporary path.
"""
return self._temp_path
#############################
# Generic Methods
#############################
@classmethod
def from_dict(cls, obj: dict) -> Self:
"""
Create object instance from a dictionary.
Parameters
----------
obj : dict
Dictionary to create object from.
Returns
-------
Self
Self instance.
"""
parsed_dict = cls._parse_dict(obj)
obj_ = cls(**parsed_dict)
obj_._local = obj_._context.local
return obj_
@staticmethod
def _parse_dict(obj: dict) -> dict:
"""
Parse dictionary.
Parameters
----------
obj : dict
Dictionary to parse.
Returns
-------
dict
Parsed dictionary.
"""
# Mandatory fields
project = obj.get("project")
name = obj.get("name")
if project is None or name is None:
raise EntityError("Project or name are not specified.")
# Optional fields
uuid = obj.get("id")
kind = obj.get("kind")
embedded = obj.get("embedded")
# Build metadata and spec
spec = obj.get("spec")
spec = spec if spec is not None else {}
spec = build_spec(kind=kind, **spec)
metadata = obj.get("metadata", {"name": name})
metadata = build_metadata(**metadata)
return {
"project": project,
"name": name,
"kind": kind,
"uuid": uuid,
"metadata": metadata,
"spec": spec,
"embedded": embedded,
}
def artifact_from_parameters(
project: str,
name: str,
description: str = "",
kind: str = "artifact",
key: str = None,
src_path: str = None,
target_path: str = None,
local: bool = False,
embedded: bool = False,
uuid: str = None,
) -> Artifact:
"""
Create artifact.
Parameters
----------
project : str
Name of the project.
name : str
Identifier of the artifact.
description : str
Description of the artifact.
kind : str
The type of the artifact.
key : str
        Representation of the artifact, e.g. store://<project>/artifacts/<kind>/<name>:<uuid>.
src_path : str
Path to the artifact on local file system or remote storage.
    target_path : str
Destination path of the artifact.
local : bool
Flag to determine if object has local execution.
embedded : bool
Flag to determine if object must be embedded in project.
uuid : str
UUID.
Returns
-------
Artifact
Artifact object.
"""
meta = build_metadata(name=name, description=description)
spec = build_spec(kind, key=key, src_path=src_path, target_path=target_path)
return Artifact(
project=project,
name=name,
kind=kind,
metadata=meta,
spec=spec,
local=local,
embedded=embedded,
uuid=uuid,
)
def artifact_from_dict(obj: dict) -> Artifact:
"""
Create artifact from dictionary.
Parameters
----------
obj : dict
Dictionary to create artifact from.
Returns
-------
Artifact
Artifact object.
"""
return Artifact.from_dict(obj)
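# Rough usage sketch (assumes a configured project context and default store; names are illustrative):
#   artifact = artifact_from_parameters("my-project", "raw-data", src_path="./data.csv",
#                                       target_path="s3://bucket/raw/data.csv")
#   artifact.upload()                # push the local file to the remote store
#   local_copy = artifact.as_file()  # fetch it back to a temporary path when needed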
|
trubbio83/core
|
sdk/sdk/entities/artifact/entity.py
|
entity.py
|
py
| 14,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sdk.entities.base.entity.Entity",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sdk.entities.artifact.metadata.ArtifactMetadata",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sdk.entities.artifact.spec.ArtifactSpec",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sdk.entities.artifact.metadata.build_metadata",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sdk.entities.artifact.spec.build_spec",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sdk.entities.utils.utils.get_uiid",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "sdk.utils.factories.get_context",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sdk.utils.api.api_ctx_create",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sdk.utils.api.DTO_ARTF",
"line_number": 112,
"usage_type": "argument"
},
{
"api_name": "sdk.utils.api.api_ctx_update",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sdk.utils.api.DTO_ARTF",
"line_number": 116,
"usage_type": "argument"
},
{
"api_name": "sdk.utils.factories.get_default_store",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "sdk.utils.factories.get_default_store",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "sdk.utils.factories.get_default_store",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "sdk.utils.file_utils.get_dir",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "sdk.utils.uri_utils.get_name_from_uri",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "sdk.utils.uri_utils.rebuild_uri",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "sdk.utils.uri_utils.get_uri_scheme",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "sdk.utils.uri_utils.get_uri_scheme",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "sdk.utils.uri_utils.get_name_from_uri",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "sdk.utils.file_utils.check_file",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "typing.Self",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "sdk.utils.exceptions.EntityError",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "sdk.entities.artifact.spec.build_spec",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "sdk.entities.artifact.metadata.build_metadata",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "sdk.entities.artifact.metadata.build_metadata",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "sdk.entities.artifact.spec.build_spec",
"line_number": 514,
"usage_type": "call"
}
] |
392386374
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# random data
A = [2,5,7,9,11,16,19,23,22,29,29,35,37,40,46]
b = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
# Visualize data
plt.plot(A,b,'ro')
# array to [[ ]]
# change row vector to column vector
A = np.array([A]).T
b = np.array([b]).T
# Create vector 1
ones = np.ones_like(A, dtype = np.int8)
A = np.concatenate((A,ones),axis = 1)
# Use formula
x = np.linalg.inv(A.transpose().dot(A)).dot(A.transpose()).dot(b)
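# normal equation for least squares: x = (A^T A)^-1 A^T b
# (np.linalg.lstsq or np.linalg.pinv would give the same fit with better numerical stability)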
x0 = np.array([1,46]).T
y0 = x[0][0]*x0 + x[1][0]
# linear algebra has no "matrix plus scalar" operation,
# but numpy broadcasts the scalar and adds it to every element of the matrix
# Test predict data
x_test = 12
y_test = x[0][0]*x_test + x[1][0]
print(y_test)
# Visualize x0,y0
plt.plot(x0,y0)
plt.show()
|
suanthuy/AI_Project
|
Unit3.1_linear.py
|
Unit3.1_linear.py
|
py
| 773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
22779572502
|
from collections import deque
vowels = deque(x for x in input().split())
consonants = [x for x in input().split()]
flowers = {
"rose": [],
"tulip": [],
"lotus": [],
"daffodil": []
}
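# Matching idea: every letter found in a flower name is appended count(letter) times, so a flower
# is completely spelled out exactly when its list of found letters reaches the length of the word.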
def check_for_a_match():
for word, found in flowers.items():
if len(found) == len(word):
return word
while vowels and consonants:
current_vowel = vowels.popleft()
current_consonant = consonants.pop()
for flower in flowers.keys():
if current_vowel in flower and current_vowel not in flowers[flower]:
flowers[flower].extend(current_vowel * (flower.count(current_vowel)))
if current_consonant in flower and current_consonant not in flowers[flower]:
flowers[flower].extend(current_consonant * (flower.count(current_consonant)))
result = check_for_a_match()
if result:
print(f"Word found: {result}")
break
else:
print("Cannot find any word!")
if vowels:
print(f"Vowels left: {' '.join(vowels)}")
if consonants:
print(f"Consonants left: {' '.join(consonants)}")
|
DanieII/SoftUni-Advanced-2023-01
|
advanced/exam_practice/flower_finder.py
|
flower_finder.py
|
py
| 1,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 3,
"usage_type": "call"
}
] |
24531644863
|
import os
import json
import random as rd
from copy import deepcopy
from matplotlib.pylab import *
import math
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
# import torch_xla
# import torch_xla.core.xla_model as xm
device = torch.device("cuda")
def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False):
""" [batch_size, max token length, dim_emb]
"""
bS, mL, eS = wemb_l.shape
    # sort before packing
l = array(l)
perm_idx = argsort(-l)
perm_idx_inv = generate_perm_inv(perm_idx)
# pack sequence
packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],
l[perm_idx],
batch_first=True)
# Time to encode
if hc0 is not None:
hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])
# ipdb.set_trace()
packed_wemb_l = packed_wemb_l.float() # I don't know why..
packed_wenc, hc_out = lstm(packed_wemb_l, hc0)
hout, cout = hc_out
# unpack
wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)
if last_only:
        # Take only final outputs for each column.
wenc = wenc[tuple(range(bS)), l[perm_idx] - 1] # [batch_size, dim_emb]
wenc.unsqueeze_(1) # [batch_size, 1, dim_emb]
wenc = wenc[perm_idx_inv]
if return_hidden:
        # hout.shape = [number_of_direction * num_of_layer, seq_len(=batch size), dim * number_of_direction ] w/ batch_first.. w/o batch_first? I need to see.
hout = hout[:, perm_idx_inv].to(device)
cout = cout[:, perm_idx_inv].to(device) # Is this correct operation?
return wenc, hout, cout
else:
return wenc
def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs):
wenc_hpu, hout, cout = encode(lstm,
wemb_hpu,
l_hpu,
return_hidden=True,
hc0=None,
last_only=True)
wenc_hpu = wenc_hpu.squeeze(1)
bS_hpu, mL_hpu, eS = wemb_hpu.shape
hS = wenc_hpu.size(-1)
wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS)
wenc_hs = wenc_hs.to(device)
# Re-pack according to batch.
# ret = [B_NLq, max_len_headers_all, dim_lstm]
st = 0
for i, l_hs1 in enumerate(l_hs):
wenc_hs[i, :l_hs1] = wenc_hpu[st:(st + l_hs1)]
st += l_hs1
return wenc_hs
def generate_perm_inv(perm):
    # Definitely correct.
    perm_inv = zeros(len(perm), dtype=int) # Was an undefined int32 variable
for i, p in enumerate(perm):
perm_inv[int(p)] = i
return perm_inv
def pred_sc(s_sc):
"""
    return: [ pr_sc1_i, pr_sc2_i, ...]
"""
# get g_num
pr_sc = []
for s_sc1 in s_sc:
pr_sc.append(s_sc1.argmax().item())
return pr_sc
def pred_sc_beam(s_sc, beam_size):
"""
    return: [ pr_sc_beam1_i, pr_sc_beam2_i, ...]
"""
# get g_num
pr_sc_beam = []
for s_sc1 in s_sc:
val, idxes = s_sc1.topk(k=beam_size)
pr_sc_beam.append(idxes.tolist())
return pr_sc_beam
def pred_sa(s_sa):
"""
    return: [ pr_sa1_i, pr_sa2_i, ...]
"""
# get g_num
pr_sa = []
for s_sa1 in s_sa:
pr_sa.append(s_sa1.argmax().item())
return pr_sa
def pred_wn(s_wn):
"""
    return: [ pr_wn1_i, pr_wn2_i, ...]
"""
# get g_num
pr_wn = []
for s_wn1 in s_wn:
pr_wn.append(s_wn1.argmax().item())
# print(pr_wn, s_wn1)
# if s_wn1.argmax().item() == 3:
# input('')
return pr_wn
def pred_wc(wn, s_wc):
"""
return: [ pr_wc1_i, pr_wc2_i, ...]
! Returned index is sorted!
"""
# get g_num
pr_wc = []
for b, wn1 in enumerate(wn):
s_wc1 = s_wc[b]
pr_wc1 = argsort(-s_wc1.data.cpu().numpy())[:wn1]
pr_wc1.sort()
pr_wc.append(list(pr_wc1))
return pr_wc
def pred_wo(wn, s_wo):
"""
    return: [ pr_wo1, pr_wo2, ...], one list of operator indices per example
"""
# s_wo = [B, 4, n_op]
pr_wo_a = s_wo.argmax(dim=2) # [B, 4]
# get g_num
pr_wo = []
for b, pr_wo_a1 in enumerate(pr_wo_a):
wn1 = wn[b]
pr_wo.append(list(pr_wo_a1.data.cpu().numpy()[:wn1]))
return pr_wo
def topk_multi_dim(tensor, n_topk=1, batch_exist=True):
if batch_exist:
idxs = []
for b, tensor1 in enumerate(tensor):
idxs1 = []
tensor1_1d = tensor1.reshape(-1)
values_1d, idxs_1d = tensor1_1d.topk(k=n_topk)
idxs_list = unravel_index(idxs_1d.cpu().numpy(), tensor1.shape)
# (dim0, dim1, dim2, ...)
# reconstruct
for i_beam in range(n_topk):
idxs11 = []
for idxs_list1 in idxs_list:
idxs11.append(idxs_list1[i_beam])
idxs1.append(idxs11)
idxs.append(idxs1)
else:
tensor1 = tensor
idxs1 = []
tensor1_1d = tensor1.reshape(-1)
values_1d, idxs_1d = tensor1_1d.topk(k=n_topk)
idxs_list = unravel_index(idxs_1d.numpy(), tensor1.shape)
# (dim0, dim1, dim2, ...)
# reconstruct
for i_beam in range(n_topk):
idxs11 = []
for idxs_list1 in idxs_list:
idxs11.append(idxs_list1[i_beam])
idxs1.append(idxs11)
idxs = idxs1
return idxs
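# --- Illustrative note (not part of the original file) ---
# Hedged sketch: topk_multi_dim flattens each per-example tensor, takes the
# top-k of the flattened view, and maps the flat indices back with
# unravel_index, so each beam entry is a list of per-dimension indices.
#   s = torch.rand(2, 4, 3)                # [B, dim0, dim1]
#   idxs = topk_multi_dim(s, n_topk=2)     # idxs[b][i_beam] == [i_dim0, i_dim1]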
def remap_sc_idx(idxs, pr_sc_beam):
for b, idxs1 in enumerate(idxs):
for i_beam, idxs11 in enumerate(idxs1):
sc_beam_idx = idxs[b][i_beam][0]
sc_idx = pr_sc_beam[b][sc_beam_idx]
idxs[b][i_beam][0] = sc_idx
return idxs
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
"""
Check whether pr_sc, pr_sa are allowed pairs or not.
agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
"""
bS = len(pr_sc)
check = [False] * bS
for b, pr_sc1 in enumerate(pr_sc):
pr_sa1 = pr_sa[b]
hd_types1 = tb[b]['types']
hd_types11 = hd_types1[pr_sc1]
if hd_types11 == 'text':
if pr_sa1 == 0 or pr_sa1 == 3: # ''
check[b] = True
else:
check[b] = False
elif hd_types11 == 'real':
check[b] = True
else:
raise Exception("New TYPE!!")
return check
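# --- Illustrative note (not part of the original file) ---
# Hedged example of the rule above: a 'text' column only allows the empty
# aggregation (index 0) or COUNT (index 3), while a 'real' column allows any.
#   tb = [{'types': ['text', 'real']}]
#   check_sc_sa_pairs(tb, pr_sc=[0], pr_sa=[5])   # -> [False] (AVG over a text column)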
def pred_wvi_se_beam(max_wn, s_wv, beam_size):
"""
s_wv: [B, 4, mL, 2]
- predict best st-idx & ed-idx
output:
pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed].
prob_wvi_beam = [B, max_wn, n_pairs]
"""
bS = s_wv.shape[0]
# [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1]
s_wv_st, s_wv_ed = s_wv.split(1, dim=3)
s_wv_st = s_wv_st.squeeze(3) # [B, 4, mL, 1] -> [B, 4, mL]
s_wv_ed = s_wv_ed.squeeze(3)
prob_wv_st = F.softmax(s_wv_st, dim=-1).detach().to('cpu').numpy()
prob_wv_ed = F.softmax(s_wv_ed, dim=-1).detach().to('cpu').numpy()
k_logit = int(ceil(sqrt(beam_size)))
n_pairs = k_logit**2
assert n_pairs >= beam_size
values_st, idxs_st = s_wv_st.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit]
values_ed, idxs_ed = s_wv_ed.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit]
# idxs = [B, k_logit, 2]
    # Generate all possible combinations of st, ed indices & their probabilities
    pr_wvi_beam = [] # [B, max_wn, k_logit**2 [st, ed] pairs]
prob_wvi_beam = zeros([bS, max_wn, n_pairs])
for b in range(bS):
pr_wvi_beam1 = []
idxs_st1 = idxs_st[b]
idxs_ed1 = idxs_ed[b]
for i_wn in range(max_wn):
idxs_st11 = idxs_st1[i_wn]
idxs_ed11 = idxs_ed1[i_wn]
pr_wvi_beam11 = []
pair_idx = -1
for i_k in range(k_logit):
for j_k in range(k_logit):
pair_idx += 1
st = idxs_st11[i_k].item()
ed = idxs_ed11[j_k].item()
pr_wvi_beam11.append([st, ed])
p1 = prob_wv_st[b, i_wn, st]
p2 = prob_wv_ed[b, i_wn, ed]
prob_wvi_beam[b, i_wn, pair_idx] = p1*p2
pr_wvi_beam1.append(pr_wvi_beam11)
pr_wvi_beam.append(pr_wvi_beam1)
# prob
return pr_wvi_beam, prob_wvi_beam
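# --- Illustrative note (not part of the original file) ---
# Hedged shape sketch for pred_wvi_se_beam: with beam_size=4, k_logit is
# ceil(sqrt(4)) = 2, so n_pairs = 4 start/end combinations are scored per
# condition slot.
#   s_wv = torch.rand(2, 4, 10, 2)                    # [B, max_wn, mL, 2]
#   pr, prob = pred_wvi_se_beam(4, s_wv, beam_size=4)
#   # pr: [B][max_wn][n_pairs] lists of [st, ed]; prob: ndarray [B, max_wn, n_pairs]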
def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu):
"""
    - Convert to the string of white-space-separated tokens
    - Ad-hoc addition.
"""
pr_wv_str_wp = [] # word-piece version
pr_wv_str = []
for b, pr_wvi1 in enumerate(pr_wvi):
pr_wv_str_wp1 = []
pr_wv_str1 = []
wp_to_wh_index1 = wp_to_wh_index[b]
nlu_wp_t1 = nlu_wp_t[b]
nlu_t1 = nlu_t[b]
for i_wn, pr_wvi11 in enumerate(pr_wvi1):
st_idx, ed_idx = pr_wvi11
# Ad-hoc modification of ed_idx to deal with wp-tokenization effect.
# e.g.) to convert "butler cc (" ->"butler cc (ks)" (dev set 1st question).
pr_wv_str_wp11 = nlu_wp_t1[st_idx:ed_idx+1]
pr_wv_str_wp1.append(pr_wv_str_wp11)
st_wh_idx = wp_to_wh_index1[st_idx]
ed_wh_idx = wp_to_wh_index1[ed_idx]
pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx+1]
pr_wv_str1.append(pr_wv_str11)
pr_wv_str_wp.append(pr_wv_str_wp1)
pr_wv_str.append(pr_wv_str1)
return pr_wv_str, pr_wv_str_wp
def merge_wv_t1_eng(where_str_tokens, NLq):
"""
    Almost copied from SQLNet.
    The main purpose is to pad blank spaces while combining tokens.
"""
nlq = NLq.lower()
where_str_tokens = [tok.lower() for tok in where_str_tokens]
alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789$'
special = {'-LRB-': '(',
'-RRB-': ')',
'-LSB-': '[',
'-RSB-': ']',
'``': '"',
'\'\'': '"',
}
    # '--': '\u2013'} # this generates an error for the test 5661 case.
ret = ''
double_quote_appear = 0
for raw_w_token in where_str_tokens:
        # if '' (empty string) or None, continue
if not raw_w_token:
continue
# Change the special characters
# maybe necessary for some case?
w_token = special.get(raw_w_token, raw_w_token)
# check the double quote
if w_token == '"':
double_quote_appear = 1 - double_quote_appear
# Check whether ret is empty. ret is selected where condition.
if len(ret) == 0:
pass
# Check blank character.
elif len(ret) > 0 and ret + ' ' + w_token in nlq:
# Pad ' ' if ret + ' ' is part of nlq.
ret = ret + ' '
elif len(ret) > 0 and ret + w_token in nlq:
            pass # already in good form; ret + w_token is performed below.
# Below for unnatural question I guess. Is it likely to appear?
elif w_token == '"':
if double_quote_appear:
                ret = ret + ' ' # pad a space before the token when this '"' closes a quotation
# for the case of opening, no blank line.
elif w_token[0] not in alphabet:
            pass # a non-alphabet token does not get a padded space.
# when previous character is the special case.
elif (ret[-1] not in ['(', '/', '\u2013', '#', '$', '&']) and (ret[-1] != '"' or not double_quote_appear):
ret = ret + ' '
ret = ret + w_token
return ret.strip()
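# --- Illustrative note (not part of the original file) ---
# Hedged example of the spacing rule in merge_wv_t1_eng: a space is inserted
# only when the padded string still occurs in the question, so punctuation
# stays attached exactly as it appears in the original NLq.
#   merge_wv_t1_eng(['butler', 'cc', '(', 'ks', ')'], 'what is butler cc (ks)?')
#   # -> 'butler cc (ks)'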
|
DebadityaPal/RoBERTa-NL2SQL
|
seq2sql_model_internal_functions.py
|
seq2sql_model_internal_functions.py
|
py
| 11,929 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.rnn.pack_padded_sequence",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.rnn.pad_packed_sequence",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 275,
"usage_type": "name"
}
] |