seq_id
string | text
string | repo_name
string | sub_path
string | file_name
string | file_ext
string | file_size_in_byte
int64 | program_lang
string | lang
string | doc_type
string | stars
int64 | dataset
string | pt
string | api
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
447397564
|
from ....schema import Schema
from ....exc import InvalidOperationError
from enum import Enum, auto
class SchemaType(Enum):
    """Level of the database-object hierarchy that a Schema instance represents."""
    CATALOG = auto()  # a top-level catalog (database)
    SCHEMA = auto()   # a schema (namespace) within a catalog
class Dialect:
    """
    A generic SQL dialect that targets the SQL standard rather than any
    particular DBMS.  Use it when the dialect of the underlying database is
    unknown, or subclass it so concrete dialects can share this behaviour.
    """

    @classmethod
    def get_schema_query(cls, schema: Schema):
        """
        Build a query that lists the database objects contained in ``schema``.

        :param schema: The schema whose contents to pull.
        :return: A 2-tuple of (SQL text, list of query arguments).
        :raises InvalidOperationError: if the schema's internal type is unknown.
        """
        kind = schema.internal_type
        if kind is None:
            # The root: fetch catalogs.  No widely-supported standard way to
            # enumerate every catalog exists, so this is a best-effort query.
            return 'SELECT DISTINCT CATALOG_NAME FROM INFORMATION_SCHEMA.SCHEMATA;', []
        if kind is SchemaType.CATALOG:
            # Fetch the schemata belonging to this catalog.
            return 'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE CATALOG_NAME = {};', [schema.name]
        if kind is SchemaType.SCHEMA:
            # Fetch tables, views, and routines contained in the schema.
            args = [schema.parent.name, schema.name, schema.parent.name, schema.name]
            return '''
SELECT TABLE_NAME, TABLE_TYPE
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_CATALOG = {} AND TABLE_SCHEMA = {}
UNION ALL
SELECT ROUTINE_NAME, ROUTINE_TYPE
FROM INFORMATION_SCHEMA.ROUTINES
WHERE ROUTINE_CATALOG = {} AND ROUTINE_SCHEMA = {};
''', args
        raise InvalidOperationError(f'Unknown schema type {schema.internal_type}')

    @classmethod
    def parse_schema_results(cls, schema: Schema, results):
        """
        Turn the result sets produced by :meth:`get_schema_query` into child
        ``Schema`` objects of ``schema``.

        :param schema: The schema whose query results are being parsed.
        :param results: Iterable of result sets (expected to contain one).
        :raises InvalidOperationError: if the schema's internal type is unknown.
        """
        objects = None
        for result_set in results:  # there should be only one result set
            objects = result_set
        kind = schema.internal_type
        if kind is None:
            # Each row names a catalog.
            return [Schema(row[0], schema, SchemaType.CATALOG) for row in objects]
        if kind is SchemaType.CATALOG:
            # Each row names a schema.
            return [Schema(row[0], schema, SchemaType.SCHEMA) for row in objects]
        if kind is SchemaType.SCHEMA:
            raise NotImplementedError('Tables, views, and routines are not yet implemented')
        raise InvalidOperationError(f'Unknown schema type {schema.internal_type}')
| null |
ormr/drivers/sql/dialects/generic.py
|
generic.py
|
py
| 2,899 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "enum.Enum",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "enum.auto",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "schema.Schema",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "schema.internal_type",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "schema.internal_type",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "schema.name",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "schema.internal_type",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "schema.parent",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "schema.name",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "exc.InvalidOperationError",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "schema.internal_type",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "schema.Schema",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "schema.internal_type",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "schema.Schema",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "schema.internal_type",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "schema.Schema",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "schema.internal_type",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "exc.InvalidOperationError",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "schema.internal_type",
"line_number": 66,
"usage_type": "attribute"
}
] |
22839791
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from blog.models import BlogData
from login import userlogin
def index(request):
    """Render the blog landing page with the ten most recent posts."""
    recent_posts = list(BlogData.objects.all().order_by('-id')[:10])
    context = {
        "blogtoplist": recent_posts,
        #"user_info": userlogin.get_seesion_userlogin(request)
    }
    return render_to_response("blog/index.html", context,
                              context_instance=RequestContext(request))
def alllist(request):
    """Render a page listing every blog post, newest first."""
    context = {
        "bloglist": list(BlogData.objects.all().order_by('-id')),
        #"user_info": userlogin.get_seesion_userlogin(request)
    }
    return render_to_response("blog/alllist.html", context,
                              context_instance=RequestContext(request))
def post(request,blog_id):
    """Render a single blog post looked up by primary key."""
    entry = BlogData.objects.get(pk=blog_id)
    context = {
        "blogdata": entry,
        #"user_info": userlogin.get_seesion_userlogin(request)
    }
    return render_to_response("blog/post.html", context,
                              context_instance=RequestContext(request))
| null |
blog/views.py
|
views.py
|
py
| 1,062 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "blog.models.BlogData.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "blog.models.BlogData.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "blog.models.BlogData",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "blog.models.BlogData.objects.all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "blog.models.BlogData.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "blog.models.BlogData",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "blog.models.BlogData.objects.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "blog.models.BlogData.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "blog.models.BlogData",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 33,
"usage_type": "call"
}
] |
499193655
|
"""empty message
Revision ID: dadec3d6230a
Revises: 8a60cf7d738c
Create Date: 2020-02-12 15:44:34.235426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dadec3d6230a'
down_revision = '8a60cf7d738c'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``userdata`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.Text(), nullable=True),
        sa.Column('email', sa.Text(), nullable=True),
        sa.Column('raidid', sa.ARRAY(sa.Text()), nullable=True),
        sa.Column('raidname', sa.ARRAY(sa.Text()), nullable=True),
        sa.Column('pwhash', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('userdata', *columns)
    # ### end Alembic commands ###
def downgrade():
    """Reverse the migration by dropping the ``userdata`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('userdata')
    # ### end Alembic commands ###
| null |
xivtools.db/migrations/versions/dadec3d6230a_.py
|
dadec3d6230a_.py
|
py
| 966 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ARRAY",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ARRAY",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 35,
"usage_type": "name"
}
] |
626465780
|
#! /usr/bin/env python
# all constants containing a value "set to ..." are to be customized
#
# Send an HTML "tool approved" notification e-mail over SMTP-SSL and append
# the outcome (success or failure) to a debug log file.
#
# Usage: <script> toolName userTo userToEmail
import smtplib, sys, datetime, time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import datetime, glob, os

EMAIL_EDITOR_FROM = "[email protected]"
EMAIL_EDITOR_FROM_NAME = "EVIDENCE Project: Forensics Tools Catalogue - Editor"
EMAIL_APPROVAL_FROM_NAME = "EVIDENCE Project: Forensics Tools Catalogue - Editor Approval"
EMAIL_EDITOR_ADMIN = "...emailUserAdmin"
EMAIL_EDITOR_ADMIN_PWD = "...emailUserAdminPassword"
EMAIL_SMTP_HOST = "...smtpHost"
EMAIL_SMTP_PORT = 465

# Log timestamp parts: zero-padded YYYYMMDD and HH:MM:SS.
oggi = datetime.datetime.today()
sOggi = oggi.strftime("%Y%m%d")
sOra = time.strftime("%H:%M:%S")

fLog = open(os.getcwd() + "/debug/dfte.send.mail.log", "a")

if len(sys.argv) < 4:
    fLog.write(sOggi + ' ' + sOra + ' - Usage: ' + sys.argv[0] + ' toolName userTo userToEmail' + "\n")
    fLog.close()  # close the log explicitly before bailing out
    sys.exit(100)

toolName = sys.argv[1]
userName = sys.argv[2]
userEmail = sys.argv[3]

emailMsg = MIMEMultipart('alternative')
emailMsg['Subject'] = EMAIL_APPROVAL_FROM_NAME
emailMsg['From'] = EMAIL_EDITOR_FROM
emailMsg['To'] = userEmail
#emailMsg['Cc'] = timeUser
#emailMsg['Bcc'] = "[email protected]"

emailBody = "<p>Dear " + userName + "<br/><br/>The following tool </p>"
emailBody += "<strong>" + toolName + "</strong><br/><br/> has just been approved! <br/><br/>"
emailBody += "From now on it is available on the online Forensics Catalogue <br/><br/> "
emailBody += "<blockquote>wp4.evidenceproject.eu </blockquote><br/><br/>Best Regards"
part = MIMEText(emailBody, 'html')
emailMsg.attach(part)

try:
    #--- server = smtplib.SMTP("smtp.gmail.com", 587)
    server = smtplib.SMTP_SSL(EMAIL_SMTP_HOST, EMAIL_SMTP_PORT)
    server.login(EMAIL_EDITOR_ADMIN, EMAIL_EDITOR_ADMIN_PWD)
    # BUG FIX: the 'Cc' and 'Bcc' headers are never set, so emailMsg['Cc'] and
    # emailMsg['Bcc'] are None; passing None addresses to sendmail() breaks the
    # send.  Forward only the recipients that are actually present.
    recipients = [addr for addr in (emailMsg['To'], emailMsg['Cc'], emailMsg['Bcc']) if addr]
    server.sendmail(emailMsg['From'], recipients, emailMsg.as_string())
    server.quit()
    fLog.write(sOggi + ' ' + sOra + ' - successfully sent the approval mail: ' + sys.argv[1] + ' ' + sys.argv[2] + '\n')
except Exception:
    # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
    fLog.write(sOggi + ' ' + sOra + ' - failed to send the approval mail: ' + sys.argv[1] + ' ' + sys.argv[2] + '\n')
fLog.close()
| null |
tools/default.dfte.send.mail.ssl.approval.py
|
default.dfte.send.mail.ssl.approval.py
|
py
| 2,348 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.today",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 71,
"usage_type": "attribute"
}
] |
578273215
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
import BF_functions as bff
'''
Program to extract radial velocities from a double-lined binary star spectrum.
Uses the Broadening Function technique.
Meredith Rawls
2014-2015
Based loosely on Rucinski's BFall_IDL.pro, and uses the PyAstronomy tools.
http://www.astro.utoronto.ca/~rucinski/BFdescription.html
http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/svd.html
In practice, you will run this twice: once to do the initial BF, and then again
to properly fit the peaks of each BF with a Gaussian.
INPUT
infiles: single-column file with one FITS or TXT filename (w/ full path) per line
1st entry must be for the template star (e.g., arcturus or phoenix model)
(the same template is used to find RVs for both stars)
NO comments are allowed in this file
FUN FACT: unless APOGEE, these should be continuum-normalized to 1 !!!
bjdinfile: columns 0,1,2 must be filename, BJD, BCV (e.g., from IRAF bcvcorr)
top row must be for the template star (e.g., arcturus)
(the 0th column is never used, but typically looks like infiles_BF.txt)
one line per observation
comments are allowed in this file using #
gausspars: your best initial guesses for fitting gaussians to the BF peaks
the parameters are [amp1, offset1, width1, amp2, offset2, width2]
the top line is ignored (template), but must have six values
one line per observation
comments are allowed in this file using #
OUTPUT
outfile: a file that will be created with 8 columns: BJD midpoint, orbital phase,
Kepler BJD, RV1, RV1 error, RV2, RV2 error
bfoutfile: a file that contains all the BF function data (raw RV, BF, gaussian model)
IMMEDIATELY BELOW, IN THE CODE
You need to specify whether you have APOGEE (near-IR) or "regular" (e.g., ARCES)
spectra with the 'isAPOGEE' flag. You also need to set the binary's PERIOD and BJD0,
both in days, and the constant RV and BCV of whatever template you are using.
'''
##########
# YOU NEED TO HAVE THESE INPUT FILES !!!
# THE OUTPUT FILE WILL BE CREATED FOR YOU
# EXAMPLE INFILES AND OUTFILES
#infiles = 'infiles.txt'; bjdinfile = 'bjdinfile.txt'
#gausspars = 'gausspars.txt'
#outfile = 'rvoutfile.txt'; bfoutfile = 'bfoutfile.txt'; bfgaussout = 'bfgaussout.txt'
#4851217
#infiles = 'data/4851217/4851217infiles.txt'; bjdinfile = 'data/4851217/4851217bjdinfile.txt'
#gausspars = 'data/4851217/4851217gausspars.txt'
#outfile = 'data/4851217/4851217Outfile.txt'; bfoutfile = 'data/4851217/4851217BFOut.txt'
#5285607
#infiles = 'data/5285607/5285607infiles.txt'; bjdinfile = 'data/5285607/5285607bjdinfile.txt'
#gausspars = 'data/5285607/5285607gausspars.txt'
#outfile = 'data/5285607/5285607OutfileJC.txt'; bfoutfile = 'data/5285607/5285607BFOut1.txt';
#gaussoutfile = 'data/5285607/5285607gaussout.txt'; areaout = 'data/5285607/5285607BFArea.txt'
#5285607 APSTAR ORDER
#infiles = 'data/5285607/5285607infilesApstar.txt'; bjdinfile = 'data/5285607/5285607bjdinfileApstar.txt'
#gausspars = 'data/5285607/5285607gaussparsApstar.txt'
#outfile = 'data/5285607/5285607OutfileApstar.txt'; bfoutfile = 'data/5285607/5285607BFOutApstar.txt'
#4075064
#infiles = 'data/4075064/4075064infiles.txt'; bjdinfile = 'data/4075064/4075064bjdinfile.txt'
#gausspars = 'data/4075064/4075064gausspars.txt'
#outfile = 'data/4075064/4075064outfile.txt'; bfoutfile = 'data/4075064/4075064BFdata.txt'
#3848919
#infiles = 'data/3848919/3848919infiles.txt'; bjdinfile = 'data/3848919/3848919bjdinfile.txt'
#gausspars = 'data/3848919/3848919gausspars.txt'
#outfile = 'data/3848919/3848919outfile.txt'; bfoutfile = 'data/3848919/3848919BFdata.txt'
#6610219
#infiles = 'data/6610219/6610219infiles.txt'; bjdinfile = 'data/6610219/6610219bjdinfile.txt'
#gausspars = 'data/6610219/6610219gausspars1.txt'
#outfile = 'data/6610219/6610219outfile.txt'; bfoutfile = 'data/6610219/6610219BFOut.txt'
#4285087
#infiles = 'data/4285087/4285087infiles.txt'; bjdinfile = 'data/4285087/4285087bjdinfile.txt'
#gausspars = 'data/4285087/4285087gausspars.txt'
#outfile = 'data/4285087/4285087outfile.txt'; bfoutfile = 'data/4285087/4285087BFOut.txt'
#gaussoutfile = 'data/4285087/4285087gaussout.txt'; areaout = 'data/4285087/4285087BFArea.txt'
#6131659
#infiles = 'data/6131659/6131659infiles.txt'; bjdinfile = 'data/6131659/6131659bjdinfile.txt'
#gausspars = 'data/6131659/6131659gausspars.txt'
#outfile = 'data/6131659/6131659outfile.txt'; bfoutfile = 'data/6131659/6131659BFOut.txt'
#gaussoutfile = 'data/6131659/6131659gaussout.txt'; areaout = 'data/6131659/6131659BFArea.txt'
#6449358
infiles = 'data/6449358/6449358infiles.txt'; bjdinfile = 'data/6449358/6449358bjdinfile.txt'
gausspars = 'data/6449358/6449358gausspars.txt'
outfile = 'data/6449358/6449358Outfile.txt'; bfoutfile = 'data/6449358/6449358BFOut.txt'
gaussoutfile = 'data/6449358/6449358gaussout.txt'; areaout = 'data/6449358/6449358BFArea.txt'
#5284133
#infiles = 'data/5284133/5284133infiles.txt'; bjdinfile = 'data/5284133/5284133bjdinfile.txt'
#gausspars = 'data/5284133/5284133gausspars.txt'
#outfile = 'data/5284133/5284133Outfile.txt'; bfoutfile = 'data/5284133/5284133BFOut.txt'
#6778289
#infiles = 'data/6778289/6778289infiles.txt'; bjdinfile = 'data/6778289/6778289bjdinfiles.txt'
#gausspars = 'data/6778289/6778289gausspars.txt'
#outfile = 'data/6778289/6778289Outfile.txt'; bfoutfile = 'data/6778289/6778289BFOut.txt'
#gaussoutfile = 'data/6778289/6778289gaussout.txt'; areaout = 'data/6778289/6778289BFArea.txt'
#6781535 (Suspected Triple System)
#infiles = 'data/6781535/6781535infiles.txt'; bjdinfile = 'data/6781535/6781535bjdinfile.txt'
#gausspars = 'data/6781535/6781535gausspars.txt'
#outfile = 'data/6781535/6781535Outfile1.txt'; bfoutfile = 'data/6781535/6781535BFOut.txt'
#gaussoutfile = 'data/6781535/6781535gaussout.txt'; areaout = 'data/6781535/6781535BFArea.txt'
#6864859
#infiles = 'data/6864859/6864859infiles.txt'; bjdinfile = 'data/6864859/6864859bjdinfile.txt'
#gausspars = 'data/6864859/6864859gausspars.txt'
#outfile = 'data/6864859/6864859Outfile.txt'; bfoutfile = 'data/6864859/6864859BFOut.txt'
#gaussoutfile = 'data/6864859/6864859gaussout.txt'; areaout = 'data/6864859/6864859BFArea.txt'
#3247294
#infiles = 'data/3247294/3247294infiles.txt'; bjdinfile = 'data/3247294/3247294bjdinfile.txt'
#gausspars = 'data/3247294/3247294gausspars.txt'
#outfile = 'data/3247294/3247294Outfile.txt'; bfoutfile = 'data/3247294/3247294BFOut.txt'
# ORBITAL PERIOD AND ZEROPOINT !!!
#period = 2.47028; BJD0 = 2455813.69734 # 4851217
#period = 3.8994011; BJD0 = 2454959.576010 # 5285607
period = 5.7767904; BJD0 = 2454955.073410 # 6449358
#period = 8.7845759; BJD0 = 245800.46231 #5284133
#period = 30.13015; BJD0 = 2456557.73097 #6778289
#period = 9.1220856; BJD0 = 2454971.834534 #6781535
#period = 40.8778427; BJD0 = 2454955.556300 #6864859
#period = 61.4228063; BJD0 = 2455813.69734 #4075064
#period = 1.0472603; BJD0 = 2455811.61005 #3848919
#period = 11.3009948; BJD0 = 2456557.73097 #6610219
#period = 4.4860312; BJD0 = 2454966.450124 #4285087
#period = 17.5278303; BJD0 = 2454960.041397 #6131659
#period = 67.4188276; BJD0 = 2454966.433454 #3247294
# STUFF YOU NEED TO DEFINE CORRECTLY !!!
# if you are fitting three gaussians, you had better give 3 sets of amplimits and widlimits
isAPOGEE = True # toggle to use near-IR stuff, or not
SpecPlot = False # toggle to plot spectra before BFs, or not
bjdoffset = 2454833. # difference between real BJDs and 'bjdfunny' (truncated BJDs)
amplimits = [0,1.2, 0,1.2, 0,1.2] # limits for gaussian normalized amplitude [min1,max1,min2,max2]
threshold = 10 # margin for gaussian position (raw RV in km/s)
#widlimits = [0,25, 0,22] # limits for gaussian width (km/s) [min1,max1,min2,max2]
# ^^^ widlimits IS NOW SPECIFIED ON A PER-STAR BASIS BELOW
# RADIAL VELOCITY AND BCV INFO FOR TEMPLATE (km/s; set both to 0 if using a model !!!)
rvstd = 0; bcvstd = 0 # model template
# PARAMETERS FOR THE BROADENING FUNCTION (IMPORTANT PAY ATTENTION !!!)
smoothstd = 1.5 # stdev of Gaussian to smooth BFs by (~slit width in pixels)
#w00 = 5400 # starting wavelength for new grid
#n = 38750 # number of wavelength points for new grid
#stepV = 1.7 # roughly 3e5 / (max_wavelength / wavelength_step) km/s, rounded down
m = 401 # length of the BF (must be longer if RVs are far from 0)
## good values for APOGEE:
#w00 = 15170; n = 32000; stepV = 1.0 # all of APOGEE, (too) high res
#w00 = 15170; n = 10000; stepV = 1.5 # all of APOGEE, still pretty high res
w00 = 15170; n = 10000; stepV = 2.0 # all of APOGEE, still pretty high res
#w00 = 15170; n = 6000; stepV = 4.0 # a little piece of APOGEE (lower res, apStar)
# CUSTOMIZED BF WIDTH (for gausspars) AND PLOT LIMITS
#widlimits = [0,15, 0,15]; rvneg = -100; rvpos = 300; ymin = -0.15; ymax = 1.19 # good starting default
#widlimits = [0,9, 0,7, 0,9]; rvneg = 0; rvpos = 149; ymin = -0.15; ymax = 1.19 # 3247294 #weird tripe only one panel
#widlimits = [0,9, 0,10, 0,9]; rvneg = -75; rvpos = 175; ymin = -0.15; ymax = 1.18 # 6781535
#widlimits = [0,9, 0,9, 0,11]; rvneg = 0; rvpos = 200; ymin = -0.15; ymax = 1.19 # 6131659
#widlimits = [0,9, 0,7]; rvneg = -300; rvpos = 300; ymin = -0.15; ymax = 1.19 # 6131659 Xtra large
#widlimits = [0,13, 0,13]; rvneg = -50; rvpos = 249; ymin = -0.15; ymax = 1.19 # 4285087
#widlimits = [0,18, 0,19]; rvneg = -70; rvpos = 270; ymin = -0.15; ymax = 1.19 # 5285607
#widlimits = [0,16, 0,11]; rvneg = -300; rvpos = 500; ymin = -0.15; ymax = 1.2 #6449358 extra wide
widlimits = [0,16, 0,11]; rvneg = -50; rvpos = 199; ymin = -0.15; ymax = 1.2 #6449358
#widlimits = [0,12, 0,8]; rvneg = -45; rvpos = 199; ymin = -0.15; ymax = 1.4 #6778289
#widlimits = [0,11, 0,10]; rvneg = 30; rvpos = 170; ymin = -0.15; ymax = 1.19 # 6864859
#widlimits = [0,9, 0,9]; rvneg = -150; rvpos = 50; ymin = -0.15; ymax = 1.19 # 6610259a
#widlimits = [0,15, 0,15]; rvneg = -50; rvpos = 10; ymin = -0.15; ymax = 1.19 # 6610219b
colors = bff.user_rc()
print('Welcome to the Broadening Function party!')
print('')
print('MAKE SURE THIS IS WHAT YOU WANT:')
print('You set Porb = {0} days, BJD0 = {1} days'.format(period, BJD0))
# CREATE NEW SPECTRUM IN LOG SPACE
# This uses w00, n, and stepV, defined above. The new wavelength grid is w1.
# The BF will be evenly spaced in velocity with length m.
# The velocity steps are r (km/s/pix).
w1, m, r = bff.logify_spec(isAPOGEE, w00, n, stepV, m)
# READ IN ALL THE THINGS
specdata = bff.read_specfiles(infiles, bjdinfile, isAPOGEE)
nspec = specdata[0]; filenamelist = specdata[1]
datetimelist = specdata[2]; wavelist = specdata[3]; speclist = specdata[4]
# INTERPOLATE THE TEMPLATE AND OBJECT SPECTRA ONTO THE NEW LOG-WAVELENGTH GRID
# OPTION TO PLOT THIS
newspeclist = []
yoffset = 0
if SpecPlot == True:
plt.axis([w1[0], w1[-1], 0, nspec+3])
plt.xlabel(r'Wavelength ({\AA})')
for i in range (0, nspec):
newspec = np.interp(w1, wavelist[i], speclist[i])
newspeclist.append(newspec)
if SpecPlot == True:
if i == 0: # plot template in red
plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10], color=colors[6], marker='.')
else: # plot the rest in blue
plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10], color=colors[0], marker='.')
yoffset = yoffset + 1
if SpecPlot == True:
##plt.legend()
plt.show()
# BROADENING FUNCTION TIME
svd = pyasl.SVD()
# Single Value Decomposition
svd.decompose(newspeclist[0], m)
singularvals = svd.getSingularValues()
bflist = []
bfsmoothlist = []
for i in range (0, nspec):
# Obtain the broadening function
bf = svd.getBroadeningFunction(newspeclist[i]) # this is a full matrix
bfarray = svd.getBroadeningFunction(newspeclist[i], asarray=True)
# Smooth the array-like broadening function
# 1ST LINE - python 2.7 with old version of pandas; 2ND LINE - python 3.5 with new version of pandas
#bfsmooth = pd.rolling_window(bfarray, window=5, win_type='gaussian', std=smoothstd, center=True)
bfsmooth = pd.Series(bfarray).rolling(window=5, win_type='gaussian', center=True).mean(std=smoothstd)
# The rolling window makes nans at the start because it's a punk.
for j in range(0,len(bfsmooth)):
if np.isnan(bfsmooth[j]) == True:
bfsmooth[j] = 0
else:
bfsmooth[j] = bfsmooth[j]
bflist.append(bf)
bfsmoothlist.append(bfsmooth)
bfnormlist = []
for a in bfsmoothlist:
bfnormlist.append((a-np.min(a))/(np.max(a)-np.min(a)))
# Obtain the indices in RV space that correspond to the BF
bf_ind = svd.getRVAxis(r, 1) + rvstd - bcvstd
# OPTION TO PLOT THE SINGULAR VALUES TO SEE WHERE THEY AREN'T A MESS
# this probably isn't important, because instead of choosing which values to throw out,
# we use "Route #2" as described by Rucinski and just use the final row of the BF array
# and smooth it with a Gaussian to get rid of noise problems.
# for more info, seriously, read http://www.astro.utoronto.ca/~rucinski/SVDcookbook.html
##plt.figure(2)
#plt.semilogy(singularvals, 'b-')
#plt.xlabel('BF Index')
#plt.ylabel('Singular Values')
#plt.show()
# OPTION TO PLOT THE SMOOTHED BFs
plt.axis([rvneg, rvpos, -0.2, float(nspec)+1])
plt.xlabel('Radial Velocity (km s$^{-1}$)')
plt.ylabel('Broadening Function (arbitrary amplitude)')
yoffset = 0.0
for i in range(1, nspec):
plt.plot(bf_ind, bfnormlist[i]+yoffset, color=colors[0], marker='.')
plt.axhline(y=yoffset, color=colors[15], ls=':')
yoffset = yoffset + 1.0
plt.show()
# FIT THE SMOOTHED BF PEAKS WITH TWO GAUSSIANS
# you have to have pretty decent guesses in the gausspars file for this to work.
bffitlist = bff.gaussparty(gausspars, nspec, filenamelist, bfnormlist, bf_ind, amplimits, threshold, widlimits)
rvraw1 = []; rvraw2 = []; rvraw1_err = []; rvraw2_err = []; rvraw3 = []; rvraw3_err = []
rvraw1.append(0); rvraw2.append(0); rvraw3.append(0)
rvraw1_err.append(0); rvraw2_err.append(0), rvraw3_err.append(0)
for i in range(1, len(bffitlist)):
rvraw1.append(bffitlist[i][0][1]) # indices are [visit][parameter, BF, error array][amp,rv,width x N]
rvraw2.append(bffitlist[i][0][4]) # [0,1,2] is amp,rv,width for star1; [3,4,5] is same for star2, etc.
if len(bffitlist[i][0]) == 9:
rvraw3.append(bffitlist[i][0][7])
else:
rvraw3.append(None)
rvraw1_err.append(bffitlist[i][2][1])
rvraw2_err.append(bffitlist[i][2][4])
if len(bffitlist[i][2]) == 9:
rvraw3_err.append(bffitlist[i][2][7])
else:
rvraw3_err.append(None)
rvrawlist = [rvraw1, rvraw1_err, rvraw2, rvraw2_err, rvraw3, rvraw3_err]
# CALCULATE ORBITAL PHASES AND FINAL RV CURVE
rvdata = bff.rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvrawlist, rvstd, bcvstd)
phase = rvdata[0]; bjdfunny = rvdata[1]
rvfinals = rvdata[2]
g2 = open(outfile, 'w')
print('# RVs calculated with BF_python.py', file=g2)
print('#', file=g2)
print('# Porb = {0} days, BJD0 = {1} days'.format(period, BJD0), file=g2)
print('# Wavelength axis = [{0} - {1}] Angstroms'.format(w1[0], w1[-1]), file=g2)
print('#', file=g2)
print('# Template spectrum (line 0 of infiles): {0}'.format(filenamelist[0]), file=g2)
print('# RV of template, BCV of template (km/s): {0}, {1}'.format(rvstd, bcvstd), file=g2)
print('#', file=g2)
print('# List of all input spectra (infiles): {0}'.format(infiles), file=g2)
print('# Target BJD and BCV info (bjdinfile): {0}'.format(bjdinfile), file=g2)
print('# Gaussian fit guesses (gausspars): {0}'.format(gausspars), file=g2)
print('#', file=g2)
print('# BF parameters: w00 = {0}; n = {1}; stepV = {2}'.format(w00, n, stepV), file=g2)
print('# BF parameters: smoothstd = {0}; m = {1}'.format(smoothstd, m), file=g2)
print('# gaussfit: amplimits = {0}; threshold = {1}, widlimits = {2}'.format(amplimits, threshold, widlimits), file=g2)
print('#', file=g2)
print('# time, phase, adjusted_time, RV1 [km/s], error1 [km/s], RV2 [km/s], error2 [km/s]', file=g2)
print('#', file=g2)
for i in range(1, nspec):
if rvfinals[4][i] and rvfinals[5][i]:
print ('%.9f %.9f %.9f %.5f %.5f %.5f %.5f %.5f %.5f' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i],
rvfinals[0][i], rvfinals[1][i], rvfinals[2][i], rvfinals[3][i], rvfinals[4][i], rvfinals[5][i]), file=g2)
else:
print ('%.9f %.9f %.9f %.5f %.5f %.5f %.5f %s %s' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i],
rvfinals[0][i], rvfinals[1][i], rvfinals[2][i], rvfinals[3][i], 'nan', 'nan'), file=g2)
g2.close()
print('BJD, phase, and RVs written to %s.' % outfile)
print('Use rvplotmaker.py to plot the RV curve.')
# Write the per-visit BF data (raw RV, smoothed BF, gaussian model) to bfoutfile.
try:
    bfout = open(bfoutfile, 'w')
    for idx in range(1, nspec):
        print('###', file=bfout)
        print('# timestamp: {0}'.format(datetimelist[idx]), file=bfout)
        # BUG FIX: the two header lines below previously indexed with the stale
        # loop variable `i` left over from an earlier loop, so every visit
        # reported the SAME (last visit's) gaussian fit.  Use `idx` throughout.
        print('# Gaussian 1 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][0], rvraw1[idx], rvraw1_err[idx], bffitlist[idx][0][2]), file=bfout)
        print('# Gaussian 2 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][3], rvraw2[idx], rvraw2_err[idx], bffitlist[idx][0][5]), file=bfout)
        print('# Uncorrected_RV, BF_amp, Gaussian_fit', file=bfout)
        print('###', file=bfout)
        for vel, amp, modamp in zip(bf_ind, bfsmoothlist[idx], bffitlist[idx][1]):
            print(vel, amp, modamp, file=bfout)
    bfout.close()
except Exception:
    # `bfoutfile` may be undefined for some targets (NameError); treat any
    # failure as "no outfile requested" to preserve the original best-effort.
    print('No BF outfile specified, not saving BF data to file')
try:
gout = open(gaussoutfile, 'w')
for idx in range(1, nspec):
print('P Amplitude: {0} +/- {1} width {2} xmax {3}'.format(bffitlist[idx][0][0], bffitlist[idx][2][0], bffitlist[idx][0][2], bffitlist[idx][0][1]), file=gout)
print('S Amplitude: {0} +/- {1} width {2} xmax {3}'.format(bffitlist[idx][0][3], bffitlist[idx][2][3], bffitlist[idx][0][5], bffitlist[idx][0][4]), file=gout)
#print('T Amplitude: {0} +/- {1} width {2} xmax {3}'.format(bffitlist[idx][0][6], bffitlist[idx][2][6], bffitlist[idx][0][8], bffitlist[idx][0][7]), file=gout)
#print(bffitlist[idx][0][6])
gout.close()
except:
print('No gaussoutfile specified, not saving gauss data to file')
# handy little gaussian function maker
def gaussian(x, amp, mu, sig):
    """Evaluate an unnormalized Gaussian: amp * exp(-(x - mu)**2 / (2 * sig**2))."""
    z = (x - mu) / sig
    return amp * np.exp(-0.5 * z * z)
# PLOT THE FINAL SMOOTHED BFS + GAUSSIAN FITS IN INDIVIDUAL PANELS
# manually adjust this multi-panel plot based on how many spectra you have
windowcols = 3 # 4 # how many columns the plot should have
#windowrows = 3
#windowrows = 8 #6864859 manually set number of plot rows here, or automatically below
#windowrows = 8 #6778289
#windowrows = 10
# BUG FIX: np.float was removed in NumPy 1.24; use the builtin float instead.
# Also dropped the pointless one-element list-and-index wrapper around the
# conditional expression (behaviour unchanged).
windowrows = int(np.rint((nspec-1)/windowcols) if (float(nspec-1)/windowcols) % windowcols == 0 else np.rint((nspec-1)/windowcols)+1)
xmin = rvneg
xmax = rvpos
#fig = plt.figure(1, figsize=(15,12))
#fig = plt.figure(1, figsize=(15,7))
fig = plt.figure(1, figsize=(15,5)) #5285607 (6 Visits)
fig.text(0.5, 0.04, 'Uncorrected Radial Velocity (km s$^{-1}$)', ha='center', va='center', size='large')
fig.text(0.07, 0.5, 'Broadening Function', ha='center', va='center', size='large', rotation='vertical')
for i in range (1, nspec):
ax = fig.add_subplot(windowrows, windowcols, i) # out of range if windowcols x windowrows < nspec
ax.yaxis.set_major_locator(MultipleLocator(0.4)) #increments of y axis tic marks
if windowcols == 4 and (i!=1 and i!=5 and i!=9 and i!=13 and i!=17 and i!=21 and i!=25):
ax.set_yticklabels(())
if windowcols == 3 and (i!=1 and i!=4 and i!=7 and i!=10 and i!=13 and i!=16 and i!=19 and i!=22 and i!=25):
ax.set_yticklabels(())
if i < nspec-windowcols:
ax.set_xticklabels(())
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis([xmin, xmax, ymin, ymax])
plt.tick_params(axis='both', which='major')
plt.text(xmax - 0.19*(np.abs(xmax-xmin)), 0.60*ymax, '%.3f $\phi$' % (phase[i]), size='small')
plt.text(xmax - 0.26*(np.abs(xmax-xmin)), 0.35*ymax, '%s' % (datetimelist[i].iso[0:10]), size='small')
#plt.plot(bf_ind, bfsmoothlist[i], color=colors[14], lw=1.5, ls='-', label='Smoothed BF')
plt.plot(bf_ind, bfnormlist[i], color=colors[14], lw=2, ls='-', label='Normalized Smoothed BF')
plt.plot(bf_ind, bffitlist[i][1], color=colors[0], lw=2, ls='-', label='Two Gaussian fit')
#gauss1 = gaussian(bf_ind, bffitlist[i][0][0], bffitlist[i][0][1], bffitlist[i][0][2])
#gauss2 = gaussian(bf_ind, bffitlist[i][0][3], bffitlist[i][0][4], bffitlist[i][0][5])
plt.plot(rvraw1[i], 0.1, color=colors[6], marker='|', ms=15)#, label='RV 1')
plt.plot(rvraw2[i], 0.1, color=colors[2], marker='|', ms=15)#, label='RV 2')
if rvraw3[i] is not None:
plt.plot(rvraw3[i], 0.1, color=colors[8], marker='|', ms=15)#, label='RV 3')
#plt.plot(bf_ind, gauss1, color=colors[6], lw=3, ls='--')#, label='Gaussian fit 1')
#plt.plot(bf_ind, gauss2, color=colors[2], lw=3, ls='--')#, label='Gaussian fit 2')
# OPTION TO PLOT VERTICAL LINE AT ZERO
#plt.axvline(x=0, color=colors[15])
# MAKE A LEGEND
#ax.legend(bbox_to_anchor=(2.5,0.7), loc=1, borderaxespad=0.,
# frameon=False, handlelength=3, prop={'size':18})
if nspec - 1 == windowcols * (windowrows - 1): # square plot, you must adjust the rows for room
# in this situation, the legend is printed below the final subplot
if i==nspec-1:
ax.legend(bbox_to_anchor=(0.5,-1.2), loc=4, borderaxespad=0.,
frameon=False, handlelength=3, prop={'size':16})
else:
# in this situation, the legend is printed to the right of the final subplot
if i==nspec-1:
ax.legend(bbox_to_anchor=(2.1,0.7), loc=1, borderaxespad=0.,
frameon=False, handlelength=3, prop={'size':18})
plt.show()
#fig.savefig('3247294bfrv.png')
#fig.savefig('3247294bfrv.eps')
| null |
rvs/BFgaussout.py
|
BFgaussout.py
|
py
| 22,498 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "BF_functions.user_rc",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "BF_functions.logify_spec",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "BF_functions.read_specfiles",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "numpy.interp",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "PyAstronomy.pyasl.SVD",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "PyAstronomy.pyasl",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "pandas.Series",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "BF_functions.gaussparty",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "BF_functions.rvphasecalc",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.MultipleLocator",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 401,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 435,
"usage_type": "name"
}
] |
318541348
|
'''
Created on 5 janv. 2016
@author: Nicolas R
'''
from django.shortcuts import render
from blindtest import context_processor
def render_page(request, template_name, data = {}, static_js_files = [], static_css_files = [], ext_js_files = [], ext_css_files = []):
context = context_processor.common(request)
context['css_files']['static'] += ['%s.css' % template_name] + static_css_files
context['js_files']['static'] += ['%s.js' % template_name] + static_js_files
context['css_files']['external'] += ext_css_files
context['js_files']['external'] += ext_js_files
context.update(data)
return render(request,
'%s.html' % template_name,
context)
def render_quizz(request, data, static_js_files = [], static_css_files = [], ext_js_files = [], ext_css_files = []):
return render_page(request,
'quizz',
data,
static_js_files = ['jquery.countdown360.min.js'] + static_js_files,
static_css_files = static_css_files,
ext_js_files = ext_js_files,
ext_css_files = ext_css_files)
| null |
blindtest/ui/utils.py
|
utils.py
|
py
| 1,194 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "blindtest.context_processor.common",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "blindtest.context_processor",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
}
] |
382722640
|
import serial #import serial library
import numpy #import numpy
import matplotlib.pyplot as plt #import matplotlib library
from drawnow import *
import array as arr
dist = [] #an array to put the arduino information before print
arduinoData = serial.Serial('/dev/ttyACM0', 115200) #comunication with Arduino Serial
plt.ion() #
count = 0
def makeFig():
plt.plot(distance, 'ro-')
while True:
while (arduinoData.inWaiting()==0): #segue executando o programa somente
pass #se existem dados para serem lidos
arduinoString = arduinoData.readline() #
#splitedArray = [float(s) for s in arduinoString.split(',')]
dist = float(arduinoString)
distance.append(dist)
drawnow(makeFig)
plt.pause(.000005)
count = count + 1
if(count>50):
distance.pop(0)
| null |
pendulo/ultrassom-plt_V3.py
|
ultrassom-plt_V3.py
|
py
| 775 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "serial.Serial",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
}
] |
175445864
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 4 16:01:48 2022
@author: bdobson
"""
from wsimod import nodes
from wsimod.arcs import arcs as arcs_mod
from tqdm import tqdm
from wsimod.nodes.land import ImperviousSurface
from wsimod.core import constants
from wsimod.core.core import WSIObj
from wsimod.nodes.nodes import QueueTank, Tank, ResidenceTank, Node
import os
os.environ['USE_PYGEOS'] = '0'
import sys
import inspect
import csv
import gzip
import yaml
from math import log10
from datetime import datetime
import dill as pickle
class to_datetime():
#TODO document and make better
def __init__(self, date_string):
"""Simple datetime wrapper that has key properties used in WSIMOD
components.
Args:
date_string (str): A string containing the date, expected in
format %Y-%m-%d or %Y-%m.
"""
self._date = self._parse_date(date_string)
def __str__(self):
return self._date.strftime("%Y-%m-%d")
def __repr__(self):
return self._date.strftime("%Y-%m-%d")
@property
def dayofyear(self):
return self._date.timetuple().tm_yday
@property
def day(self):
return self._date.day
@property
def year(self):
return self._date.year
@property
def month(self):
return self._date.month
def to_period(self, args = 'M'):
return to_datetime(f"{self._date.year}-{str(self._date.month).zfill(2)}")
def is_leap_year(self):
year = self._date.year
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _parse_date(self, date_string, date_format = "%Y-%m-%d %H:%M:%S"):
try:
return datetime.strptime(date_string, date_format)
except ValueError:
try:
return datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
try:
#Check if valid 'YYYY-MM' format
if len(date_string.split('-')[0]) == 4:
int(date_string.split('-')[0])
if len(date_string.split('-')[1]) == 2:
int(date_string.split('-')[1])
return date_string
except ValueError:
raise ValueError
def __eq__(self, other):
if isinstance(other, to_datetime):
return self._date == other._date
return False
def __hash__(self):
return hash(self._date)
class Model(WSIObj):
def __init__(self):
"""Object to contain nodes and arcs that provides a default
orchestration
Returns:
Model: An empty model object
"""
super().__init__()
self.arcs = {}
# self.arcs_type = {} #not sure that this would be necessary
self.nodes = {}
self.nodes_type = {}
def all_subclasses(cls):
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in all_subclasses(c)])
self.nodes_type = [x.__name__ for x in all_subclasses(Node)] + ['Node']
self.nodes_type = set(getattr(nodes,x)(name='').__class__.__name__ for x in self.nodes_type).union(['Foul'])
self.nodes_type = {x : {} for x in self.nodes_type}
def get_init_args(self,cls):
"""
Get the arguments of the __init__ method for a class and its superclasses
"""
init_args = []
for c in cls.__mro__:
# Get the arguments of the __init__ method
args = inspect.getfullargspec(c.__init__).args[1:]
init_args.extend(args)
return init_args
def load(self, address, config_name = 'config.yml', overrides = {}):
with open(os.path.join(address, config_name), "r") as file:
data = yaml.safe_load(file)
for key, item in overrides.items():
data[key] = item
constants.POLLUTANTS =data['pollutants']
constants.ADDITIVE_POLLUTANTS =data['additive_pollutants']
constants.NON_ADDITIVE_POLLUTANTS =data['non_additive_pollutants']
constants.FLOAT_ACCURACY = float(data['float_accuracy'])
self.__dict__.update(Model().__dict__)
nodes = data['nodes']
for name, node in nodes.items():
if 'filename' in node.keys():
node['data_input_dict'] = read_csv(os.path.join(address, node['filename']))
del node['filename']
if 'surfaces' in node.keys():
for key, surface in node['surfaces'].items():
if 'filename' in surface.keys():
node['surfaces'][key]['data_input_dict'] = read_csv(os.path.join(address,surface['filename']))
del surface['filename']
node['surfaces'] = list(node['surfaces'].values())
arcs = data['arcs']
self.add_nodes(list(nodes.values()))
self.add_arcs(list(arcs.values()))
if 'dates' in data.keys():
self.dates = [to_datetime(x) for x in data['dates']]
def save(self, address, config_name = 'config.yml', compress = False):
"""Save the model object to a yaml file and input data to csv.gz format
in the directory specified
Args:
address (str): Path to a directory
config_name (str, optional): Name of yaml model file.
Defaults to 'model.yml'
"""
if not os.path.exists(address):
os.mkdir(address)
nodes = {}
if compress:
file_type = 'csv.gz'
else:
file_type = 'csv'
for node in self.nodes.values():
init_args = self.get_init_args(node.__class__)
special_args = set(['surfaces', 'parent', 'data_input_dict'])
node_props = {x : getattr(node, x) for x in set(init_args).difference(special_args)}
node_props['type_'] = node.__class__.__name__
node_props['node_type_override'] = repr(node.__class__).split('.')[-1].replace("'>","")
if 'surfaces' in init_args:
surfaces = {}
for surface in node.surfaces:
surface_args = self.get_init_args(surface.__class__)
surface_props = {x : getattr(surface, x) for x in set(surface_args).difference(special_args)}
surface_props['type_'] = surface.__class__.__name__
#Exceptions...
#TODO I need a better way to do this
del surface_props['capacity']
if set(['rooting_depth','pore_depth']).intersection(surface_args):
del surface_props['depth']
if 'data_input_dict' in surface_args:
if surface.data_input_dict:
filename = "{0}-{1}-inputs.{2}".format(node.name, surface.surface, file_type).replace("(", "_").replace(")", "_").replace("/", "_").replace(" ", "_")
write_csv(surface.data_input_dict,
{'node' : node.name,
'surface' : surface.surface},
os.path.join(address,filename),
compress = compress)
surface_props['filename'] = filename
surfaces[surface_props['surface']] = surface_props
node_props['surfaces'] = surfaces
if 'data_input_dict' in init_args:
if node.data_input_dict:
filename = "{0}-inputs.{1}".format(node.name,file_type)
write_csv(node.data_input_dict,
{'node' : node.name},
os.path.join(address, filename),
compress = compress)
node_props['filename'] = filename
nodes[node.name] = node_props
arcs = {}
for arc in self.arcs.values():
init_args = self.get_init_args(arc.__class__)
special_args = set(['in_port', 'out_port'])
arc_props = {x : getattr(arc, x) for x in set(init_args).difference(special_args)}
arc_props['type_'] = arc.__class__.__name__
arc_props['in_port'] = arc.in_port.name
arc_props['out_port'] = arc.out_port.name
arcs[arc.name] = arc_props
data = {'nodes' : nodes,
'arcs' : arcs,
'pollutants':constants.POLLUTANTS,
'additive_pollutants':constants.ADDITIVE_POLLUTANTS,
'non_additive_pollutants':constants.NON_ADDITIVE_POLLUTANTS,
'float_accuracy' : constants.FLOAT_ACCURACY}
if hasattr(self, 'dates'):
data['dates'] = [str(x) for x in self.dates]
def coerce_value(value):
conversion_options = {'__float__' : float,
'__iter__' : list,
'__int__' : int,
'__str__' : str,
'__bool__' : bool,
}
converted = False
for property, func in conversion_options.items():
if hasattr(value, property):
try:
yaml.safe_dump(func(value))
value = func(value)
converted = True
break
except:
raise ValueError(f"Cannot dump: {value} of type {type(value)}")
if not converted:
raise ValueError(f"Cannot dump: {value} of type {type(value)}")
return value
def check_and_coerce_dict(data_dict):
for key, value in data_dict.items():
if isinstance(value, dict):
check_and_coerce_dict(value)
else:
try:
yaml.safe_dump(value)
except yaml.representer.RepresenterError:
if hasattr(value, '__iter__'):
for idx, val in enumerate(value):
if isinstance(val, dict):
check_and_coerce_dict(val)
else:
value[idx] = coerce_value(val)
data_dict[key] = coerce_value(value)
check_and_coerce_dict(data)
write_yaml(address, config_name, data)
def load_pickle(self, fid):
"""Load model object to a pickle file, including the model states
Args:
fid (str): File address to load the pickled model from
Returns:
model (obj): loaded model
Example:
>>> # Load and run your model
>>> my_model.load(model_dir,config_name = 'config.yml')
>>> _ = my_model.run()
>>>
>>> # Save it including its different states
>>> my_model.save_pickle('model_at_end_of_run.pkl')
>>>
>>> # Load it at another time to resume the model from the end
>>> # of the previous run
>>> new_model = Model()
>>> new_model = new_model.load_pickle('model_at_end_of_run.pkl')
"""
file = open(fid,'rb')
return pickle.load(file)
def save_pickle(self, fid):
"""Save model object to a pickle file, including saving the model states
Args:
fid (str): File address to save the pickled model to
Returns:
message (str): Exit message of pickle dump
"""
file = open(fid, 'wb')
pickle.dump(self, file)
return file.close()
def add_nodes(self, nodelist):
"""Add nodes to the model object from a list of dicts, where
each dict contains all of the parameters for a node. Intended
to be called before add_arcs.
Args:
nodelist (list): List of dicts, where a dict is a node
"""
def all_subclasses(cls):
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in all_subclasses(c)])
for data in nodelist:
name = data['name']
type_ = data['type_']
if 'node_type_override' in data.keys():
node_type = data['node_type_override']
del data['node_type_override']
else:
node_type = type_
if 'foul' in name:
#Absolute hack to enable foul sewers to be treated separate from storm
type_ = 'Foul'
if 'geometry' in data.keys():
del data['geometry']
del data['type_']
self.nodes_type[type_][name] = getattr(nodes,node_type)(**dict(data))
self.nodes[name] = self.nodes_type[type_][name]
self.nodelist = [x for x in self.nodes.values()]
def add_instantiated_nodes(self, nodelist):
"""Add nodes to the model object from a list of objects, where
each object is an already instantiated node object. Intended
to be called before add_arcs.
Args:
nodelist (list): list of objects that are nodes
"""
self.nodelist = nodelist
self.nodes = {x.name : x for x in nodelist}
for x in nodelist:
self.nodes_type[x.__class__.__name__][x.name] = x
def add_arcs(self, arclist):
"""Add nodes to the model object from a list of dicts, where
each dict contains all of the parameters for an arc.
Args:
arclist (list): list of dicts, where a dict is an arc
"""
river_arcs = {}
for arc in arclist:
name = arc['name']
type_ = arc['type_']
del arc['type_']
arc['in_port'] = self.nodes[arc['in_port']]
arc['out_port'] = self.nodes[arc['out_port']]
self.arcs[name] = getattr(arcs_mod,type_)(**dict(arc))
if arc['in_port'].__class__.__name__ in ['River', 'Node', 'Waste','Reservoir']:
if arc['out_port'].__class__.__name__ in ['River', 'Node', 'Waste','Reservoir']:
river_arcs[name] = self.arcs[name]
if any(river_arcs):
upstreamness = {x : 0 for x in self.nodes_type['Waste'].keys()}
upstreamness = self.assign_upstream(river_arcs, upstreamness)
self.river_discharge_order = []
for node in sorted(upstreamness.items(), key=lambda item: item[1],reverse=True):
if node[0] in self.nodes_type['River'].keys():
self.river_discharge_order.append(node[0])
def add_instantiated_arcs(self, arclist):
"""Add arcs to the model object from a list of objects, where
each object is an already instantiated arc object.
Args:
arclist (list): list of objects that are arcs.
"""
self.arclist = arclist
self.arcs = {x.name : x for x in arclist}
river_arcs = {}
for arc in arclist:
if arc.in_port.__class__.__name__ in ['River', 'Node', 'Waste','Reservoir']:
if arc.out_port.__class__.__name__ in ['River', 'Node', 'Waste','Reservoir']:
river_arcs[arc.name] = arc
upstreamness = {x : 0 for x in self.nodes_type['Waste'].keys()}
upstreamness = self.assign_upstream(river_arcs, upstreamness)
self.river_discharge_order = []
for node in sorted(upstreamness.items(), key=lambda item: item[1],reverse=True):
if node[0] in self.nodes_type['River'].keys():
self.river_discharge_order.append(node[0])
def assign_upstream(self, arcs, upstreamness):
"""Recursive function to trace upstream up
arcs to determine which are the most upstream
Args:
arcs (list): list of dicts where dicts are arcs
upstreamness (dict): dictionary contain nodes in
arcs as keys and a number representing upstreamness
(higher numbers = more upstream)
Returns:
upstreamness (dict): final version of upstreamness
"""
upstreamness_ = upstreamness.copy()
in_nodes = [x.in_port.name for x in arcs.values() if x.out_port.name in upstreamness.keys()]
ind = max(list(upstreamness_.values())) + 1
in_nodes = list(set(in_nodes).difference(upstreamness.keys()))
for node in in_nodes:
upstreamness[node] = ind
if upstreamness == upstreamness_:
return upstreamness
else:
upstreamness = self.assign_upstream(arcs,upstreamness)
return upstreamness
def debug_node_mb(self):
"""Simple function that iterates over nodes
calling their mass balance function
"""
for node in self.nodelist:
_ = node.node_mass_balance()
def default_settings(self):
"""Incomplete function that enables easy specification
of results storage
Returns:
(dict): default settings
"""
return {'arcs' : {'flows' : True,
'pollutants' : True},
'tanks' : {'storages' : True,
'pollutants' : True},
'mass_balance' : False}
def change_runoff_coefficient(self, relative_change, nodes = None):
"""Clunky way to change the runoff coefficient of a land node
Args:
relative_change (float): amount that the impervious area in the land
node is multiplied by (grass area is changed in compensation)
nodes (list, optional): list of land nodes to change the parameters of.
Defaults to None, which applies the change to all land nodes.
"""
#Multiplies impervious area by relative change and adjusts grassland accordingly
if nodes == None:
nodes = self.nodes_type['Land'].values()
if isinstance(relative_change,float):
relative_change = {x : relative_change for x in nodes}
for node in nodes:
surface_dict = {x.surface : x for x in node.surfaces}
if 'Impervious' in surface_dict.keys():
impervious_area = surface_dict['Impervious'].area
grass_area = surface_dict['Grass'].area
new_impervious_area = impervious_area * relative_change[node]
new_grass_area = grass_area + (impervious_area - new_impervious_area)
if new_grass_area < 0:
print('not enough grass')
break
surface_dict['Impervious'].area = new_impervious_area
surface_dict['Impervious'].capacity *= relative_change[node]
surface_dict['Grass'].area = new_grass_area
surface_dict['Grass'].capacity *= (new_grass_area / grass_area)
for pol in constants.ADDITIVE_POLLUTANTS + ['volume']:
surface_dict['Grass'].storage[pol] *= (new_grass_area / grass_area)
for pool in surface_dict['Grass'].nutrient_pool.pools:
for nutrient in pool.storage.keys():
pool.storage[nutrient] *= (new_grass_area / grass_area)
def run(self,
dates = None,
settings = None,
record_arcs = None,
record_tanks = None,
record_surfaces = None,
verbose = True,
record_all = True,
objectives = []):
"""Run the model object with the default orchestration
Args:
dates (list, optional): Dates to simulate. Defaults to None, which
simulates all dates that the model has data for.
settings (dict, optional): Dict to specify what results are stored,
not currently used. Defaults to None.
record_arcs (list, optional): List of arcs to store result for.
Defaults to None.
record_tanks (list, optional): List of nodes with water stores to
store results for. Defaults to None.
record_surfaces (list, optional): List of tuples of
(land node, surface) to store results for. Defaults to None.
verbose (bool, optional): Prints updates on simulation if true.
Defaults to True.
record_all (bool, optional): Specifies to store all results.
Defaults to True.
objectives (list, optional): A list of dicts with objectives to
calculate (see examples). Defaults to [].
Returns:
flows: simulated flows in a list of dicts
tanks: simulated tanks storages in a list of dicts
objective_results: list of values based on objectives list
surfaces: simulated surface storages of land nodes in a list of dicts
Examples:
# Run a model without storing any results but calculating objectives
import statistics as stats
objectives = [{'element_type' : 'flows',
'name' : 'my_river',
'function' : @ (x, _) stats.mean([y['phosphate'] for y in x])
},
{'element_type' : 'tanks',
'name' : 'my_reservoir',
'function' : @ (x, model) sum([y['storage'] < (model.nodes['my_reservoir'].tank.capacity / 2) for y in x])
}]
_, _, results, _ = my_model.run(record_all = False, objectives = objectives)
"""
if record_arcs is None:
record_arcs = []
if record_all:
record_arcs = list(self.arcs.keys())
if record_tanks is None:
record_tanks = []
if record_surfaces is None:
record_surfaces = []
if settings is None:
settings = self.default_settings()
def blockPrint():
stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
return stdout
def enablePrint(stdout):
sys.stdout = stdout
if not verbose:
stdout = blockPrint()
if dates is None:
dates = self.dates
for objective in objectives:
if objective['element_type'] == 'tanks':
record_tanks.append(objective['name'])
elif objective['element_type'] == 'flows':
record_arcs.append(objective['name'])
elif objective['element_type'] == 'surfaces':
record_surfaces.append((objective['name'],
objective['surface']))
else:
print('element_type not recorded')
flows = []
tanks = []
node_mb = []
surfaces = []
for date in tqdm(dates, disable = (not verbose)):
# for date in dates:
for node in self.nodelist:
node.t = date
node.monthyear = date.to_period('M')
#Run FWTW
for node in self.nodes_type['FWTW'].values():
node.treat_water()
#Create demand (gets pushed to sewers)
for node in self.nodes_type['Demand'].values():
node.create_demand()
#Create runoff (impervious gets pushed to sewers, pervious to groundwater)
for node in self.nodes_type['Land'].values():
node.run()
#Infiltrate GW
for node in self.nodes_type['Groundwater'].values():
node.infiltrate()
#Discharge sewers (pushed to other sewers or WWTW)
for node in self.nodes_type['Sewer'].values():
node.make_discharge()
#Foul second so that it can discharge any misconnection
for node in self.nodes_type['Foul'].values():
node.make_discharge()
#Discharge WWTW
for node in self.nodes_type['WWTW'].values():
node.calculate_discharge()
#Discharge GW
for node in self.nodes_type['Groundwater'].values():
node.distribute()
#river
for node in self.nodes_type['River'].values():
node.calculate_discharge()
#Abstract
for node in self.nodes_type['Reservoir'].values():
node.make_abstractions()
for node in self.nodes_type['Land'].values():
node.apply_irrigation()
for node in self.nodes_type['WWTW'].values():
node.make_discharge()
#Catchment routing
for node in self.nodes_type['Catchment'].values():
node.route()
#river
for node_name in self.river_discharge_order:
self.nodes[node_name].distribute()
#mass balance checking
#nodes/system
sys_in = self.empty_vqip()
sys_out = self.empty_vqip()
sys_ds = self.empty_vqip()
#arcs
for arc in self.arcs.values():
in_, ds_, out_ = arc.arc_mass_balance()
for v in constants.ADDITIVE_POLLUTANTS + ['volume']:
sys_in[v] += in_[v]
sys_out[v] += out_[v]
sys_ds[v] += ds_[v]
for node in self.nodelist:
# print(node.name)
in_, ds_, out_ = node.node_mass_balance()
# temp = {'name' : node.name,
# 'time' : date}
# for lab, dict_ in zip(['in','ds','out'], [in_, ds_, out_]):
# for key, value in dict_.items():
# temp[(lab, key)] = value
# node_mb.append(temp)
for v in constants.ADDITIVE_POLLUTANTS + ['volume']:
sys_in[v] += in_[v]
sys_out[v] += out_[v]
sys_ds[v] += ds_[v]
for v in constants.ADDITIVE_POLLUTANTS + ['volume']:
#Find the largest value of in_, out_, ds_
largest = max(sys_in[v], sys_in[v], sys_in[v])
if largest > constants.FLOAT_ACCURACY:
#Convert perform comparison in a magnitude to match the largest value
magnitude = 10**int(log10(largest))
in_10 = sys_in[v] / magnitude
out_10 = sys_in[v] / magnitude
ds_10 = sys_in[v] / magnitude
else:
in_10 = sys_in[v]
ds_10 = sys_in[v]
out_10 = sys_in[v]
if (in_10 - ds_10 - out_10) > constants.FLOAT_ACCURACY:
print("system mass balance error for " + v + " of " + str(sys_in[v] - sys_ds[v] - sys_out[v]))
#Store results
for arc in record_arcs:
arc = self.arcs[arc]
flows.append({'arc' : arc.name,
'flow' : arc.vqip_out['volume'],
'time' : date})
for pol in constants.POLLUTANTS:
flows[-1][pol] = arc.vqip_out[pol]
for node in record_tanks:
node = self.nodes[node]
tanks.append({'node' : node.name,
'storage' : node.tank.storage['volume'],
'time' : date})
for node, surface in record_surfaces:
node = self.nodes[node]
name = node.name
surface = node.get_surface(surface)
if not isinstance(surface,ImperviousSurface):
surfaces.append({'node' : name,
'surface' : surface.surface,
'percolation' : surface.percolation['volume'],
'subsurface_r' : surface.subsurface_flow['volume'],
'surface_r' : surface.infiltration_excess['volume'],
'storage' : surface.storage['volume'],
'evaporation' : surface.evaporation['volume'],
'precipitation' : surface.precipitation['volume'],
'tank_recharge' : surface.tank_recharge,
'capacity' : surface.capacity,
'time' : date,
'et0_coef' : surface.et0_coefficient,
# 'crop_factor' : surface.crop_factor
})
for pol in constants.POLLUTANTS:
surfaces[-1][pol] = surface.storage[pol]
else:
surfaces.append({'node' : name,
'surface' : surface.surface,
'storage' : surface.storage['volume'],
'evaporation' : surface.evaporation['volume'],
'precipitation' : surface.precipitation['volume'],
'capacity' : surface.capacity,
'time' : date})
for pol in constants.POLLUTANTS:
surfaces[-1][pol] = surface.storage[pol]
if record_all:
for node in self.nodes.values():
for prop_ in dir(node):
prop = node.__getattribute__(prop_)
if prop.__class__ in [QueueTank, Tank, ResidenceTank]:
tanks.append({'node' : node.name,
'time' : date,
'storage' : prop.storage['volume'],
'prop' : prop_})
for pol in constants.POLLUTANTS:
tanks[-1][pol] = prop.storage[pol]
for name, node in self.nodes_type['Land'].items():
for surface in node.surfaces:
if not isinstance(surface,ImperviousSurface):
surfaces.append({'node' : name,
'surface' : surface.surface,
'percolation' : surface.percolation['volume'],
'subsurface_r' : surface.subsurface_flow['volume'],
'surface_r' : surface.infiltration_excess['volume'],
'storage' : surface.storage['volume'],
'evaporation' : surface.evaporation['volume'],
'precipitation' : surface.precipitation['volume'],
'tank_recharge' : surface.tank_recharge,
'capacity' : surface.capacity,
'time' : date,
'et0_coef' : surface.et0_coefficient,
# 'crop_factor' : surface.crop_factor
})
for pol in constants.POLLUTANTS:
surfaces[-1][pol] = surface.storage[pol]
else:
surfaces.append({'node' : name,
'surface' : surface.surface,
'storage' : surface.storage['volume'],
'evaporation' : surface.evaporation['volume'],
'precipitation' : surface.precipitation['volume'],
'capacity' : surface.capacity,
'time' : date})
for pol in constants.POLLUTANTS:
surfaces[-1][pol] = surface.storage[pol]
for node in self.nodes.values():
node.end_timestep()
for arc in self.arcs.values():
arc.end_timestep()
objective_results = []
for objective in objectives:
if objective['element_type'] == 'tanks':
val = objective['function']([x for x in tanks if x['node'] == objective['name']], self)
elif objective['element_type'] == 'flows':
val = objective['function']([x for x in flows if x['arc'] == objective['name']], self)
elif objective['element_type'] == 'surfaces':
val = objective['function']([x for x in surfaces if (x['node'] == objective['name']) & (x['surface'] == objective['surface'])], self)
objective_results.append(val)
if not verbose:
enablePrint(stdout)
return flows, tanks, objective_results, surfaces
def reinit(self):
    """Reinitialise by ending all node/arc timesteps and calling reinit
    function in all nodes (generally zero-ing their storage values).
    """
    for node in self.nodes.values():
        node.end_timestep()
        # Call ``reinit`` on any attribute of the node that provides one
        # (e.g. tanks), so their internal state is reset as well.
        for attr_name in dir(node):
            attr = getattr(node, attr_name)
            # Bug fix: the reinit method must be looked up on the
            # attribute itself, not on the node (the old code did
            # ``node.__getattribute__('reinit')``, which raises
            # AttributeError for nodes without a reinit method and would
            # call the wrong object's reinit otherwise).
            reinit_fn = getattr(attr, 'reinit', None)
            if callable(reinit_fn):
                reinit_fn()
    for arc in self.arcs.values():
        arc.end_timestep()
def write_yaml(address, config_name, data):
    """Serialise ``data`` to a YAML file at ``address``/``config_name``.

    Uses the safe dumper, block style, and preserves insertion order
    (keys are not sorted).
    """
    target = os.path.join(address, config_name)
    with open(target, 'w') as file:
        yaml.dump(data,
                  file,
                  default_flow_style=False,
                  sort_keys=False,
                  Dumper=yaml.SafeDumper)
def open_func(file_path, mode):
    """Open ``file_path``, transparently decompressing gzip files.

    Gzip decompression is applied only for text-mode reads ("rt") of
    paths ending in ``.gz``; every other combination falls back to the
    builtin :func:`open`.
    """
    use_gzip = mode == "rt" and file_path.endswith(".gz")
    opener = gzip.open if use_gzip else open
    return opener(file_path, mode)
def read_csv(file_path, delimiter=","):
    """Read a WSIMOD input CSV into a dict keyed by (variable, datetime).

    Each row must provide 'variable', 'time' and 'value' columns; values
    are converted to float and times parsed with ``to_datetime``.
    Gzip-compressed files are handled transparently via ``open_func``.
    """
    data = {}
    with open_func(file_path, "rt") as f:
        for row in csv.DictReader(f, delimiter=delimiter):
            data[(row['variable'], to_datetime(row['time']))] = float(row['value'])
    return data
def write_csv(data, fixed_data=None, filename='', compress=False):
    """Write a dict of results to CSV (optionally gzip-compressed).

    Args:
        data: Mapping from key tuples (e.g. (variable, time)) to values;
            each key tuple is spread across columns, followed by the value.
        fixed_data: Mapping of constant columns written at the start of
            every row (default: no fixed columns).
        filename: Path of the output file.
        compress: If True, write gzip-compressed text.
    """
    # Fix: the old ``fixed_data={}`` default was a shared mutable argument;
    # use a None sentinel instead (behaviour is unchanged for callers).
    if fixed_data is None:
        fixed_data = {}
    # Renamed the local from ``open_func`` to avoid shadowing the
    # module-level helper of the same name.
    if compress:
        opener, mode = gzip.open, 'wt'
    else:
        opener, mode = open, 'w'
    with opener(filename, mode, newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(list(fixed_data.keys()) + ['variable', 'time', 'value'])
        fixed_data_values = list(fixed_data.values())
        for key, value in data.items():
            writer.writerow(fixed_data_values + list(key) + [str(value)])
def flatten_dict(d, parent_key='', sep='-'):
    """Flatten a nested dict into a single level with ``sep``-joined keys.

    Nested keys are concatenated with ``sep``; a top-level key with no
    parent keeps its original (non-stringified) form.
    """
    flat = {}
    for key, value in d.items():
        compound = str(parent_key) + sep + str(key) if parent_key else key
        if isinstance(value, dict):
            # Recurse into sub-dictionaries, carrying the compound key down.
            flat.update(flatten_dict(value, compound, sep))
        else:
            flat[compound] = value
    return flat
def check_and_convert_string(value):
    """Convert a string to int, float or None where possible.

    Returns an int if the string parses as an integer, else a float if
    it parses as a float, else ``None`` for the literal string 'None',
    otherwise the value unchanged.
    """
    # Fix: the old bare ``except:`` clauses swallowed everything,
    # including KeyboardInterrupt/SystemExit; only conversion failures
    # (ValueError for bad strings, TypeError for non-stringy inputs)
    # should fall through.
    try:
        return int(value)
    except (ValueError, TypeError):
        pass
    try:
        return float(value)
    except (ValueError, TypeError):
        pass
    return None if value == 'None' else value
def unflatten_dict(d, sep=':'):
    """Rebuild a nested dict from keys joined by ``sep``.

    Inverse of flattening: 'a:b:c' -> {'a': {'b': {'c': value}}}.
    """
    nested = {}
    for compound, value in d.items():
        *parents, leaf = compound.split(sep)
        node = nested
        for part in parents:
            # Walk (and create, if needed) the intermediate levels.
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
def convert_keys(d):
    """Recursively convert dict keys from strings back to int/float/None.

    Non-dict values are returned untouched; keys are converted with
    ``check_and_convert_string`` at every nesting level.
    """
    # Base case: leaves pass through unchanged.
    if not isinstance(d, dict):
        return d
    return {check_and_convert_string(key): convert_keys(value)
            for key, value in d.items()}
def csv2yaml(address, config_name = 'config_csv.yml', csv_folder_name = 'csv'):
    """Convert a folder of WSIMOD CSV config files into a single YAML config.

    Reads every regular file in ``address``/``csv_folder_name``:
    - a filename containing 'Dates' is treated as a one-column list of dates,
    - a filename containing 'Sim_params' supplies top-level simulation
      parameters (one name/value pair per row),
    - any other file holds node or arc definitions (one object per row).
    The combined dictionary is written to ``address``/``config_name``
    via ``write_yaml``.
    """
    csv_path = os.path.join(address, csv_folder_name)
    # Every regular file in the csv folder is treated as input.
    csv_list = [os.path.join(csv_path, f) for f in os.listdir(csv_path) if os.path.isfile(os.path.join(csv_path, f))]
    objs_type = {'nodes' : {},
                 'arcs' : {}}
    for fid in csv_list:
        with open(fid, "rt") as f:
            if 'Dates' in fid:
                # Single-column file: first row is the header, the rest are dates.
                reader = csv.reader(f, delimiter=',')
                dates = []
                for row in reader:
                    dates.append(row[0])
                objs_type['dates'] = dates[1:]
            else:
                reader = csv.DictReader(f, delimiter=',')
                data = {}
                for row in reader:
                    formatted_row = {}
                    for key, value in row.items():
                        if value:
                            if ('[' in value) & (']' in value):
                                #Convert list-like strings back into typed lists
                                value = value.strip('[]') # Remove the brackets
                                value = value.replace("'",'') # Remove the string bits
                                value = value.split(', ') # Split by comma
                                value = [check_and_convert_string(x) for x in value]
                            else:
                                #Convert ints, floats, 'None' and plain strings
                                value = check_and_convert_string(value)
                            #Store converted values (empty cells are dropped)
                            formatted_row[key] = value
                    if 'Sim_params' not in fid:
                        # NOTE(review): assumes every non-Sim_params row has a
                        # 'label' column ('nodes' or 'arcs'); a file without it
                        # raises KeyError here and leaves ``label`` unbound for
                        # the merge below — confirm against yaml2csv's output.
                        label = formatted_row['label']
                        del formatted_row['label']
                        # Rebuild nested dicts from ':'-joined column names and
                        # convert their keys back to int/float where applicable.
                        formatted_row = unflatten_dict(formatted_row)
                        formatted_row = convert_keys(formatted_row)
                    data[row['name']] = formatted_row
                if 'Sim_params' in fid:
                    # Simulation parameters are merged at the top level.
                    objs_type = {**objs_type, **{x : y['value'] for x, y in data.items()}}
                else:
                    objs_type[label] = {**objs_type[label], **data}
    write_yaml(address, config_name, objs_type)
def yaml2csv(address, config_name = 'config.yml', csv_folder_name = 'csv'):
    """Explode a WSIMOD YAML config into per-type CSV files.

    Nodes and arcs are grouped by their type into one CSV per type;
    dates go to ``Dates.csv`` and the remaining top-level settings to
    ``Sim_params.csv``. Files are written into
    ``address``/``csv_folder_name`` (created if missing).
    """
    with open(os.path.join(address, config_name), "r") as file:
        data = yaml.safe_load(file)
    #Format into an easy-to-write tabular structure, grouped by object type
    objs_type = {}
    for objects, object_label in zip([data['nodes'],data['arcs']],['nodes','arcs']):
        for key, value in objects.items():
            if isinstance(value, dict):
                #Identify node type (an explicit override wins)
                if 'node_type_override' in value.keys():
                    type_ = value['node_type_override']
                elif 'type_' in value.keys():
                    type_ = value['type_']
                else:
                    type_ = False
                if type_:
                    #Flatten nested dictionaries into ':'-joined column names
                    new_dict = {}
                    if type_ not in objs_type.keys():
                        objs_type[type_] = {}
                    for key_, value_ in value.items():
                        if isinstance(value_,dict):
                            new_dict[key_] = flatten_dict(value_, key_, ':')
                    for key_, value_ in new_dict.items():
                        # Replace each nested dict with its flattened columns.
                        del value[key_]
                        value = {**value, **value_}
                    # Remember whether this object was a node or an arc so
                    # csv2yaml can reassemble it under the right key.
                    value['label'] = object_label
                    objs_type[type_][key] = value
    del data['nodes']
    del data['arcs']
    if 'dates' in data.keys():
        objs_type['Dates'] = data['dates']
        del data['dates']
    # Whatever remains at the top level is a simulation parameter.
    objs_type['Sim_params'] = {x : {'name':x,'value':y} for x, y in data.items()}
    csv_dir = os.path.join(address,csv_folder_name)
    if not os.path.exists(csv_dir):
        os.mkdir(csv_dir)
    for key, value in objs_type.items():
        if key == 'Sim_params':
            fields = ['name','value']
        elif key == 'Dates':
            fields = ['date']
        else:
            # Union of all columns used by objects of this type, with 'name'
            # moved to the front. NOTE(review): assumes each object dict
            # carries a 'name' entry — confirm against the YAML schema.
            fields = {}
            for value_ in value.values():
                fields = {**fields, **value_}
            del fields['name']
            fields = ['name'] + list(fields.keys())
        with open(os.path.join(csv_dir, '{0}.csv'.format(key)), 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(fields)
            if key == 'Dates':
                for date in value:
                    writer.writerow([date])
            else:
                for key_, value_ in value.items():
                    # Columns an object lacks are written as empty (None) cells.
                    writer.writerow([str(value_[x]) if x in value_.keys() else None for x in fields])
| null |
wsimod/orchestration/model.py
|
model.py
|
py
| 42,982 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "wsimod.core.core.WSIObj",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.nodes.Node",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "wsimod.nodes",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "inspect.getfullargspec",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.NON_ADDITIVE_POLLUTANTS",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.FLOAT_ACCURACY",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.items",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "wsimod.nodes",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "wsimod.nodes.values",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "wsimod.nodes",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "wsimod.nodes",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "wsimod.nodes",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.NON_ADDITIVE_POLLUTANTS",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.FLOAT_ACCURACY",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "yaml.safe_dump",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "yaml.representer",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "dill.load",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "dill.dump",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "wsimod.nodes",
"line_number": 345,
"usage_type": "argument"
},
{
"api_name": "wsimod.arcs.arcs",
"line_number": 376,
"usage_type": "argument"
},
{
"api_name": "wsimod.nodes",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes",
"line_number": 470,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 559,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 560,
"usage_type": "attribute"
},
{
"api_name": "os.devnull",
"line_number": 560,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 563,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 654,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 669,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 669,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.ADDITIVE_POLLUTANTS",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 674,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.FLOAT_ACCURACY",
"line_number": 679,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 679,
"usage_type": "name"
},
{
"api_name": "math.log10",
"line_number": 681,
"usage_type": "call"
},
{
"api_name": "wsimod.core.constants.FLOAT_ACCURACY",
"line_number": 690,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 690,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 699,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 699,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.land.ImperviousSurface",
"line_number": 712,
"usage_type": "argument"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 727,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 727,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 737,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 737,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.nodes.QueueTank",
"line_number": 743,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.nodes.Tank",
"line_number": 743,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.nodes.ResidenceTank",
"line_number": 743,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 748,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 748,
"usage_type": "name"
},
{
"api_name": "wsimod.nodes.land.ImperviousSurface",
"line_number": 753,
"usage_type": "argument"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 768,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 768,
"usage_type": "name"
},
{
"api_name": "wsimod.core.constants.POLLUTANTS",
"line_number": 778,
"usage_type": "attribute"
},
{
"api_name": "wsimod.core.constants",
"line_number": 778,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 816,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 816,
"usage_type": "attribute"
},
{
"api_name": "yaml.dump",
"line_number": 817,
"usage_type": "call"
},
{
"api_name": "yaml.SafeDumper",
"line_number": 821,
"usage_type": "attribute"
},
{
"api_name": "gzip.open",
"line_number": 825,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 841,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 847,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 904,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 904,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 905,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 912,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 918,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 953,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 953,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 954,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 995,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 995,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 997,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 997,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 998,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 1013,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1013,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 1014,
"usage_type": "call"
}
] |
565670345
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 10:21:53 2019

@author: SEBASTIAN LAVERDE

Contour-based segmentation experiments on a HiRISE Mars map tile: the
image is thresholded at several fixed levels (plus an adaptive one) and
the resulting binary masks are used to find, filter and display contours
and their centroids.
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt  # NOTE(review): unused in this script
import matplotlib.image as mpimg  # NOTE(review): unused in this script
import random

# Load the tile in colour (flag 1) and build a grayscale copy for the
# adaptive threshold; also compute Canny edges for visual comparison.
mars01 = cv2.imread("hirise-map-proj-v3/map-proj-v3/ESP_011283_2265_RED-0013.jpg", 1)
mars01_gray = cv2.cvtColor(mars01, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(mars01, 100, 70)
cv2.imshow("Original", mars01)
cv2.imshow("Canny",edges)

# Fixed-level binary thresholds on channel 0 at levels 50 / 70 / 30.
res,thresh = cv2.threshold(mars01[:,:,0], 50, 255, cv2.THRESH_BINARY)
res2,thresh2 = cv2.threshold(mars01[:,:,0], 70, 255, cv2.THRESH_BINARY)
res3,thresh3 = cv2.threshold(mars01[:,:,0], 30, 255, cv2.THRESH_BINARY)
print(mars01.shape)
thresh_adapt = cv2.adaptiveThreshold(mars01_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1) #adaptive threshold (Gaussian, 115px window)
#_, contours, hierachy = cv2.findContours(thresh_adapt, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #find contours in the thresh (binary img)
# NOTE(review): the 3-item return (image, contours, hierarchy) is the
# OpenCV 3.x findContours signature; OpenCV 4 returns only 2 values —
# confirm the installed version. ('hierachy' is a typo for 'hierarchy'.)
_, contours, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.imshow("thresh_50", thresh)
cv2.imshow("thresh_70", thresh2)
cv2.imshow("thresh_30", thresh3)
cv2.imshow("thresh_adapt", thresh_adapt)

# Keep only contours whose area lies in (50, 500).
filtered = []
for c in contours:
    if (cv2.contourArea(c) < 50 or cv2.contourArea(c) > 500):continue #this is a parameter to be set
    filtered.append(c)
print(len(contours))
print(len(filtered))

# Draw each surviving contour in a random colour on a blank canvas and
# record its centroid from the image moments (cx = m10/m00, cy = m01/m00;
# m00 is the contour area, nonzero because of the area filter above).
Moments_cx = []
Moments_cy = []
objects = np.zeros([mars01_gray.shape[0],mars01_gray.shape[1],3], 'uint8')
for c in filtered:
    col = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
    cv2.drawContours(objects,[c], -1, col, -1)
    area = cv2.contourArea(c)
    p = cv2.arcLength(c,True)
    M = cv2.moments(c)
    cx = int( M['m10']/M['m00']) #to calculate the centroid of an image we use the moments of an image
    cy = int( M['m01']/M['m00'])
    Moments_cx.append(cx)
    Moments_cy.append(cy)
    print("area: ",area,"perimeter: ",p)

# Mark every centroid with a red circle on a copy of the original tile.
mars01copy = mars01.copy()
i = 0
print(Moments_cx, ", ", Moments_cy)
for obj in range(len(Moments_cx)):
    cv2.circle(mars01copy, (Moments_cx[i],Moments_cy[i]), 10, (0,0,255), 1)
    i+=1
cv2.imshow("Original_method1", mars01copy)
cv2.namedWindow("Contours",cv2.WINDOW_NORMAL)
cv2.imshow("Contours",objects)
# Second pass: same pipeline on the level-30 threshold, with a looser
# lower area bound (10 instead of 50).
_, contours2, hierachy2 = cv2.findContours(thresh3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
filtered2 = []
for c in contours2:
    if (cv2.contourArea(c) < 10 or cv2.contourArea(c) > 500):continue #this is a parameter to be set
    filtered2.append(c)
print("second length: ", len(contours2))
print("filtered: ", len(filtered2))
objects2 = np.zeros([mars01_gray.shape[0],mars01_gray.shape[1],3], 'uint8')
print("\nSecond test with threshold value in 30\n")

# Draw the filtered contours and collect their centroids (same moment
# formulas as above; the area filter guarantees m00 != 0).
Moments_cx1 = []
Moments_cy1 = []
for c in filtered2:
    col = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
    cv2.drawContours(objects2,[c], -1, col, -1)
    area = cv2.contourArea(c)
    p = cv2.arcLength(c,True)
    M = cv2.moments(c)
    cx = int( M['m10']/M['m00']) #to calculate the centroid of an image we use the moments of an image
    cy = int( M['m01']/M['m00'])
    Moments_cx1.append(cx)
    Moments_cy1.append(cy)
    print("area: ",area,"perimeter: ",p)

# Mark these centroids in green, drawn directly onto the original image.
i = 0
print(Moments_cx1, ", ", Moments_cy1)
for obj in range(len(Moments_cx1)):
    cv2.circle(mars01, (Moments_cx1[i],Moments_cy1[i]), 10, (0,255,0), 1)
    i+=1
cv2.imshow("Original_method2", mars01)
cv2.imshow("Gray", mars01_gray)
cv2.namedWindow("Contours_2",cv2.WINDOW_NORMAL)
cv2.imshow("Contours_2",objects2)

# Block until a key is pressed, then close all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
| null |
image_segmentation/ImageSegmentation.py
|
ImageSegmentation.py
|
py
| 3,584 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.Canny",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cv2.arcLength",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.moments",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.arcLength",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.moments",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 115,
"usage_type": "call"
}
] |
543136763
|
from comet_ml import Experiment # must be imported before keras
import numpy as np
import pandas as pd
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Dropout, Conv2D, MaxPooling2D, UpSampling2D
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
import tensorflow as tf
import pickle

# check GPU availability (log_device_placement prints device assignments)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

# Input locations and batch size shared by both generators below.
img_dir = 'images_cropped/'
metadata_filepath = 'X_meta.csv'
batch_size = 128

# set up experiment logging
# set COMET_API_KEY in your environment variables
# or pass it as the first value in the Experiment object
# NOTE(review): an API key is hardcoded here — it should be moved to the
# COMET_API_KEY environment variable and rotated.
experiment = Experiment(
    "CgFCfEAIYJVIxez3BZzCqFeeX",
    workspace="ceceshao1", project_name="aleksey-open-fruits"
)
# get X and y values for flow_from_directory
# NOTE(review): X and y are loaded but not referenced again below — the
# labels actually come from the directory structure via
# flow_from_directory; confirm before removing.
X_meta = pd.read_csv(metadata_filepath)
X = X_meta[['CroppedImageURL']].values
y = X_meta['LabelName'].values

# define data generators
# Training generator: rescale plus geometric augmentation; 20% of the
# images are held out as a validation subset via validation_split.
train_datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=1/255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=0.2
)
# NOTE(review): test_datagen is defined but never used; the validation
# generator below is drawn from train_datagen and therefore shares its
# augmentation settings — confirm whether that is intended.
test_datagen = ImageDataGenerator(
    rescale=1/255,
)
train_generator = train_datagen.flow_from_directory(
    img_dir,
    target_size=(96, 96),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training',
)
validation_generator = train_datagen.flow_from_directory(
    img_dir,
    target_size=(96, 96),
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation'
)
# define the model
# Start a new Sequential model with two 96x96 conv layers; the 2x2 max
# pooling then halves the spatial size to 48x48, which is the input size
# the pretrained model below expects.
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), input_shape=(96, 96, 3), activation='relu', padding='same'))
model.add(Conv2D(64, kernel_size=(3, 3), input_shape=(96, 96, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# load the pretrained model
prior = load_model('resnet48_128/model-48.h5')

# Reuse the pretrained model's layers, skipping its input layer and first
# convolutional layer (their role is now played by the three new layers
# above, which accept 96x96 input). prior.layers[0] is assumed to be a
# nested sub-model whose own layers are iterated here — TODO confirm the
# saved model's structure.
for layer in prior.layers[0].layers[2:]:
    model.add(layer)
# re-add the feedforward (classifier) layers on top
for layer in prior.layers[1:]:
    model.add(layer)
# the pretrained CNN layers are already marked non-trainable;
# freeze the top (classifier) layers as well. The same layer objects were
# added to ``model`` above, so the flag applies there too.
for layer in prior.layers[-4:]:
    layer.trainable = False
# compile the model
model.compile(
    optimizer=RMSprop(),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
# generate class weights
import os
labels_count = dict()
# Fix: iterate class folders in *sorted* order so the enumerate() indices
# below line up with Keras' flow_from_directory, which assigns class
# indices to the sorted class names; os.listdir order is arbitrary.
# Hidden entries (leading '.') are skipped.
for img_class in sorted(ic for ic in os.listdir('images_cropped/') if ic[0] != '.'):
    labels_count[img_class] = len(os.listdir('images_cropped/' + img_class))
total_count = sum(labels_count.values())
# Inverse-frequency weight per class index.
class_weights = {cls: total_count / count for cls, count in enumerate(labels_count.values())}
# NOTE(review): class_weights is computed but never passed to training
# (fit_generator's class_weight argument) — confirm whether that is intended.
# fit the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=len(train_generator.filenames) // batch_size,
    epochs=20,
    validation_data=validation_generator,
    # Bug fix: validation_steps must be derived from the *validation*
    # generator; using the training generator's file count made Keras try
    # to draw far more validation batches than exist in the 20% split.
    validation_steps=len(validation_generator.filenames) // batch_size,
    callbacks=[
        EarlyStopping(patience=3, restore_best_weights=True),
        ReduceLROnPlateau(patience=2)
    ]
)
# save model artifacts: the trained model plus its training history
model.save('/opt/ml/model/model-96.h5')
with open('/opt/ml/model/model-96-history.pickle', 'wb') as fp:
    pickle.dump(history.history, fp)
experiment.end()
| null |
models/resnet96_128/train.py
|
train.py
|
py
| 3,830 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.Session",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "comet_ml.Experiment",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.RMSprop",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.EarlyStopping",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ReduceLROnPlateau",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 132,
"usage_type": "call"
}
] |
34480743
|
from BTrees.OOBTree import OOBTree
from copy import copy
from datetime import datetime
from opengever.base.pathfinder import PathFinder
from opengever.base.sentry import log_msg_to_sentry
from plone import api
from plone.portlets.constants import CONTEXT_ASSIGNMENT_KEY
from plone.protect.auto import ProtectTransform
from plone.protect.auto import safeWrite
from plone.protect.interfaces import IDisableCSRFProtection
from plone.protect.utils import SAFE_WRITE_KEY
from pprint import pformat
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.interfaces import IPloneSiteRoot
from ZODB.utils import u64
from zope.annotation.attribute import AttributeAnnotations
from zope.annotation.interfaces import IAnnotatable
from zope.annotation.interfaces import IAnnotations
from zope.component import adapts
from zope.component.hooks import getSite
from zope.interface import Interface
from zope.publisher.interfaces.browser import IBrowserRequest
import logging
import os
import subprocess
import transaction
LOG = logging.getLogger('opengever.base.protect')
def unprotected_write(obj):
    """Flag ``obj`` as safe to write during a GET request.

    Marks the object so plone.protect's write protection will not
    complain when it is modified. The flag is applied to this object
    only (not recursively), with two special cases handled here:

    - ``AttributeAnnotations`` wrappers are unwrapped so the underlying
      ``__annotations__`` storage is flagged instead.
    - BTrees get every bucket flagged as well, since a write may touch
      any bucket.

    Most of the work is delegated to ``safeWrite()``; see the upstream
    TODO about moving this functionality into plone.protect.

    :param obj: object to mark; ``None`` is tolerated.
    :returns: ``obj`` unchanged (handy for call chaining).
    """
    if obj is None:
        return obj

    if isinstance(obj, AttributeAnnotations):
        # Non-persistent annotations wrapper: flag the real storage.
        unprotected_write(getattr(obj.obj, '__annotations__', None))
        return obj

    if getattr(obj, '_firstbucket', None):
        # BTree: every bucket must be write-enabled, not just the tree.
        for current_bucket in get_buckets_for_btree(obj):
            safeWrite(current_bucket)

    safeWrite(obj)
    return obj
def get_buckets_for_btree(tree):
    """Yield every bucket of *tree*, starting at ``_firstbucket``.

    Walks the singly linked bucket chain via ``_next`` until the end.
    Assumes the tree has at least one bucket.
    """
    current = tree._firstbucket
    while True:
        yield current
        if not current._next:
            break
        current = current._next
class OGProtectTransform(ProtectTransform):
    """plone.protect CSRF transform with extra safeguards and logging.

    Extends the stock ``ProtectTransform`` to:
    - abort the transaction when rendering @@confirm-action itself
      modified objects (which would re-trigger the CSRF check),
    - break redirect loops on the @@confirm-action view,
    - log CSRF incidents to Sentry and, optionally, to a file.
    """
    adapts(Interface, IBrowserRequest)

    def _get_current_view(self):
        # Last traversal step is the view name ('' when unknown).
        return getattr(self.request, 'steps', [''])[-1]

    def _abort_txn_on_confirm_action_view(self):
        """Abort the transaction if @@confirm-action itself caused writes.

        Rendering the confirmation view must not register changes, or the
        CSRF check would fire again and loop; abort to be safe.

        :returns: ``True`` if the transaction was aborted, else ``None``.
        """
        if self._get_current_view() == '@@confirm-action':
            if len(self._registered_objects()) > 0 and \
                    not IDisableCSRFProtection.providedBy(self.request):
                transaction.abort()
                LOG.error(
                    "Error checking for CSRF. Transaction was modified when "
                    "visiting @@confirm-action view. Transaction aborted!)")
                return True

    def _redirect_loop_detected(self):
        # This should never happen: If the current view is @@confirm-action,
        # AND the original_url points to the same view, we assume
        # that there's a redirect loop.
        # If this happened, the _abort_txn_on_confirm_action_view()
        # safeguard above must have failed.
        redirect_url = self.request.form.get('original_url', '')
        return (self._get_current_view() == '@@confirm-action'
                and '@@confirm-action' in redirect_url)

    def _check(self):
        """Run the CSRF check, logging incidents when it fails."""
        # NOTE(review): bool() of any non-empty string is True, so
        # CSRF_LOG_ENABLED=false still enables logging; only an empty
        # value disables it. Kept as-is to preserve behavior.
        should_log_csrf = bool(os.environ.get('CSRF_LOG_ENABLED', True))

        # Defaults so the env dict below is always well-defined even when
        # file logging is disabled (previously raised NameError in that
        # case, because the names were only bound inside the if-branch).
        registered_objects_summary = []
        request_dict_before_check = {}
        if should_log_csrf:
            # Keep a summary of _registered_objects and a shallow copy of
            # request.__dict__ if we want to log the incident later
            registered_objects_summary = self._registered_objects_summary()
            request_dict_before_check = copy(self.request.__dict__)

        self._abort_txn_on_confirm_action_view()

        if self._redirect_loop_detected():
            LOG.error("Detected redirect loop on @@confirm-action view! "
                      "Breaking loop by redirecting to Plone site.")
            site = api.portal.get()
            return self.request.RESPONSE.redirect(site.absolute_url())

        is_safe = super(OGProtectTransform, self)._check()

        if not is_safe:
            user = api.user.get_current()
            env = {
                'username': user.getUserName() if user else 'unknown-user',
                'url': self.request.get('ACTUAL_URL', ''),
                'registered_objects_summary': registered_objects_summary,
                'request_dict': request_dict_before_check,
            }
            # Always (try to) log to Sentry
            self._log_csrf_incident_to_sentry(env)
            if should_log_csrf:
                # Log to file if enabled
                self._log_csrf_incident(env)

        return is_safe

    def _registered_objects_summary(self):
        """Summarize the contents of _registered_objects in a way that is
        suited for logging:
        - OID
        - Object class
        - Object's repr, cropped to 100 characters
        """
        summary = []
        for obj in self._filtered_registered_objects():
            oid = hex(u64(getattr(obj, '_p_oid', '\x00' * 8)))
            klass = repr(getattr(obj, '__class__', None))
            obj_summary = repr(obj)[:100]
            summary.append({'oid': oid, 'class': klass, 'obj': obj_summary})
        return summary

    def _log_csrf_incident(self, env):
        """Log a CSRF incident to a timestamped file and prune old logs.

        :param env: dict with 'username', 'url', summary and request data.
        """
        # Maximum age of kept incident logs, in minutes (default: 7 days).
        max_age = int(os.environ.get('CSRF_LOG_MAX_AGE', 7 * 24 * 60))
        ts = datetime.now()
        log_dir = PathFinder().var_log
        log_filename = 'csrf-{}-{}.log'.format(
            ts.strftime('%Y-%m-%d_%H-%M-%S'), env['username'])
        logfile_path = os.path.join(log_dir, log_filename)
        with open(logfile_path, 'w') as logfile:
            for line in self._build_csrf_report(env):
                log_message = '{} {}\n'.format(ts.isoformat(), line)
                logfile.write(log_message)
        LOG.warn('CSRF incident has been logged to {}'.format(logfile_path))
        # Remove old incident logs
        subprocess.check_call(
            "find {} -name 'csrf-*.log' -type f -mmin +{} -delete".format(
                log_dir, max_age), shell=True)

    def _log_csrf_incident_to_sentry(self, env):
        """Report the incident to Sentry; never propagate errors from here."""
        try:
            extra = {'referrer': self.request.get('HTTP_REFERER', ''),
                     'filtered_registered_objects': env['registered_objects_summary']}
        except Exception as e:
            LOG.error('Error while preparing CSRF incident data for Sentry'
                      ' (%r)' % e)
            return
        logged = log_msg_to_sentry(
            'CSRF @@confirm-action triggered',
            request=self.request,
            url=env['url'],
            extra=extra,
            fingerprint=['{{ default }}', env['url']],
            level='warning',
        )
        if logged:
            LOG.warn('Logged CSRF incident to Sentry')

    def _build_csrf_report(self, env):
        """Generator that produces a sequence of lines to be logged to a file
        as the CSRF incident report.
        """
        request_dict = env['request_dict']
        # Drop response from request dict - we know what we're gonna send
        request_dict.pop('response', None)
        request_dict.get('other', {}).pop('RESPONSE', None)
        # Remove basic auth header before logging
        request_dict.pop('_auth', {})
        request_dict.get('_orig_env', {}).pop('HTTP_AUTHORIZATION', None)
        yield 'CSRF incident at {}'.format(env['url'])
        yield '=' * 80
        yield '\n'
        yield 'User:'
        yield '-' * 80
        yield env['username']
        yield '\n'
        yield 'HTTP_REFERER:'
        yield '-' * 80
        yield request_dict.get('environ', {}).get('HTTP_REFERER', '')
        yield '\n'
        yield 'registered_objects_summary:'
        yield '-' * 80
        yield '\n' + pformat(env['registered_objects_summary'])
        yield '\n'
        yield 'Request:'
        yield '-' * 80
        yield '\n' + pformat(request_dict)
        yield '\n'

    def _registered_objects(self):
        # Whitelist well-known safe writes before asking plone.protect.
        self._global_unprotect()
        return super(OGProtectTransform, self)._registered_objects()

    def _filtered_registered_objects(self):
        """Return registered objects minus those whitelisted via safeWrite()."""
        # Get list of whitelisted (safe) oids
        safe_oids = []
        if SAFE_WRITE_KEY in getattr(self.request, 'environ', {}):
            safe_oids = self.request.environ[SAFE_WRITE_KEY]

        def is_not_safe(obj):
            oid = getattr(obj, '_p_oid', None)
            if oid is not None and oid in safe_oids:
                return False
            return True

        # Filter objects to only the ones that aren't safe
        filtered_objs = filter(is_not_safe, self._registered_objects())
        return filtered_objs

    def _global_unprotect(self):
        """Whitelist writes that legitimately happen during GET requests."""
        # portal_memberdata._members cache will be written sometimes.
        if IPloneSiteRoot.providedBy(getSite()):
            unprotected_write(getToolByName(getSite(), 'portal_memberdata')._members)

        context = self.getContext()

        # always allow writes to context's annotations.
        if IAnnotatable.providedBy(context):
            annotations = IAnnotations(context)
            unprotected_write(annotations)

            if CONTEXT_ASSIGNMENT_KEY in annotations:
                # also allow writes to context portlet assignments
                unprotected_write(annotations[CONTEXT_ASSIGNMENT_KEY])
class ProtectAwareAttributeAnnotations(AttributeAnnotations):
    """AttributeAnnotations that cooperates with plone.protect.

    Zope's ``AttributeAnnotations`` initializes the ``__annotations__``
    storage lazily; when that first write happens we must flag both the
    new storage and its owning object as safe to write.
    """

    def __setitem__(self, key, value):
        """Store *value* under *key*, creating the storage on first use."""
        try:
            storage = self.obj.__annotations__
        except AttributeError:
            # First write: create the BTree and whitelist both the new
            # storage and the object it is being attached to.
            storage = unprotected_write(OOBTree())
            unprotected_write(self.obj).__annotations__ = storage
        storage[key] = value
| null |
opengever/base/protect.py
|
protect.py
|
py
| 10,157 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "zope.annotation.attribute.AttributeAnnotations",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "plone.protect.auto.safeWrite",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "plone.protect.auto.safeWrite",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "plone.protect.auto.ProtectTransform",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "zope.component.adapts",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "zope.interface.Interface",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "zope.publisher.interfaces.browser.IBrowserRequest",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "plone.protect.interfaces.IDisableCSRFProtection.providedBy",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "plone.protect.interfaces.IDisableCSRFProtection",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "transaction.abort",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "copy.copy",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "plone.api.portal.get",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "plone.api.portal",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "plone.api",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "plone.api.user.get_current",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "plone.api.user",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "plone.api",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "ZODB.utils.u64",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "opengever.base.pathfinder.PathFinder",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_call",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "opengever.base.sentry.log_msg_to_sentry",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "plone.protect.utils.SAFE_WRITE_KEY",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "plone.protect.utils.SAFE_WRITE_KEY",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "Products.CMFPlone.interfaces.IPloneSiteRoot.providedBy",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "Products.CMFPlone.interfaces.IPloneSiteRoot",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "zope.component.hooks.getSite",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "Products.CMFCore.utils.getToolByName",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "zope.component.hooks.getSite",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "zope.annotation.interfaces.IAnnotatable.providedBy",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "zope.annotation.interfaces.IAnnotatable",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "zope.annotation.interfaces.IAnnotations",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "plone.portlets.constants.CONTEXT_ASSIGNMENT_KEY",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "plone.portlets.constants.CONTEXT_ASSIGNMENT_KEY",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "zope.annotation.attribute.AttributeAnnotations",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "BTrees.OOBTree.OOBTree",
"line_number": 281,
"usage_type": "call"
}
] |
167662047
|
import sys
#sys.stdin=open("input.txt",'rt')  # redirect stdin to a text file for local testing
from collections import deque

# Shortest path through an n x n maze (0 = open, 1 = wall) from the
# top-left corner to the bottom-right corner, via breadth-first search.
n = 7
board = [list(map(int, input().split())) for _ in range(n)]

# dist[r][c] == 0 means "not visited yet"; otherwise it is the number
# of steps from (0, 0).
dist = [[0] * n for _ in range(n)]

queue = deque([(0, 0)])
moves = ((-1, 0), (0, 1), (1, 0), (0, -1))  # up, right, down, left

while queue:
    row, col = queue.popleft()
    if (row, col) == (n - 1, n - 1):
        break
    for d_row, d_col in moves:
        nxt_row = row + d_row
        nxt_col = col + d_col
        if 0 <= nxt_row <= n - 1 and 0 <= nxt_col <= n - 1:
            if board[nxt_row][nxt_col] == 0 and dist[nxt_row][nxt_col] == 0:
                dist[nxt_row][nxt_col] = dist[row][col] + 1
                queue.append((nxt_row, nxt_col))

# Target never reached -> its distance stayed 0.
print(dist[n - 1][n - 1] if dist[n - 1][n - 1] else -1)
| null |
Self_study/2020_Winter/2. Algorithms with Python/파이썬 알고리즘 문제 및 채점/섹션7. 깊이, 넓이 우선탐색(DFS, BFS) 활용/9. 미로의 최단거리 통로/AA.py
|
AA.py
|
py
| 840 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
152649808
|
import sys
from PyQt5 import uic
from recommend import Recommend
from complain import Complain
from profil import Profil
from PyQt5.QtWidgets import QApplication, QMainWindow
class Main(QMainWindow):
    """Main application window: loads main.ui and routes button clicks
    to the Recommend / Complain / Profile sub-windows."""

    def __init__(self, parent=None):
        super().__init__()
        uic.loadUi('main.ui', self)
        self.initUI()

    def initUI(self):
        """Apply the shared button style and wire up the click handlers."""
        wiring = (
            (self.btn_recommend, self.recommend),
            (self.btn_complain, self.complain),
            (self.btn_profil, self.profil),
            (self.btn_backToAutentification, self.backToAutentification),
        )
        for button, handler in wiring:
            button.setStyleSheet('QPushButton {background-color: #A3C1DA}')
            button.clicked.connect(handler)

    def recommend(self):
        # Open the "Recommend" section.
        self.m1 = Recommend(self)
        self.m1.show()

    def complain(self):
        # Open the "Complain" section.
        self.m2 = Complain(self)
        self.m2.show()

    def profil(self):
        # Open the "Profile" tab.
        self.m3 = Profil(self)
        self.m3.show()

    def backToAutentification(self):
        # Go back to the authentication screen by closing this window.
        self.close()
if __name__ == '__main__':
    # Script entry point: start the Qt application and show the main window.
    app = QApplication(sys.argv)
    window = Main()
    window.show()
    sys.exit(app.exec_())
| null |
main.py
|
main.py
|
py
| 1,567 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "recommend.Recommend",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "complain.Complain",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "profil.Profil",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 46,
"usage_type": "call"
}
] |
425086863
|
import pybullet as p
from time import sleep
import pybullet_data
import numpy as np
# Connect to a GUI physics server and make the bundled assets
# (plane.urdf, the biped model, the soft-body meshes) resolvable.
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())

# A deformable world is required before soft bodies can be loaded.
p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD)
p.setGravity(0, 0, -10)

# Static ground plane placed 2 units below the origin.
planeId = p.loadURDF("plane.urdf", [0,0,-2])
#boxId = p.loadURDF("cube.urdf", [0,0,4],useMaximalCoordinates = True)

cubeStartPos = [0, 0, 2]
cubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
botId = p.loadURDF("biped/biped2d_pybullet.urdf", cubeStartPos, cubeStartOrientation)

# Lighten the robot: base link (index -1) and every joint link get
# mass 0.1, and each joint is driven at constant velocity.
p.changeDynamics(botId, -1, mass=0.1)
for i in range(0, p.getNumJoints(botId)):
    p.changeDynamics(botId, i, mass=0.1)
    p.setJointMotorControl2(bodyUniqueId=botId, jointIndex=i, controlMode=p.VELOCITY_CONTROL, targetVelocity = 1, force = 100)
    print(i, p.getJointInfo(botId, i))

# Mass-spring soft body (useMassSpring=1); the NeoHookean* parameters
# are passed but inactive since useNeoHookean=0.
#softId = p.loadSoftBody("torus.vtk", useNeoHookean = 1, NeoHookeanMu = 60, NeoHookeanLambda = 200, NeoHookeanDamping = 0.01, useSelfCollision = 1, frictionCoeff = 0.5)
softId = p.loadSoftBody("tube.vtk", [0, 0, 0], mass=10, useNeoHookean = 0, NeoHookeanMu = 600, NeoHookeanLambda = 200,
                        NeoHookeanDamping = 0.01, useSelfCollision = 0, frictionCoeff = 0.5,
                        springElasticStiffness=500, springDampingStiffness=50, springBendingStiffness=50,
                        useMassSpring=1, useBendingSprings=1, collisionMargin=0.05)
# softId2 = p.loadSoftBody("tube.vtk", [0, 0, 2], mass=10, useNeoHookean = 0, NeoHookeanMu = 600, NeoHookeanLambda = 200,
#                         NeoHookeanDamping = 0.01, useSelfCollision = 0, frictionCoeff = 0.5,
#                         springElasticStiffness=500, springDampingStiffness=50, springBendingStiffness=50,
#                         useMassSpring=1, useBendingSprings=1, collisionMargin=0.1)

p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25)
# Manual stepping: the loop below calls stepSimulation() itself.
p.setRealTimeSimulation(0)
np.set_printoptions(precision=4, suppress=True)

# Pre-allocate a pool of 100 debug lines that get recycled each frame
# to visualize contact forces (creating fresh lines every frame leaks).
debug_lines = []
for i in range(100):
    line_id = p.addUserDebugLine([0,0,0], [0,0,0])
    debug_lines.append(line_id)

# Main simulation loop: runs until the GUI window is closed.
while p.isConnected():
    p.setGravity(0,0,-10)
    # NOTE(review): getSoftBodyData is not part of the standard pybullet
    # API — presumably a custom/patched build; returns per-node positions
    # plus contact points and contact forces. Confirm against the build.
    x, y, z, contX, contY, contZ, contForceX, contForceY, contForceZ = p.getSoftBodyData(softId)
    # Stack per-axis lists into (num_contacts, 3) arrays.
    contact_pt = np.stack((contX, contY, contZ)).T
    contact_force = np.stack((contForceX, contForceY, contForceZ)).T
    #print(contact_pt.shape)
    print('num nodes', len(x), 'contact nodes', len(contX))
    #print('fx', contForceX)
    #print('fy', contForceY)
    #print('fz', contForceZ)
    # Draw one force vector per contact; collapse unused pool lines to a
    # zero-length segment so stale vectors from earlier frames vanish.
    for i in range(len(debug_lines)):
        if i < len(contX):
            debug_lines[i] = p.addUserDebugLine(contact_pt[i, :], contact_pt[i, :] + contact_force[i, :], lineWidth=3, replaceItemUniqueId=debug_lines[i])
        else:
            debug_lines[i] = p.addUserDebugLine([0,0,0], [0,0,0], replaceItemUniqueId=debug_lines[i])
    #sleep(1./240.)
    p.stepSimulation()
| null |
examples/pybullet/examples/soft_body_data.py
|
soft_body_data.py
|
py
| 2,832 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pybullet.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pybullet.GUI",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pybullet.setAdditionalSearchPath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pybullet_data.getDataPath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pybullet.resetSimulation",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pybullet.RESET_USE_DEFORMABLE_WORLD",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pybullet.setGravity",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pybullet.loadURDF",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pybullet.getQuaternionFromEuler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pybullet.loadURDF",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pybullet.changeDynamics",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pybullet.getNumJoints",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pybullet.changeDynamics",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pybullet.setJointMotorControl2",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pybullet.VELOCITY_CONTROL",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pybullet.getJointInfo",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pybullet.loadSoftBody",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pybullet.setPhysicsEngineParameter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pybullet.setRealTimeSimulation",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pybullet.addUserDebugLine",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pybullet.isConnected",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pybullet.setGravity",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pybullet.getSoftBodyData",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pybullet.addUserDebugLine",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pybullet.addUserDebugLine",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pybullet.stepSimulation",
"line_number": 71,
"usage_type": "call"
}
] |
370266890
|
#!/usr/bin/env python
# coding=utf-8
from flask import render_template, request, Response, redirect, url_for, Blueprint,abort
from flask.ext.login import login_required
from datetime import date, datetime
import flaskcms.config as myconfig
import math
import ast
import os
import time
import json
from flask.views import MethodView
from flask.ext.login import login_required
from flaskcms.lib import db
from flaskcms.view.ext import pager
from flaskcms.view import CommonView
class AdminView(CommonView):
    """Base class for admin modules.

    Subclasses are expected to provide (usually via an inner ``Meta``):
    model: the database model this module manages
    category: the module's name (used to build template paths)
    channels: the channels available within the module
    """

    def get_template_name(self, channel):
        """Resolve the admin template for *channel*; 404 on unknown channels."""
        if channel in self.channels:
            template = 'admin/admin_' + self.category + '_' + channel + '.html'
        else:
            return abort(404)
        return template

    def get_list_or_404(self):
        # Hook for subclasses; intentionally unimplemented here.
        pass

    def build_form_dict(self, form):
        """Convert a submitted form to a plain dict, then apply build_extra()."""
        data = dict([(key, form.get(key)) for key in form.keys()])
        return self.build_extra(data)

    def build_extra(self, data):
        # Hook: subclasses may enrich form data before model creation.
        return data

    @login_required
    def get(self, channel):
        """Render the admin page for *channel*."""
        contents = self.get_contents(channel)
        return render_template(self.get_template_name(channel), **contents)

    @login_required
    def put(self, channel):
        """Update an existing model instance from the submitted form data."""
        obj_id = request.args.get('id', '')
        try:
            # (dead 'pass' statement removed from the original try body)
            item = self.model.query.get(obj_id)
            for key in request.form.keys():
                setattr(item, key, request.form.get(key))
            db.session.commit()
            response = {"status": "success", "data": {"id": item.id}}
            return json.dumps(response)
        except Exception:
            # NOTE(review): broad catch preserved from the original;
            # consider rolling back the session and logging here.
            return '{"status":"error","reason":"data insert error"}'

    @login_required
    def post(self, channel):
        """Create a new model instance from the submitted form data."""
        params = request.form
        data = self.build_form_dict(params)
        try:
            item = self.model(**data)
            db.session.add(item)
            db.session.commit()
            response = {"status": "success", "data": {"id": item.id}}
            return json.dumps(response)
        except Exception:
            # The original re-raised here, which made the error response
            # below it unreachable; return the same JSON error shape as
            # put() and delete() instead.
            return '{"status":"error","reason":"data insert error"}'

    @login_required
    def delete(self, channel):
        """Delete the instances whose ids arrive comma-separated in ?ids=."""
        ids = request.args.get('ids', '')
        try:
            # Conversion is inside the try so malformed ids (e.g. an empty
            # parameter) yield the JSON error instead of an unhandled 500.
            new_ids = [int(raw_id) for raw_id in ids.split(',')]
            items = self.model.query.filter(self.model.id.in_(new_ids))
            for item in items:
                db.session.delete(item)
                self.delete_extra(item)
            db.session.commit()
            return '{"status":"success"}'
        except Exception:
            return '{"status":"error"}'

    def delete_extra(self, item):
        # Hook: subclasses may clean up related resources per deleted item.
        pass
class BaseAdmin(AdminView):
    """Admin landing pages that map channel names directly to templates."""

    def get_template_name(self, channel):
        """Return 'admin/<channel>.html', or the dashboard when no channel."""
        if not channel:
            return 'admin/admin.html'
        return 'admin/' + channel + '.html'

    @login_required
    def get(self, channel):
        """Render the resolved admin template with the channel's contents."""
        contents = self.get_contents(channel)
        return render_template(self.get_template_name(channel), **contents)
| null |
flaskcms/view/admin.py
|
admin.py
|
py
| 3,300 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flaskcms.view.CommonView",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.login_required",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.request.form.keys",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "flaskcms.lib.db.session.commit",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flaskcms.lib.db.session",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flaskcms.lib.db",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.login_required",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "flaskcms.lib.db.session.add",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "flaskcms.lib.db.session",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "flaskcms.lib.db",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flaskcms.lib.db.session.commit",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flaskcms.lib.db.session",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "flaskcms.lib.db",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.login_required",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flaskcms.lib.db.session.delete",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flaskcms.lib.db.session",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "flaskcms.lib.db",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "flaskcms.lib.db.session.commit",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flaskcms.lib.db.session",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flaskcms.lib.db",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "flask.ext.login.login_required",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.login_required",
"line_number": 110,
"usage_type": "name"
}
] |
390002150
|
import netCDF4
from netCDF4 import Dataset
import numpy as np
import datetime
import rioxarray, rasterio, xarray as xr
from osgeo import gdal
from rasterio.control import GroundControlPoint as GCP_r
from gdal import GCP as GCP_g
from rasterio.control import GroundControlPoint as GCP_r
import cv2
from affine import Affine as AffineLib
# your netcdf files
ncpath_2 = r"testdata/netcdfs/wrf_20200214_00_1.nc"
ncpath_3 = r"testdata/netcdfs/wrf_20200214_00_3.nc"
ncpath_4 = r"testdata/netcdfs/wrf_20200214_00_4.nc"
ncpath_5 = r"testdata/netcdfs/wrf_20200214_00_5.nc"
ncpath_6 = r"testdata/netcdfs/wrf_20200214_00_6.nc"
ncout_path = r"testdata/netcdfs/out/wrf_20200214_00_a.nc"

# reading them netCDF library, it is useful to get info
nc_in_2 = Dataset(ncpath_2, 'r', format='NETCDF4')
nc_in_3 = Dataset(ncpath_3, 'r', format='NETCDF4')
nc_in_4 = Dataset(ncpath_4, 'r', format='NETCDF4')
nc_in_5 = Dataset(ncpath_5, 'r', format='NETCDF4')
nc_in_6 = Dataset(ncpath_6, 'r', format='NETCDF4')

# coordinate variables from the first file (all files share the grid)
latitude = nc_in_2.variables['latitude']
longitude = nc_in_2.variables['longitude']

# my data does not contain time. So you will find out how to import your
# time data into netcdf.
# NOTE(review): 5 hourly stamps assumes each input file contributes
# exactly one time step to the concatenation — confirm for your data.
times = [datetime.datetime(2019, 10, 1) + datetime.timedelta(hours=i) for i in range(5)]

# some metadata
units = 'hours since 2019-10-01 00:00'
calendar = 'standard'

# Load the temperature field from each file as float32; dtype= does the
# conversion in one pass instead of element-by-element.
temperature_2 = np.array(nc_in_2.variables['t2_0'][:], dtype=np.float32)
temperature_3 = np.array(nc_in_3.variables['t2_0'][:], dtype=np.float32)
temperature_4 = np.array(nc_in_4.variables['t2_0'][:], dtype=np.float32)
temperature_5 = np.array(nc_in_5.variables['t2_0'][:], dtype=np.float32)
temperature_6 = np.array(nc_in_6.variables['t2_0'][:], dtype=np.float32)

# create netcdf
nc_out = Dataset(ncout_path, 'w', format='NETCDF4')

# dimensions — names must match those referenced by createVariable()
# below; the original created 'x_loc'/'y_loc', which made the 'lat' and
# 'lon' variable definitions fail with an unknown-dimension error.
nc_out.createDimension('time', size=len(times))
nc_out.createDimension('lat', size=len(latitude))
nc_out.createDimension('lon', size=len(longitude))

# create latitude axis
lat = nc_out.createVariable('lat', np.dtype('double').char, ('lat',))
lat.standard_name = 'latitude'
lat.long_name = 'latitude'
lat.units = 'degrees_north'
lat.axis = 'Y'

# create longitude axis
lon = nc_out.createVariable('lon', np.dtype('double').char, ('lon',))
lon.standard_name = 'longitude'
lon.long_name = 'longitude'
lon.units = 'degrees_east'
lon.axis = 'X'

# create time axis
time = nc_out.createVariable('time', np.dtype('double').char, ('time',))
time.long_name = 'time'
time.units = 'hours since 2019-10-01 00:00:00'
time.calendar = 'standard'
time.axis = 'T'
time[:] = netCDF4.date2num(times, units=units, calendar=calendar)
# Rebind the local name to the decoded dates (the variable on disk is
# already written above).
time = netCDF4.num2date(time[:], units=units, calendar=calendar)

# create variable array
temp_out = nc_out.createVariable('t2_0', np.dtype('double').char, ('time', 'lat', 'lon'))
temp_out.long_name = '2 metre temperature'
temp_out.units = 'K'

# copy axis from original dataset
lon[:] = longitude[:]
lat[:] = latitude[:]

# concatenating data along the time axis.
concat_temp = np.concatenate([temperature_2, temperature_3, temperature_4, temperature_5, temperature_6])
temp_out[:] = concat_temp[:]

nc_out.close()
nc_in_2.close()
nc_in_3.close()
nc_in_4.close()
nc_in_5.close()
nc_in_6.close()
| null |
basics_and_concatenating.py
|
basics_and_concatenating.py
|
py
| 3,439 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "netCDF4.Dataset",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "netCDF4.date2num",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "netCDF4.num2date",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 94,
"usage_type": "call"
}
] |
169424338
|
# coding: utf-8
"""
Redactium III -> Base Forms
Типовые базовые формы
Skillholders LLC, 2014
skillholders.com | [email protected]
"""
from django import forms
class ReForm(forms.Form):
"""
Базовый класс легковесной формы для модели
"""
# ID объекта (раз у нас форма для модели)
id = forms.IntegerField(required=False, widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
"""
Инициализация с блэкджеком и гимназистками
"""
init_with_me = kwargs.get('init_with', None)
if init_with_me:
del kwargs['init_with']
super(ReForm, self).__init__(*args, **kwargs)
# поля, соответствующие полям модели target_model
self.data_fields = ()
meta_class = getattr(self, 'Meta', None)
if meta_class:
self.mymodel = getattr(meta_class, 'target_model', None)
# поля, которые мы исключаем
self.exclude = getattr(meta_class, 'exclude', [])
# поля модели (замечание: убираем _id у FK-полей)
rawfields = vars(self.mymodel())
modelfields = (
x[:-3] if x.endswith('_id') else x for x in rawfields
)
modelfields = filter(lambda x: x not in self.exclude, modelfields)
# поля внешних ключей (если заканчиваются на _id)
foreign_keys = filter(lambda x: x.endswith('_id'), rawfields)
self.fk_fields = {}
for field in foreign_keys:
try:
field = field[:-3]
field_object = getattr(self.mymodel, field, None)
field_model = getattr(field_object.field, 'model', None)
self.fk_fields[field] = field_model
except AttributeError:
continue
self.data_fields = set(modelfields) & set(self.fields.keys())
# если у нас передан объект инициализации
if init_with_me:
self.init_with(init_with_me)
def init_with(self, data_obj):
"""
Инициализируем поля формы с помощью объекта
"""
for field in self.data_fields:
obj_value = getattr(data_obj, field, None)
if not obj_value:
id_field = '_'.join((field, 'id'))
obj_value = getattr(
data_obj, id_field, None
)
if field in self.fk_fields:
# если поле - внешний ключ, то нам нужен только ID
obj_value = obj_value.id if obj_value else 0
print('%s %s' % (field, obj_value))
self.fields[field].initial = obj_value
def get_object(self):
"""
Получаем объект типа target_model для заполненной формы
"""
ret = self.mymodel()
for field in self.data_fields:
tg_value = self.cleaned_data[field]
# проверяем, не является ли поле внешним ключом
if field in self.fk_fields:
if int(tg_value) is not 0:
tg_value = self.fk_fields[field].objects.get(
id__exact=tg_value
)
else:
tg_value = None
setattr(ret, field, tg_value)
print(vars(ret))
return ret
| null |
ephemeris/back/base/forms.py
|
forms.py
|
py
| 3,758 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.Form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.HiddenInput",
"line_number": 17,
"usage_type": "call"
}
] |
239954873
|
from apscheduler.schedulers.background import BackgroundScheduler
import datetime
import importlib
import gatt
import logging
import yaml
import subprocess
logging.basicConfig(filename='/var/log/turntouch.log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger('monitor')
manager = gatt.DeviceManager(adapter_name='hci0')
class TurnTouch(gatt.Device):
button_codes = {
b'\xff\x00': 'Off',
b'\xfe\x00': 'North Press',
b'\xef\x00': 'North Double',
b'\xfe\xff': 'North Hold',
b'\xfd\x00': 'East Press',
b'\xdf\x00': 'East Double',
b'\xfd\xff': 'East Hold',
b'\xfb\x00': 'West Press',
b'\xbf\x00': 'West Double',
b'\xfb\xff': 'West Hold',
b'\xf7\x00': 'South Press',
b'\x7f\x00': 'South Double',
b'\xf7\xff': 'South Hold'
}
button_presses = []
battery_notifications_sent = []
def __init__(self, mac_address, manager, buttons, name, controllers):
super().__init__(mac_address, manager)
self.sched = BackgroundScheduler()
self.sched.start()
self.button_actions = buttons
self.listening = False
self.name = name
self.controllers = controllers
def connect_succeeded(self):
super().connect_succeeded()
logger.info("Connected!")
def connect_failed(self, error):
super().connect_failed(error)
logger.info("Connect failed with error {}".format(error))
def services_resolved(self):
super().services_resolved()
button_status_service = next(s for s in self.services
if s.uuid == '99c31523-dc4f-41b1-bb04-4e4deb81fadd')
self.button_status_characteristic = next(c for c in button_status_service.characteristics
if c.uuid == '99c31525-dc4f-41b1-bb04-4e4deb81fadd')
self.button_status_characteristic.enable_notifications()
battery_status_service = next(s for s in self.services
if s.uuid.startswith('0000180f'))
self.battery_status_characteristic = next(c for c in battery_status_service.characteristics
if c.uuid.startswith('00002a19'))
self.battery_status_characteristic.read_value()
self.sched.add_job(self.battery_status_characteristic.read_value,
trigger='interval', minutes=1) #todo: reduce this
def characteristic_enable_notifications_succeeded(self, characteristic):
super().characteristic_enable_notifications_succeeded(characteristic)
logger.info("Connected to {}!".format(self.name))
def characteristic_value_updated(self, characteristic, value):
super().characteristic_value_updated(characteristic, value)
if characteristic == self.battery_status_characteristic:
percentage = int(int.from_bytes(value, byteorder='big') * 100/ 255)
key = 'battery_{}'.format(percentage)
if self.button_actions.get(key, False) and key not in self.battery_notifications_sent:
self.battery_notifications_sent.append(key)
self.perform('battery', str(percentage))
logger.info('Battery status: {}%'.format(percentage))
return
if value == b'\xff\x00': #off
return
self.button_presses.append(value)
if not self.listening:
self.listening = True
time = datetime.datetime.now() + datetime.timedelta(seconds=1)
self.sched.add_job(self.deduplicate_buttons, trigger='date', run_date=time)
def deduplicate_buttons(self):
self.listening = False
actions = [self.button_codes[p] for p in self.button_presses]
# work out what to do
first_words = [s.split(' ')[0] for s in actions]
second_words = [s.split(' ')[1] for s in actions]
self.button_presses = []
if len(set(first_words)) != 1:
logger.info("Too many presses too quickly")
return
direction = first_words[0]
if 'Double' in second_words:
self.perform(direction, 'Double')
elif 'Hold' in second_words:
self.perform(direction, 'Hold')
else:
self.perform(direction, 'Press')
def perform(self, direction, action):
logger.info("Performing {} {}".format(direction, action))
action = self.button_actions.get("{}_{}".format(direction.lower(), action.lower()), {'type': 'none'})
if action['type'] == 'none':
return
elif action['type'] in self.controllers:
self.controllers[action['type']].perform(action)
else:
logger.info("No controller found for action {}".format(action['type']))
if __name__ == '__main__':
try:
with open('config.yml') as f:
config = yaml.load(f)
logger.info('Config loaded: {}'.format(config))
except Exception as e:
config = []
logger.info("Error loading config: {}".format(e))
for c in config:
controllers = {}
for t in set([b['type'] for _, b in c['buttons'].items()]):
logger.info("Found command of type {}, trying to load controller".format(t))
m = importlib.import_module('controllers.{}_controller'.format(t))
controller = [k for k in m.__dict__.keys() if 'Controller' in k][0]
controllers[t] = getattr(m, controller)()
device = TurnTouch(
mac_address=c['mac'],
manager=manager,
buttons=c['buttons'],
name=c['name'],
controllers=controllers
)
logger.info("Trying to connect to {} at {}...".format(c['name'], c['mac']))
device.connect()
manager.run()
| null |
monitor.py
|
monitor.py
|
py
| 5,877 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gatt.DeviceManager",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gatt.Device",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "apscheduler.schedulers.background.BackgroundScheduler",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 141,
"usage_type": "call"
}
] |
532264254
|
import torch
import os
import sys
import torch
from torchvision.transforms import functional as F
from torch.utils.data import DataLoader
sys.path.append('/u/bryao/work/Documents/deeplabv3/utils')
from PIL import Image
import cv2
import glob
from build_samplers import make_data_sampler, make_batch_data_sampler
import json
from tqdm import tqdm
import pdb
class A3DDataset(torch.utils.data.Dataset):
def __init__(self, root):
self.W = 640 #1280
self.H = 320 #720
self.mean = torch.tensor([0.485, 0.456, 0.406])
self.std = torch.tensor([0.229, 0.224, 0.225])
img_root = '/home/data/vision7/A3D_2.0/frames/'
self.samples = []
all_img_folders = glob.glob(os.path.join(img_root, '*'))
for img_folder in tqdm(all_img_folders):
video_name = img_folder.split('/')[-1]
all_files = sorted(glob.glob(os.path.join(img_folder, 'images', '*.jpg')))
for idx, file in enumerate(all_files):
self.samples.append((video_name,
idx,
file
))
def __getitem__(self, index):
video_name, idx, file_name = self.samples[index]
frame = Image.open(file_name)
# resize
frame = F.resize(frame, (self.H, self.W))
frame = F.to_tensor(frame)
# DeeplabV3, normalize
frame = F.normalize(frame, mean=self.mean, std=self.std)
return video_name, idx, frame
def __len__(self):
return len(self.samples)
def make_dataloader(root,
shuffle=False,
is_train=False,
distributed=False,
batch_per_gpu=1,
num_workers=0,
max_iters=10000):
dataset = A3DDataset(root)
sampler = make_data_sampler(dataset, shuffle=shuffle, distributed=distributed, is_train=is_train)
batch_sampler = make_batch_data_sampler(dataset,
sampler,
aspect_grouping=False,
batch_per_gpu=batch_per_gpu,
max_iters=max_iters,
start_iter=0,
dataset_name='A3D')
dataloader = DataLoader(dataset,
num_workers=num_workers,
batch_sampler=batch_sampler)
return dataloader
| null |
datasets/a3d.py
|
a3d.py
|
py
| 2,620 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.utils",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.functional.resize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.functional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.functional.to_tensor",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.functional",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.functional.normalize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.functional",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "build_samplers.make_data_sampler",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "build_samplers.make_batch_data_sampler",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 68,
"usage_type": "call"
}
] |
515472616
|
"""pandas, seaborn, sklearn utils
This is an independent util script.
source activate py2
"""
import re
from glasbey import Glasbey
import plotly.graph_objs as go
import plotly.io as plio
import plotly.express as px
import plotly
plotly.io.orca.config.executable = "/home/yli11/.conda/envs/py2/bin/orca"
import plotly.io as pio
pio.orca.config.use_xvfb = True
import datetime
import uuid
import matplotlib
import pandas as pd
matplotlib.use('agg')
import seaborn as sns
import numpy as np
import scipy
import glob
import sys
import matplotlib.pyplot as plt
import os
from joblib import Parallel, delayed
from os.path import isfile,isdir
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
import matplotlib
import numpy as np
import scipy
import glob
import sys
import matplotlib.pyplot as plt
import os
import numpy as np
import getpass
import argparse
from matplotlib_venn import venn3,venn2
sys.setrecursionlimit(99999)
import umap
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans,AgglomerativeClustering,DBSCAN
"""some notes
common parameters
-----------------
for plotting
------------
xlabel="",ylabel="",title=""
sns
---
clustermap, heatmap can be saved directly by sns
barplot savefig has to use plt.savefig(bbox_inches='tight')
plotly colors
https://plot.ly/python/plotly-express/
"""
def longestSubstringFinder22(string1, string2):
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if (i + j < len1 and string1[i + j] == string2[j]):
match += string2[j]
else:
if (len(match) > len(answer)): answer = match
match = ""
def longestSubstringFinder(string1, string2):
answers = {}
s1 = re.split("\.|_|-",string1)
s2 = re.split("\.|_|-",string2)
flag = False
count = 0
for i in range(len(s1)):
try:
if not count in answers:
answers[count] = []
if s1[i] == s2[i]:
answers[count].append(s1[i])
else:
count += 1
except:
break
for k in answers:
answers[k] = "_".join(answers[k])
df = pd.DataFrame.from_dict(answers,orient="index")
# print (df)
df['len'] = [len(x.split("_")) for x in df[0]]
df = df.sort_values("len",ascending=False)
return [df[0].tolist()[0],df['len'].tolist()[0]]
def guess_label(names):
lines = []
for i in names:
for j in names:
if j == i:
continue
current = [i,j]
current+=longestSubstringFinder(i,j)
lines.append(current)
df = pd.DataFrame(lines)
# print (df)
df = df.sort_values(3,ascending=False)
df = df.drop_duplicates(0)
return df[3].tolist()
def common_substring_liyc(myList):
common_list =[]
for i in re.split('_|-|\.|,',myList[0]):
flag = True
for j in myList[1:]:
if not i in j:
flag = False
break
if flag:
common_list.append(i)
newList = []
for i in myList:
new_name = []
for j in re.split('_|-|\.|,',i):
if j.isnumeric():
continue
if j in common_list:
continue
if len(j) == 1:
continue
if len(j) <=3:
letter_flag = False
number_flag = False
for x in j:
if x.upper() in ['A',"B","C"]:
letter_flag = True
if x.isdigit():
number_flag = True
if number_flag and letter_flag:
continue
new_name.append(j)
newList.append("_".join(new_name))
return newList
from difflib import SequenceMatcher
def similar(a, b):
return 1-SequenceMatcher(None, a, b).ratio()
from sklearn.metrics import pairwise_distances
def stringList_to_distanceMatrix(myList):
my_iter = []
for i in myList:
for j in myList:
my_iter.append([i,j])
values = Parallel(n_jobs=-1,verbose=10)(delayed(similar)(x[0],x[1]) for x in my_iter)
df = pd.DataFrame(np.reshape(values, (len(myList), len(myList))))
df.index = myList
df.columns = myList
return df
def group_similar_names(myList,k):
myList = common_substring_liyc(myList)
print (myList)
X = stringList_to_distanceMatrix(myList)
# X = pairwise_distances(myList,myList,similar,-1)
print (X)
model = AgglomerativeClustering(n_clusters=k, affinity="precomputed",linkage="average")
model.fit(X)
df = pd.DataFrame()
df[0]=myList
df[1] = model.labels_
print (df)
myDict = {}
for s,d in df.groupby(1):
myList = d[0].tolist()
print (s,myList)
try:
common = common_substring_liyc_return_string(myList)
except:
common = myList[0]
myDict[s] = common
print (myDict)
df[1] = df[1].replace(myDict)
print (myDict)
print (df)
return df[1].tolist()
def common_substring_liyc_return_string(myList):
common_list =[]
for i in re.split('_|-|\.|,',myList[0]):
flag = True
for j in myList[1:]:
if not i in j:
flag = False
break
if flag:
common_list.append(i)
return "_".join(common_list)
def group_similar_names_dbscan(myList):
myList = common_substring_liyc(myList)
print (myList)
X = stringList_to_distanceMatrix(myList)
# X = pairwise_distances(myList,myList,similar,-1)
print (X)
model = DBSCAN( metric="precomputed",n_jobs=-1,min_samples=1,eps=0.09)
model.fit(X)
df = pd.DataFrame()
df[0]=myList
df[1] = model.labels_
print (df)
myDict = {}
for s,d in df.groupby(1):
myList = d[0].tolist()
print (s,myList)
try:
common = common_substring_liyc_return_string(myList)
except:
common = myList[0]
myDict[s] = common
print (myDict)
df[1] = df[1].replace(myDict)
print (df)
return df[1].tolist()
def general_df_reader(args):
if args.header:
if args.index:
df = pd.read_csv(args.input,sep=args.sep,index_col=0)
else:
df = pd.read_csv(args.input,sep=args.sep)
else:
if args.index:
df = pd.read_csv(args.input,sep=args.sep,index_col=0,header=None)
else:
df = pd.read_csv(args.input,sep=args.sep,header=None)
return df
def guess_sep(x):
with open(x) as f:
for line in f:
tmp1 = len(line.strip().split(","))
tmp2 = len(line.strip().split("\t"))
# print (tmp1,tmp2)
if tmp1 > tmp2:
return ","
if tmp2 > tmp1:
return "\t"
else:
print ("Can't determine the separator. Please input manually")
exit()
def read_csv_with_index(x):
df = pd.read_csv(x,index_col=0)
return df
def top_row_mean(df,n=50):
tmp = df.copy()
tmp['mean']=tmp.mean(axis=1)
tmp = tmp.sort_values('mean',ascending=False)
selected_list = tmp.index.tolist()[:n]
print ("the minimal mean value given top %s is %s"%(n,tmp.at[selected_list[-1],'mean']))
return df.loc[selected_list]
def row_mean_cutoff(df,c=5):
return df[df.mean(axis=1) >= c]
def row_mean_percent_col_cutoff(df,frac=0.1,c=5):
N = int(frac*df.shape[1])
mean_list = np.mean(np.partition(df.values, -N)[:, -N:], 1)
return df[mean_list >= c]
# def clustermap(df,output_name,xlabel="",ylabel="",title="",reIndexDict="",show_x=True,show_y=True,W="",H="",figure_type="png",method='average', metric='euclidean', z_score=None, standard_scale=None):
def clustermap(df,output_name,xlabel="",ylabel="",title="",reIndexDict="",show_x=True,show_y=True,W="",H="",figure_type="png",method='average', metric='euclidean', z_score=None, standard_scale=None):
"""sns clustermap with some options"""
df = df.copy()
return_file_name = "%s.%s"%(output_name,figure_type)
if not reIndexDict=="":
df.index = [reIndexDict[x] for x in df.index.tolist()]
if H=="":
H = int(df.shape[0]/4)
if H > 200:
H=200
if H < 2:
H = 2
if W=="":
W = int(df.shape[1]/4)
if W > 200:
W=200
if W < 2:
W=2
# print (df.head())
# df.to_csv("test.csv")
# print (df.isnull().any().any())
# print (show_x,show_y)
print ("%s size is %s * %s"%(return_file_name,W,H))
# for x in df.dtypes:
# if str(x) !="float64":
# print (x)
## I had one bug caused by that W is string type
size_limit = 1000
if df.shape[0] * df.shape[1] > size_limit*size_limit:
if df.shape[0] > size_limit:
print ("N row is %s. Taking %s random rows"%(df.shape[0],size_limit))
df = df.sample(n=size_limit)
if df.shape[1] > size_limit:
print ("N col is %s. Taking %s random cols"%(df.shape[1],size_limit))
df = df.sample(n=size_limit,axis=1)
g=sns.clustermap(df,xticklabels=show_x,yticklabels=show_y,figsize=(int(W),int(H)),method=method, metric=metric, z_score=z_score, standard_scale=standard_scale)
# for c in df.columns:
# df[c] = df[c].astype(np.float)
# plt.figure(figsize=(W,H))
# g=sns.clustermap(df,xticklabels=show_x,yticklabels=show_y)
# g=sns.clustermap(df)
ax = g.ax_heatmap
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
# plt.savefig(return_file_name, bbox_inches='tight')
g.savefig(return_file_name,dpi=300)
return return_file_name
def barplot_with_err(df,output_name,x,y,err,xlabel="",ylabel="",title="",figure_type="png",W="",H=""):
return_file_name = "%s.%s"%(output_name,figure_type)
## to do: get a sense of best W and H
plt.figure(figsize=(int(W),int(H)))
g=sns.barplot(x,y,data=df,yerr=df[err].tolist())
for item in g.get_xticklabels():
item.set_rotation(90)
g.set(xlabel=xlabel, ylabel=ylabel,title=title)
plt.savefig(return_file_name, bbox_inches='tight',dpi=300)
return return_file_name
def plotly_scatter(plot_df,is_discrete=True,colorscale='Viridis',showlegend=True,xlabel="",ylabel="",title="",figure_type="png",output="output",width=500,height=500,text=False):
"""
https://plot.ly/python/line-and-scatter/
maybe later try: go.Scattergl
https://community.plot.ly/t/what-colorscales-are-available-in-plotly-and-which-are-the-default/2079
This is the list of Plotly colorscales:
[‘Blackbody’,
‘Bluered’,
‘Blues’,
‘Earth’,
‘Electric’,
‘Greens’,
‘Greys’,
‘Hot’,
‘Jet’,
‘Picnic’,
‘Portland’,
‘Rainbow’,
‘RdBu’,
‘Reds’,
‘Viridis’,
‘YlGnBu’,
‘YlOrRd’]
"""
### discrete
n_unique = plot_df['color'].nunique()
color_set = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000']
if n_unique >20 and n_unique <= 50:
palette = [(228,26,28),(55,126,184),(77,175,74),(152,78,163),(255,127,0),(255,255,51),(166,86,40),(247,129,191),(153,153,153)]
gb = Glasbey(base_palette=palette)
p=gb.generate_palette(size=(n_unique-20))
color_set_tmp = gb.convert_palette_to_rgb(p)
color_set = color_set+["rgb%s"%(str(x)) for x in color_set_tmp]
if is_discrete:
# fig = go.Figure(data=go.Scatter(x=plot_df[0],
# y=plot_df[1],
# mode='markers',
# text=plot_df['text'],
# color=plot_df['color'],
# marker=dict(
# size=plot_df['size']
# )
# ))
if n_unique < 50:
plot_df['color'] = plot_df['color'].astype(str)
# print (plot_df)
# size_max = 20
try:
size_max = plot_df['size'].astype(int).max()
except:
size_max=20
if text:
try:
fig = px.scatter(plot_df,x="x",y='y',color='color',symbol='symbol',size='size',hover_data=['text'],text='text',color_discrete_sequence=color_set,template="plotly_white",size_max=size_max)
except:
fig = px.scatter(plot_df,x="x",y='y',text='text',color='color',size='size',hover_data=['text'],color_discrete_sequence=color_set,template="plotly_white",size_max=size_max)
else:
try:
fig = px.scatter(plot_df,x="x",y='y',color='color',symbol='symbol',size='size',hover_data=['text'],color_discrete_sequence=color_set,template="plotly_white",size_max=size_max)
except:
fig = px.scatter(plot_df,x="x",y='y',color='color',size='size',hover_data=['text'],color_discrete_sequence=color_set,template="plotly_white",size_max=size_max)
else:
# fig = go.Figure(data=go.Scatter(x=plot_df[0],
# y=plot_df[1],
# mode='markers',
# text=plot_df['text'],
# marker=dict(
# size=plot_df['size'],
# color=plot_df['color'],
# colorscale=colorscale
# )
# ))
print ("input is continous data")
fig = px.scatter(plot_df,x="x",y='y',color='color',size='size',hover_data=['text'],color_continuous_scale=px.colors.sequential.Rainbow,template="plotly_white",opacity=0.7)
# https://medium.com/@abel.rech66/introduction-to-plotly-express-ee7bc478f333
fig.update_layout(
title=go.layout.Title(
text=title
),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text=xlabel
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text=ylabel
)
),
showlegend=showlegend
)
# fig.update_layout(
# title=go.layout.Title(
# text=title,
# xref="paper",
# x=0
# ),
# xaxis=go.layout.XAxis(
# title=go.layout.xaxis.Title(
# text=xlabel,
# font=dict(
# family="Courier New, monospace",
# size=18,
# color="#7f7f7f"
# )
# )
# ),
# yaxis=go.layout.YAxis(
# title=go.layout.yaxis.Title(
# text=ylabel,
# font=dict(
# family="Courier New, monospace",
# size=18,
# color="#7f7f7f"
# )
# )
# ),
# showlegend=showlegend
# )
fig.write_html('%s.html'%(output), include_plotlyjs=True,auto_open=False)
# fig.to_html('%s.html'%(output))
# fig.write_image('%s.%s'%(output,figure_type), format=figure_type, width=width, height=height)
| null |
utils/liyc_utils.py
|
liyc_utils.py
|
py
| 13,885 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "plotly.io",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "plotly.io.orca",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.use",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.setrecursionlimit",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "difflib.SequenceMatcher",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.AgglomerativeClustering",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.DBSCAN",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.partition",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "seaborn.clustermap",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "seaborn.barplot",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "glasbey.Glasbey",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "plotly.express.scatter",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 425,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 428,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "plotly.express.colors",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs.layout.Title",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.layout",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.layout.XAxis",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.layout",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.layout.xaxis.Title",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.layout",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.layout.YAxis",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.layout",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.layout.yaxis.Title",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.layout",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs",
"line_number": 457,
"usage_type": "name"
}
] |
579220899
|
"""Profile class"""
import re
from bs4 import BeautifulSoup
from rival_regions_wrapper import functions
class Overview():
"""Wrapper class for perks"""
def __init__(self, api_wrapper):
self.api_wrapper = api_wrapper
def info(self):
"""Get perks"""
path = 'main/content'
response = self.api_wrapper.get(path)
soup = BeautifulSoup(response, 'html.parser')
perks = soup.select('.perk_source_4')
upgrade_perk = None
upgrade_date = None
for perk in perks:
date_string = perk.select_one('.small')
if date_string:
upgrade_perk = int(perk['perk'])
date_string = re.sub(r'^.*:\s', '', soup.select_one('.perk_source_4 .small').text)
upgrade_date = functions.parse_date(date_string)
break
auto_war = soup.select_one('.war_index_war span.pointer:nth-child(4)')
if auto_war and auto_war.has_attr('action'):
auto_war = auto_war['action'].replace('war/details/', '')
else:
auto_war = None
overview = {
'perks': {
'strenght': int(soup.find('div', {'perk': 1, 'class': 'perk_source_2'}).text),
'education': int(soup.find('div', {'perk': 2, 'class': 'perk_source_2'}).text),
'endurance': int(soup.find('div', {'perk': 3, 'class': 'perk_source_2'}).text),
'upgrade_date': upgrade_date,
'upgrade_perk': upgrade_perk
},
'war': {
'auto_war': auto_war,
}
}
return overview
def status(self):
"""Get current status"""
path = 'main'
response = self.api_wrapper.get(path)
soup = BeautifulSoup(response, 'html.parser')
profile_url = soup.select_one('#header_my_avatar')['action']
party_url = soup.select_one('#party_menu_members')['action']
stats = {
'profile_id': int(profile_url.replace('slide/profile/', '')),
'party_id': int(party_url.replace('listed/party/', '')),
'gold': int(soup.select_one('#g').text.replace('.', '')),
'money': int(soup.select_one('#m').text.replace('.', '')),
'level': int(soup.select_one('#exp_level').text),
'exp': int(soup.select_one('#exp_points').text),
}
return stats
| null |
src/rival_regions_wrapper/api_wrapper/overview.py
|
overview.py
|
py
| 2,416 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rival_regions_wrapper.functions.parse_date",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rival_regions_wrapper.functions",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 53,
"usage_type": "call"
}
] |
369864178
|
import os
from bs4 import BeautifulSoup as beautifulsoup
import random
import urllib.request as urllib_request
import sys
import lxml
import winter_cat_url_control.url_put as put
import winter_cat_fake_user_box.fake_headers as fake_headers
def get_search_link(url):
    """Fetch *url* and return the page parsed as a BeautifulSoup tree.

    :param url: page to fetch; falsy values (None or '') fall back to the
        default URL from ``put.get_url()``.
    :return: ``bs4.BeautifulSoup`` object parsed with the ``lxml`` parser.
    """
    # Bug fix: callers such as get_title_info() pass url='' which previously
    # took the non-default branch and handed urllib an empty URL. Treat any
    # falsy url the same as None. Also collapses the two duplicated branches.
    target_url = url if url else put.get_url()
    headers = {'User-Agent': fake_headers.get_fake_headers()}
    search_request = urllib_request.Request(url=target_url, headers=headers)
    search_page_data = urllib_request.urlopen(search_request).read().decode('utf8')
    return beautifulsoup(search_page_data, 'lxml')
def get_title_info():
    """Return the default page's <title> text and meta description.

    :return: list of two single-entry dicts: the page title and the meta
        description.
    """
    data = get_search_link(url='')
    title_name = data.find('title').text
    meta_description = data.find(attrs={"name": "description"})['content']
    # Bug fix: the original built *sets* ({'web title:', value}), which are
    # unordered and carry no key->value association; use dicts instead.
    return [{'web title:': title_name}, {'web meta:': meta_description}]
def get_all_link():
    """Collect the href attribute of every <a> tag on the default page.

    :return: list of hrefs; entries may be None for anchors without one.
    """
    page = get_search_link(url='')
    return [anchor.get('href') for anchor in page.find_all('a')]
# NOTE(review): module-level side effect — importing this module crawls every
# href found on the default page; hrefs can be None, which urllib will reject.
for i in get_all_link():
    get_search_link(url=i)
| null |
winter_cat_research/link_research.py
|
link_research.py
|
py
| 1,509 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "winter_cat_fake_user_box.fake_headers.get_fake_headers",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "winter_cat_fake_user_box.fake_headers",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "urllib.request.Request",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "winter_cat_url_control.url_put.get_url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "winter_cat_url_control.url_put",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "winter_cat_fake_user_box.fake_headers.get_fake_headers",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "winter_cat_fake_user_box.fake_headers",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "urllib.request.Request",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
}
] |
107816243
|
"""Запуск запуск сервера и клиента MongoDB и соединения с интернетом."""
import atexit
import functools
import logging
import subprocess
from typing import Tuple
import psutil
import pymongo
import requests
from requests import adapters
from poptimizer import config
# Maximum HTTPS connection-pool size and retry count
HTTPS_MAX_POOL_SIZE = 20
MAX_RETRIES = 3
# Default database and collection
DB = "data"
MISC = "misc"
# Key in the database that stores the number of dividend documents
DIV_COUNT = "div_count"
# Database and collection holding the dividend source data
SOURCE_DB = "source"
COLLECTION = "dividends"
# URLs of the dividend data dumps on the internet
DIV_DATA_URL = {
    "dividends.bson": (
        "https://github.com/WLM1ke/poptimizer/blob/master/dump/source/dividends.bson?raw=true"
    ),
    "dividends.metadata.json": (
        "https://github.com/WLM1ke/poptimizer/blob/master/dump/source/dividends.metadata.json?raw=true"
    ),
}
def start_mongo_server() -> psutil.Process:
    """Start the local MongoDB server, reusing an already running one.

    :return: the process object of the (new or existing) mongod server.
    """
    # Reuse a mongod process if one is already running.
    for proc in psutil.process_iter(attrs=["name"]):
        if "mongod" in proc.info["name"]:
            return proc
    config.MONGO_PATH.mkdir(parents=True, exist_ok=True)
    command = [
        "mongod",
        "--dbpath",
        config.MONGO_PATH,
        "--directoryperdb",
        "--bind_ip",
        "localhost",
    ]
    # Server output is discarded; only the process handle is kept.
    return psutil.Popen(command, stdout=subprocess.DEVNULL)
def restore_dump(client: pymongo.MongoClient, http_session: requests.Session) -> None:
    """Download (if needed) and restore the dividend data dump.

    :param client: open MongoDB client, used to check for the source DB.
    :param http_session: HTTP session used to download the dump files.
    """
    if not config.MONGO_DUMP.exists():
        # Idiom fix: the original used f-strings with no placeholders and
        # eager formatting in logging calls; use literals / lazy %-args.
        logging.info("Файлы с данными о дивидендах отсутствуют - начинается загрузка")
        path = config.MONGO_DUMP / SOURCE_DB
        path.mkdir(parents=True)
        for name, url in DIV_DATA_URL.items():
            with http_session.get(url, stream=True) as respond:
                with open(path / name, "wb") as fin:
                    fin.write(respond.content)
        logging.info("Файлы с данными о дивидендах загружены")
    if SOURCE_DB not in client.list_database_names():
        logging.info("Начато восстановление данных с дивидендами")
        mongo_restore = ["mongorestore", config.MONGO_DUMP]
        process = psutil.Popen(mongo_restore)
        status = process.wait()
        logging.info(
            "Восстановление данных с дивидендами завершен со статусом %s", status
        )
def start_mongo_client(http_session: requests.Session) -> pymongo.MongoClient:
    """Open a client connection to the local MongoDB and restore dumps.

    :param http_session: session used by restore_dump for downloads.
    :return: connected ``pymongo.MongoClient``.
    """
    mongo_client = pymongo.MongoClient("localhost", 27017, tz_aware=False)
    restore_dump(mongo_client, http_session)
    return mongo_client
def start_http_session() -> requests.Session:
    """Create an HTTP session with a pooled, retrying HTTPS adapter.

    :return: configured ``requests.Session``.
    """
    http = requests.Session()
    https_adapter = adapters.HTTPAdapter(
        pool_maxsize=HTTPS_MAX_POOL_SIZE,
        max_retries=MAX_RETRIES,
        pool_block=True,
    )
    http.mount("https://", https_adapter)
    return http
def dump_dividends_db(client: pymongo.MongoClient) -> None:
    """Back up the dividend database when its document count has changed.

    :param client: open MongoDB client.
    """
    n_docs = client[SOURCE_DB][COLLECTION].count_documents({})
    div_count = client[DB][MISC].find_one({"_id": DIV_COUNT})
    if div_count is None or n_docs != div_count["data"]:
        # Idiom fix: lazy %-style arguments instead of eager f-strings in
        # logging calls; the emitted messages are unchanged.
        logging.info("Backup данных с дивидендами %s документов", n_docs)
        mongo_dump = ["mongodump", "--out", config.MONGO_DUMP, "--db", SOURCE_DB]
        process = psutil.Popen(mongo_dump)
        status = process.wait()
        # Remember the document count so the next run can skip the dump.
        client[DB][MISC].replace_one({"_id": DIV_COUNT}, {"data": n_docs}, upsert=True)
        logging.info("Backup данных с дивидендами завершен со статусом %s", status)
def clean_up(client: pymongo.MongoClient, http_session: requests.Session) -> None:
    """Back up dividends, then close the MongoDB client and HTTP session.

    :param client: MongoDB client to close.
    :param http_session: HTTP session to close.
    """
    dump_dividends_db(client)
    client.close()
    http_session.close()
def start_and_setup_clean_up() -> Tuple[
    psutil.Process, pymongo.MongoClient, requests.Session
]:
    """Start the MongoDB server and client plus the internet session.

    Registers an atexit hook that closes the client and the session.
    The server itself is intentionally left running.

    :return: (server process, MongoDB client, HTTP session) tuple.
    """
    server = start_mongo_server()
    http_session = start_http_session()
    client = start_mongo_client(http_session)
    cleanup_hook = functools.partial(clean_up, client=client, http_session=http_session)
    atexit.register(cleanup_hook)
    return server, client, http_session
# Module-level side effect: the server, client and session are started as
# soon as this module is imported.
MONGO_PROCESS, MONGO_CLIENT, HTTP_SESSION = start_and_setup_clean_up()
| null |
poptimizer/store/mongo.py
|
mongo.py
|
py
| 5,958 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "psutil.process_iter",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_PATH.mkdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_PATH",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "poptimizer.config.MONGO_PATH",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "psutil.Popen",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "psutil.Process",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config.MONGO_DUMP.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_DUMP",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_DUMP",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_DUMP",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "psutil.Popen",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "requests.adapters.HTTPAdapter",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "requests.adapters",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "requests.Session",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "poptimizer.config.MONGO_DUMP",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "poptimizer.config",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "psutil.Popen",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "atexit.register",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "psutil.Process",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 125,
"usage_type": "attribute"
}
] |
199498559
|
import sqlite3
from selenium import webdriver
import time
import random
import unicodedata
import re
from pytube import YouTube
#МОдуль для получения ссылок на видео
def save_link_in_db_from_channel(chan_for_download, number_of_scrolling):
    """Scrape video links from a YouTube channel page and store them in SQLite.

    :param chan_for_download: URL of the channel's /videos page.
    :param number_of_scrolling: how many page scrolls to perform to load more
        videos before collecting links.
    """
    # create the (empty) database if it does not exist yet
    # number_of_scrolling = 10
    # print(number_of_scrolling)
    chan_for_download_list = [chan_for_download,]
    options = webdriver.ChromeOptions()
    # Disable image loading to speed up scraping.
    prefs = {"profile.managed_default_content_settings.images": 2}
    options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome('chromedriver.exe', options=options)
    conn = sqlite3.connect('bazasearch_download.db')
    cur = conn.cursor()
    cur.execute("""CREATE TABLE IF NOT EXISTS vidos(
    vidid INT,
    name TEXT,
    descr TEXT,
    prosm INT,
    pub TEXT,
    link TEXT PRIMARY KEY);
    """)
    conn.commit()
    # links = ['https://www.youtube.com/c/NickChernobaev/videos',]
    # helper that saves the collected links into the database
    def get_links(link_list, number_of_scrolling):
        for link in link_list:
            driver.get(link)
            time.sleep(1)
            len_scroll = 3000
            # Scroll the page repeatedly so YouTube lazy-loads more videos.
            for i in range(1, number_of_scrolling):
                driver.execute_script("window.scrollBy(0,{})".format(len_scroll))
                len_scroll += 6000
                time.sleep(1)
                print('прокрутка')
            for i in driver.find_elements_by_id('video-title'):
                vid_link = str(i.get_attribute('href'))
                vid_description = str(i.get_attribute('aria-label'))
                print(vid_link + ' ' + vid_description)
                try:
                    # Extract the author/date tail after the Russian 'Автор:' label.
                    author_date = str(vid_description.split('Автор:', 1)[1]).split(' ', 1)[1].rstrip()
                except:
                    author_date = "author_date ошибка "
                    print("author_date ошибка" + str(vid_link))
                # Normalise non-breaking spaces, then pull the view count digits.
                stro = unicodedata.normalize('NFKD', author_date)
                prosm_text = str(re.findall(r"\w{0}\s{0}\d+\s*\d*\s*\d* просм", stro))
                prosm_int = re.findall(r'\d+', prosm_text)
                try:
                    prosm_int = int(''.join(prosm_int))
                except:
                    prosm_int = 0
                    print('prosm_int исключение' + str(vid_link))
                vids = ('1', author_date, vid_description, prosm_int, '0', vid_link)
                print(vids)
                try:
                    cur.execute("INSERT INTO vidos VALUES(?, ?, ?, ?, ?, ?);", vids)
                    conn.commit()
                # Duplicate links violate the PRIMARY KEY and are skipped.
                except sqlite3.IntegrityError as err:
                    print(str(err) + 'в ссылке: ' + link)
        driver.close()
    # run the helper: save the links into the database
    get_links(chan_for_download_list, number_of_scrolling)
def download_videos_from_db(nums):
    """Download up to *nums* videos whose links are stored in the database.

    Links are taken in ``pub`` order from the 'vidos' table.

    :param nums: maximum number of links to fetch and download.
    :return: status text (kept for backward compatibility).
    """
    conn = sqlite3.connect('bazasearch_download.db')
    try:
        cur = conn.cursor()
        # Fix: the original ran an unordered SELECT first and immediately
        # replaced it with the ordered one; a single query is enough.
        cur.execute("""SELECT link FROM vidos ORDER BY pub""")
        vid_links = cur.fetchmany(nums)
    finally:
        # Fix: the original leaked the connection.
        conn.close()
    for row in vid_links:
        yt = YouTube(row[0])
        # itag 18 selects a specific progressive stream.
        yt.streams.get_by_itag(18).download()
        print(row[0])
    # TODO: clear downloaded rows from the database
    return 'просто текст результат функциии channel_download_module'
# скачивание видео по отдельным ссылкам.
def download_from_links(vid_links):
    """Download each YouTube video in *vid_links*.

    :param vid_links: iterable of YouTube watch URLs.
    """
    # Bug fix: the original immediately overwrote *vid_links* with a
    # hard-coded test link, so the argument was silently ignored.
    for link in vid_links:
        yt = YouTube(link)
        # itag 18 selects a specific progressive stream.
        yt.streams.get_by_itag(18).download()
        print(link)
# save_link_in_db_from_channel('https://www.youtube.com/c/DjangoSchool/videos', 3)
# download_videos_from_db(15)
| null |
download_videos_from_channels.py
|
download_videos_from_channels.py
|
py
| 4,332 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "unicodedata.normalize",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sqlite3.IntegrityError",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 105,
"usage_type": "call"
}
] |
339161831
|
import cv2
import numpy as np
import logging
import math
import datetime
import threading
import _thread
import time
import serial
import traceback # TODO remove
import pyrealsense2 as rs
import sys
from cam import setupstream
from cam import getframes
# Toggle for all debug imshow windows (see show_image).
_SHOW_IMAGE = True
# CONSTANTS
# Expected lane-line x positions per zone (bottom, middle, top); used as
# reference offsets in calculate_midpoints() when only one line is visible.
yellow_goal_points = (310, 300, 290) #TODO find empirically
blue_goal_points = (10, 20, 30)
#Threshold: yellow (HSV low/high bounds for cv2.inRange)
thresh_yellow_low = (20, 30, 161)
thresh_yellow_high = (47, 255, 255)
#Thresholds: blue
thresh_blue_low = (96, 30, 147)
thresh_blue_high = (145, 246, 239)
#Thresholds: Purple (obstacle)
thresh_purple_low = (0, 0, 0)
thresh_purple_high = (180, 255, 255)
# Obstacle contour acceptance ranges (pixel units)
obstacle_area_min = 400
obstacle_area_max = 2500
obstacle_perimeter_min = 80
obstacle_perimeter_max = 200
obstacle_ratio = 0.8 # Never bigger than 1 (dont need max)
obstacle_rect_fill = 0.8
# Image size (frames are resized to this before processing)
width = 320
height = 180
# Zoning (constants to split into top/middle/bottom sections)
top_mask = height*1/6
section_half_height = (height - top_mask)*1/8
section_overlap = section_half_height * 1/4
# Zone borders and per-zone "goal" rows, from top of image downwards.
border_top = top_mask
top_goal = top_mask + section_half_height
border_middle_top = top_mask + section_half_height*2
middle_goal = top_mask + section_half_height*3
border_middle_bottom = top_mask + section_half_height*4
bottom_goal = top_mask + section_half_height*5
border_bottom = top_mask + section_half_height*6
class FakeArduino:
    """Stand-in for the serial Arduino link: tracks speed/angle without I/O."""

    def __init__(self):
        # Defaults mirror the real Arduino class.
        self.speed = 80
        self.send_speed = True
        self.angle = 90
        self.send_angle = True

    def update_speed(self, speed):
        """Clamp *speed* into [0, 180] and mark it for (fake) sending."""
        self.speed = int(min(180, max(0, speed)))
        self.send_speed = True

    def update_angle(self, angle):
        """Clamp *angle* into [0, 180], then store its mirror (180 - angle)."""
        self.angle = int(180 - min(180, max(0, angle)))

    def get_speed(self):
        """Return the last clamped speed value."""
        return self.speed

    def get_angle(self):
        """Return the last stored (mirrored) angle value."""
        return self.angle

    def run(self):
        """Mimic the real Arduino send loop's timing without writing anywhere."""
        while True:
            time.sleep(0.04)
            if self.send_speed:
                self.send_speed = False
            time.sleep(0.04)
            time.sleep(0.04)
class Arduino:
    """Serial bridge to the motor/steering Arduino on /dev/ttyUSB0."""

    def __init__(self):
        self.connection = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
        self.speed = 80
        self.send_speed = True
        self.angle = 90
        self.send_angle = True

    def update_speed(self, speed):
        """Clamp *speed* into [0, 180] and flag it for the send loop."""
        self.speed = int(min(180, max(0, speed)))
        self.send_speed = True

    def update_angle(self, angle):
        """Clamp *angle* into [0, 180], then store its mirror (180 - angle)."""
        self.angle = int(180 - min(180, max(0, angle)))

    def get_speed(self):
        """Return the last clamped speed value."""
        return self.speed

    def get_angle(self):
        """Return the last stored (mirrored) angle value."""
        return self.angle

    def run(self):
        """Send loop: heartbeat, speed (only when dirty), then angle, forever."""
        while True:
            self.connection.write(b"D000")
            time.sleep(0.04)
            if self.send_speed:
                self.connection.write(f"M{self.speed:03d}".encode())
                self.send_speed = False
            time.sleep(0.04)
            self.connection.write(f"S{self.angle:03d}".encode())
            time.sleep(0.04)
class Stopper:
    """Periodically halts the car and waits for operator confirmation."""
    def __init__(self, arduino):
        # arduino: object exposing update_speed() (Arduino or FakeArduino)
        self.arduino = arduino
    def run(self):
        """Stop/confirm/resume loop; blocks forever — run it in a thread."""
        while True:
            # 90 appears to be the neutral/stop speed (cf. calculate_speed's
            # "don't move" return value); 80 is the default forward speed.
            self.arduino.update_speed(90)
            time.sleep(4)
            input("Press enter to start again:")
            self.arduino.update_speed(80)
class Camera:
    """RealSense wrapper that captures frames in a background loop."""
    def __init__(self):
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                                  30)  # enable_stream(source, width, height, format, fps)
        self.config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8,
                                  30)  # Intel resources say 1280 & 720 is best for the depth calculations, then you want to downsize it later)
        self.pipeline.start(self.config)
        # Latest captured frames; filled in by take_pics().
        self.color_out = None
        self.depth_out = None
        # Loop control flag for take_pics().
        self.run = True
    def take_pics(self):
        """Capture loop: keeps the newest colour frame in self.color_out."""
        while self.run:
            # Currently, just runs the color as we're not (yet) using depth
            frames = self.pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            # depth_frame = frames.get_depth_frame()
            """if not color_frame or not depth_frame:
                pass
            else:
                self.color_out = color_frame
                self.depth_out = depth_frame"""
            # Temp color-only code:
            if color_frame is not None:
                self.color_out = color_frame.get_data()
                self.timestamp = time.time()
    def stop(self):
        """Stop the capture loop and shut down the RealSense pipeline."""
        self.run = False
        self.pipeline.stop()
    def get_color_frame(self):
        # Raw buffer of the most recent colour frame (None before first capture).
        return self.color_out
    def get_depth_frame(self):
        # NOTE(review): never reassigned after __init__ in the current code path.
        return self.depth_out
    def get_timestamp(self):
        # NOTE(review): only set once take_pics() has captured a frame;
        # raises AttributeError if called before then.
        return self.timestamp
class HandCodedLaneFollower(object):
    """Lane follower: detects lane lines per frame and steers via the Arduino."""
    def __init__(self, car=None):
        logging.info('Creating a HandCodedLaneFollower...')
        self.car = car
        self.curr_steering_angle = 90
        # The Arduino send loop runs on its own daemonless thread.
        self.arduino = FakeArduino()
        self.arduino_thread = threading.Thread(target=self.arduino.run)
        self.arduino_thread.start()
        #self.stopper = Stopper(self.arduino)
        #self.stopper_thread = threading.Thread(target=self.stopper.run)
        # self.stopper_thread.start()
    def follow_lane(self, frame):
        """Process one BGR frame: detect lanes, compute and send angle/speed."""
        # Main entry point of the lane follower
        #show_image("orig", frame)
        frame = cv2.resize(frame, (width, height),
                           interpolation=cv2.INTER_NEAREST)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        yellow_points, blue_points, lane_lines_image = detect_lane(frame)
        mid_points = calculate_midpoints(yellow_points, blue_points)
        """obstacle = detect_obstacle(frame)
        if obstacle is not None:
            _, _, rect = obstacle
            x_values = [x[0] for x in rect[0]]
            y_values = [x[0] for x in rect[0]]
            left = min(x_values)
            right = max(x_values)
            y_value = min(y_values)
            section = None
            onLeft = False
            if border_top < y_value < border_middle_top:
                section = 2
            elif border_middle_top < y_value < border_middle_bottom:
                section = 1
            elif border_middle_bottom < y_value < border_bottom:
                section = 0
            if section is not None:
                #Find out which side of the midpoint the box is on
                if midpoints[section] is not None:
                    if (left+right)/2 <= midpoints[section]:
                        onLeft = True
                    else:
                        onLeft = False
                else:
                    #Compare against average midpoint value
                    no_none_midpoints = [x for x in midpoints if x is not None]
                    if len(no_none_midpoints) != 0:
                        midpoint = sum(no_none_midpoints)/len(no_none_midpoints)
                        if (left+right)/2 <= midpoint:
                            onLeft = True
                        else:
                            onLeft = False
                    else:
                        #Just go to the right
                        onLeft = False
            #TODO actually go to the left / right
        """
        # Draw mid points
        mid_points_draw = (
            (mid_points[0], bottom_goal), (mid_points[1], middle_goal), (mid_points[2], top_goal))
        mid_points_draw = [
            point for point in mid_points_draw if point[0] is not None]
        lane_lines_image = display_points(
            lane_lines_image, mid_points_draw, point_color=(127, 0, 255))
        show_image("Mid points", lane_lines_image)
        # Do steering stuff
        #final_frame = self.steer(frame, lane_lines)
        # NOTE(review): points entries may carry a None x value here; the
        # calculate_* functions do arithmetic on them — confirm upstream.
        points = [x for x in ((mid_points[0], bottom_goal),
                              (mid_points[1], middle_goal), (mid_points[2], top_goal))]
        angle = calculate_angle(points)
        speed = calculate_speed(points, angle)
        self.arduino.update_angle(angle)
        self.arduino.update_speed(speed)
def calculate_midpoints(yellow_points, blue_points):
    """Estimate the lane-centre x position for each of the three zones.

    :param yellow_points: (bottom, middle, top) x values of the yellow line,
        None where the line was not detected.
    :param blue_points: same for the blue line.
    :return: list [bottom, middle, top] of midpoint x values (None = unknown).
    """
    # points = (bottom, middle, top) x values
    mid_points = [None, None, None]
    found_midpoints = 0
    # Direct midpoints wherever both lines were seen in the same zone.
    for counter in range(3):
        if yellow_points[counter] is not None and blue_points[counter] is not None:
            mid_points[counter] = (yellow_points[counter] + blue_points[counter])/2
            found_midpoints += 1
    if found_midpoints == 0:  # Just go off estimates
        # Offset a single seen line by its expected "goal" position.
        for counter in range(3):
            if yellow_points[counter] is not None:
                mid_points[counter] = width/2 + yellow_points[counter] - yellow_goal_points[counter]
            elif blue_points[counter] is not None:
                mid_points[counter] = width/2 + blue_points[counter] - blue_goal_points[counter]
    elif found_midpoints == 1:  # Use offset from the single midpoint
        # Find which one has the midpoint
        midpoint = None
        for point in range(3):
            if mid_points[point] is not None:
                midpoint = point
        for counter in range(3):
            if mid_points[counter] is None:
                if yellow_points[counter] is not None:
                    mid_points[counter] = mid_points[midpoint] + (yellow_goal_points[midpoint] - yellow_goal_points[counter]) - (yellow_points[midpoint] - yellow_points[counter])
                elif blue_points[counter] is not None:
                    mid_points[counter] = mid_points[midpoint] + (blue_goal_points[midpoint] - blue_goal_points[counter]) - (blue_points[midpoint] - blue_points[counter])
    elif found_midpoints == 2:
        first_midpoint = None
        second_midpoint = None
        for point in range(3):
            if mid_points[point] is not None:
                if first_midpoint is None:
                    first_midpoint = point
                else:
                    second_midpoint = point
        #Find the expected gradient of the line
        # (the zone index is used as the y coordinate here)
        try:
            m = (second_midpoint-first_midpoint)/(mid_points[second_midpoint] - mid_points[first_midpoint])
        except ZeroDivisionError:
            # Effectively vertical: use a very large slope instead.
            m = 1024
        b = first_midpoint - m*mid_points[first_midpoint]
        for counter in range(3):
            if mid_points[counter] is None:
                # NOTE(review): only the yellow line is consulted here; a
                # blue-only zone is left as None — confirm this is intended.
                if yellow_points[counter] is not None:
                    mid_points[counter] = (1/m)*(counter-b)
    return mid_points
def calculate_angle(midpoints):
    """PD-style steering: map zone midpoints to a servo angle.

    :param midpoints: list of (x, y) midpoints, nearest to the car first.
    :return: steering value to send to the arduino (90 = straight ahead).
    """
    # midpoints is a list of midpoints :)
    # returns the value that should be sent to arduino
    # if no midpoints found don't turn?? reconsider this later
    if len(midpoints) == 0:
        return 90
    kp = 90 / 70
    # NOTE(review): kd == 0 makes every derivative term below vanish, so the
    # 2- and 3-point branches currently reduce to the proportional term.
    kd = 0
    # one midpoint == only proportional
    if len(midpoints) == 1:
        x, y = midpoints[0]
        # theta = x from centre to midpoint / y from centre to midpoint
        theta = (x - width/2) / (height - y)
        theta += 90
        # -20 acts as a stabiliser
        return kp * (theta - 20)
    # more than one midpoint == proportional and differential
    if len(midpoints) == 2:
        x1, y1 = midpoints[0]
        x2, y2 = midpoints[1]
        theta1 = (x1 - width/2) / (height - y1)
        theta1 += 90
        theta2 = (x2 - width/2) / (height - y2)
        theta2 += 90
        change_in_theta = (theta2 - theta1) / (y1 - y2)
        return kp * (theta1 - 20) + kd * (change_in_theta - 20 / height)
    if len(midpoints) == 3:
        x1, y1 = midpoints[0]
        x2, y2 = midpoints[1]
        x3, y3 = midpoints[2]
        theta1 = (x1 - width/2) / (height - y1)
        theta1 += 90
        theta2 = (x2 - width/2) / (height - y2)
        theta2 += 90
        theta3 = (x3 - width/2) / (height - y3)
        theta3 += 90
        # Average the two pairwise derivative estimates.
        change_in_theta1 = (theta2 - theta1) / (y1 - y2)
        change_in_theta2 = (theta3 - theta2) / (y2 - y3)
        change_in_theta_average = (change_in_theta1 + change_in_theta2) / 2
        # return kp * (theta1 - 20) + kd * (change_in_theta1 - 20 / height) + kd * (change_in_theta2 - 20 / height)
        return kp * (theta1 - 20) + kd * (change_in_theta_average - 20 / height)
    # if something wrong tho don't steer
    return 90
def calculate_speed(midpoints, steer):
    """Map the steering magnitude and midpoint spread to a motor speed.

    :param midpoints: list of (x, y) midpoints, nearest to the car first.
    :param steer: the steering value from calculate_angle.
    :return: speed value to send to the arduino (90 = stop).
    """
    # if no midpoints found don't move?? reconsider this later
    if len(midpoints) == 0:
        return 90
    kp = -0.333
    min_speed = 80
    max_speed_proportional = 50
    max_speed_integration = 40
    ki = 0
    proportional_speed = min_speed + kp * steer
    # one midpooint == only proportional
    if len(midpoints) == 1:
        return proportional_speed
    # more than one midpoint == proportional and integration
    if len(midpoints) == 2:
        x1, y1 = midpoints[0]
        x2, y2 = midpoints[1]
        change_in_y = y1 - y2
        # NOTE(review): ki * change_in_y cancels to the constant ratio
        # max_speed_integration / max_speed_proportional — confirm intent.
        ki = max_speed_integration / (max_speed_proportional * change_in_y)
        return ki * change_in_y * proportional_speed
    if len(midpoints) == 3:
        x1, y1 = midpoints[0]
        x2, y2 = midpoints[1]
        x3, y3 = midpoints[2]
        change_in_y_1 = y1 - y2
        change_in_y_2 = y3 - y2
        change_in_y_average = (change_in_y_1 + change_in_y_2) / 2
        # ki_1 = max_speed_integration / (max_speed_proportional * change_in_y1)
        # ki_2 = max_speed_integration / (max_speed_proportional * change_in_y2)
        ki = max_speed_integration / \
            (max_speed_proportional * change_in_y_average)
        # return ki_1 * change_in_y1 * proportional_speed + ki_2 * change_in_y2 * proportional_speed
        return ki * change_in_y_average * proportional_speed
    # if something wrong tho don't move
    return 90
############################
# Frame processing steps
############################
def detect_lane(frame):
    """Detect the yellow and blue lane lines in an HSV frame.

    :param frame: HSV image already resized to (width, height).
    :return: tuple of ((yellow_bottom, yellow_mid, yellow_top) x positions,
        (blue_bottom, blue_mid, blue_top) x positions, annotated debug
        image). Missing points are None.
    """
    logging.debug('detecting lane lines...')
    # Colour-threshold masks for each lane line.
    yellow_edges = detect_edges(frame, thresh_yellow_low, thresh_yellow_high)
    blue_edges = detect_edges(frame, thresh_blue_low, thresh_blue_high)
    # Crop out top of image
    crop_polygon = np.array([[
        (0, top_mask),
        (width, top_mask),
        (width, height),
        (0, height)
    ]], np.int32)
    yellow_cropped = region_of_interest(yellow_edges, crop_polygon)
    blue_cropped = region_of_interest(blue_edges, crop_polygon)
    show_image('yellow edges', yellow_cropped)
    show_image('blue edges', blue_cropped)
    yellow_line_segments = detect_line_segments(yellow_cropped)
    blue_line_segments = detect_line_segments(blue_cropped)
    if yellow_line_segments is None:
        yellow_line_segments = []
    if blue_line_segments is None:
        blue_line_segments = []
    line_segment_image_yellow = display_lines(frame, yellow_line_segments)
    show_image("yellow line segments", line_segment_image_yellow)
    line_segment_image_blue = display_lines(frame, blue_line_segments)
    show_image("blue line segments", line_segment_image_blue)
    # Split lines into three vertical zones:
    yellow_bottom, yellow_mid, yellow_top = split_lines(
        yellow_line_segments, height)
    blue_bottom, blue_mid, blue_top = split_lines(blue_line_segments, height)
    # Draw the zone borders/goals for debugging.
    frame = display_lines(frame, (((0, border_top, width, border_top),), ((0, top_goal, width, top_goal),), ((0, border_middle_top, width, border_middle_top),), ((0, middle_goal, width, middle_goal),), ((
        0, border_middle_bottom, width, border_middle_bottom),), ((0, bottom_goal, width, bottom_goal),), ((0, border_bottom, width, border_bottom),)), line_color=(255, 255, 255), line_width=1)
    blue_top_image = display_lines(frame, blue_top)
    show_image("blue line top segments", blue_top_image)
    blue_mid_image = display_lines(frame, blue_mid)
    show_image("blue mid segments", blue_mid_image)
    blue_bottom_image = display_lines(frame, blue_bottom)
    show_image("blue bottom segments", blue_bottom_image)
    yellow_top_image = display_lines(frame, yellow_top)
    show_image("yellow line top segments", yellow_top_image)
    yellow_mid_image = display_lines(frame, yellow_mid)
    show_image("yellow mid segments", yellow_mid_image)
    yellow_bottom_image = display_lines(frame, yellow_bottom)
    show_image("yellow bottom segments", yellow_bottom_image)
    # Average each zone's segments into a single (gradient, intercept) line.
    yellow_bottom_line = section_average_slope_intercept(
        yellow_bottom, bottom_goal)
    yellow_mid_line = section_average_slope_intercept(yellow_mid, middle_goal)
    yellow_top_line = section_average_slope_intercept(yellow_top, top_goal)
    blue_bottom_line = section_average_slope_intercept(
        blue_bottom, bottom_goal)
    blue_mid_line = section_average_slope_intercept(blue_mid, middle_goal)
    blue_top_line = section_average_slope_intercept(blue_top, top_goal)

    def _x_at(line, goal_y):
        # Solve x on the zone's goal row (y = mx + b), or None if no line.
        if line is None:
            return None
        return (1/line[0])*(goal_y - line[1])

    yellow_bottom_point = _x_at(yellow_bottom_line, bottom_goal)
    yellow_mid_point = _x_at(yellow_mid_line, middle_goal)
    yellow_top_point = _x_at(yellow_top_line, top_goal)
    blue_bottom_point = _x_at(blue_bottom_line, bottom_goal)
    blue_mid_point = _x_at(blue_mid_line, middle_goal)
    blue_top_point = _x_at(blue_top_line, top_goal)

    def _clip(point, low, high):
        # Bug fix: the original iterated `for point in points: point = None`,
        # which only rebinds the loop variable and never discards anything.
        # Out-of-range points are now actually dropped.
        if point is not None and not (low < point < high):
            return None
        return point

    yellow_bottom_point = _clip(yellow_bottom_point, 0, width*5/4)
    yellow_mid_point = _clip(yellow_mid_point, 0, width*5/4)
    yellow_top_point = _clip(yellow_top_point, 0, width*5/4)
    blue_bottom_point = _clip(blue_bottom_point, width*-1/4, width)
    blue_mid_point = _clip(blue_mid_point, width*-1/4, width)
    blue_top_point = _clip(blue_top_point, width*-1/4, width)
    yellow_points = (yellow_bottom_point, yellow_mid_point, yellow_top_point)
    blue_points = (blue_bottom_point, blue_mid_point, blue_top_point)
    # Display stuff
    lane_lines_image = np.copy(frame)
    if yellow_bottom_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_bottom, yellow_bottom_line[0], yellow_bottom_line[1]), border_bottom, calculate_x_from_y(
            border_middle_bottom, yellow_bottom_line[0], yellow_bottom_line[1]), border_middle_bottom),),), line_color=(204, 102, 0))
    if yellow_mid_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_middle_bottom, yellow_mid_line[0], yellow_mid_line[1]), border_middle_bottom, calculate_x_from_y(
            border_middle_top, yellow_mid_line[0], yellow_mid_line[1]), border_middle_top),),), line_color=(255, 153, 51))
    if yellow_top_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_middle_top, yellow_top_line[0], yellow_top_line[1]), border_middle_top, calculate_x_from_y(
            border_top, yellow_top_line[0], yellow_top_line[1]), border_top),),), line_color=(255, 204, 153))
    if blue_bottom_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_bottom, blue_bottom_line[0], blue_bottom_line[1]), border_bottom, calculate_x_from_y(
            border_middle_bottom, blue_bottom_line[0], blue_bottom_line[1]), border_middle_bottom),),), line_color=(0, 153, 153))
    if blue_mid_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_middle_bottom, blue_mid_line[0], blue_mid_line[1]), border_middle_bottom, calculate_x_from_y(
            border_middle_top, blue_mid_line[0], blue_mid_line[1]), border_middle_top),),), line_color=(0, 255, 255))
    if blue_top_line is not None:
        lane_lines_image = display_lines(lane_lines_image, (((calculate_x_from_y(border_middle_top, blue_top_line[0], blue_top_line[1]), border_middle_top, calculate_x_from_y(
            border_top, blue_top_line[0], blue_top_line[1]), border_top),),), line_color=(153, 255, 255))
    line_points = ((yellow_bottom_point, bottom_goal), (yellow_mid_point, middle_goal), (yellow_top_point,
                   top_goal), (blue_bottom_point, bottom_goal), (blue_mid_point, middle_goal), (blue_top_point, top_goal))
    line_points = [point for point in line_points if point[0] is not None]
    lane_lines_image = display_points(lane_lines_image, line_points)
    show_image("lane lines", lane_lines_image)
    return yellow_points, blue_points, lane_lines_image
def calculate_x_from_y(y, gradient, intercept):
    """Invert y = gradient*x + intercept: return the x at a given y."""
    inverse_slope = 1 / gradient
    return inverse_slope * (y - intercept)
def split_lines(lines, height):
    """Partition Hough line segments into bottom / middle / top zone buckets.

    Zone boundaries come from module-level globals (border_*, *_goal and
    section_overlap). A segment that straddles a boundary can be placed in
    more than one bucket; `height` is accepted for interface compatibility.
    """
    bottom_zone = []
    middle_zone = []
    top_zone = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Normalise endpoint order so (x1, y1) is always the lower end
        # (the larger y value in image coordinates).
        if y2 > y1:
            x1, y1, x2, y2 = x2, y2, x1, y1
        in_bottom = ((border_middle_bottom < y2 and border_bottom > y1) or  # both ends inside bottom zone
                     (bottom_goal < y1 < border_bottom + section_overlap) or  # lower end far inside bottom zone
                     (border_middle_bottom < y1 < border_bottom and border_middle_bottom - section_overlap < y2))  # end in bottom zone, top not far off
        if in_bottom:
            bottom_zone.append(line)
        in_middle = ((border_middle_top < y2 and border_middle_bottom < y1) or  # both ends in middle zone
                     (middle_goal < y1 < border_middle_bottom) or  # bottom end far in middle zone
                     (border_middle_top < y2 < middle_goal) or  # top end far in middle zone
                     (border_middle_top < y1 < border_middle_bottom and border_middle_top - section_overlap < y2) or  # bottom end in, top very close
                     (border_middle_top < y2 < border_middle_bottom * 5/6 and y1 < border_middle_top + section_overlap))  # top end in, bottom very close
        if in_middle:
            middle_zone.append(line)
        in_top = ((border_top < y1 < border_middle_top) or  # both ends inside top zone
                  (border_top < y2 < top_goal) or  # top end far inside top zone
                  (border_top < y2 < border_middle_top and border_middle_top + section_overlap < y1))  # top inside zone, bottom near zone
        if in_top:
            top_zone.append(line)
    return bottom_zone, middle_zone, top_zone
def detect_edges(frame, low_thresh, high_thresh):
    """Threshold `frame` to a binary mask of pixels inside [low, high].

    Despite the name, no Canny edge detection is performed any more; the
    raw colour mask is returned directly.
    """
    return cv2.inRange(frame, low_thresh, high_thresh)
def region_of_interest(canny, polygon):
    """Keep only the pixels of `canny` that lie inside `polygon`.

    :param canny: single-channel image (edge map or threshold mask).
    :param polygon: polygon vertex array as accepted by cv2.fillPoly.
    :return: the masked image (zero outside the polygon).
    """
    # The original unpacked canny.shape into unused height/width locals;
    # they have been removed.
    mask = np.zeros_like(canny)
    cv2.fillPoly(mask, polygon, 255)
    show_image("mask", mask)
    masked_image = cv2.bitwise_and(canny, mask)
    return masked_image
def detect_line_segments(cropped_edges):
    """Run a probabilistic Hough transform over the edge/mask image.

    The thresholds below were tuned by trial and error; returns the raw
    cv2.HoughLinesP output (or None when nothing was found).
    """
    rho_precision = 1              # distance resolution: one pixel
    theta_precision = np.pi / 180  # angular resolution: one degree
    vote_threshold = 10            # minimal number of Hough votes
    line_segments = cv2.HoughLinesP(cropped_edges, rho_precision,
                                    theta_precision, vote_threshold,
                                    np.array([]), minLineLength=20,
                                    maxLineGap=4)
    if line_segments is not None:
        for segment in line_segments:
            logging.debug('detected line_segment:')
            logging.debug("%s of length %s" % (
                segment, length_of_line_segment(segment[0])))
    return line_segments
def section_average_slope_intercept(line_segments, goal_height):
    """
    Same logic as average_slope_intercept but without differentiating between left and right lines
    (as we can split left and right lines by colour)
    Rather than averaging gradients and intercepts, to solve issue of vertical lines average the
    angle and intercept with x-axis (for horizontal lines, include angle but forget intercept because they're not common
    and idk what to do.)
    Potential improvement if lines aren't averaging well - average angle and distance from origin to closest point on the line.

    Returns (average_gradient, average_intercept), or None when there are
    no segments or the segments disagree too much to trust an average.
    """
    angles = []
    distances = []
    num_lines = len(line_segments)
    if num_lines == 0:
        return None
    for line in line_segments:
        x1, y1, x2, y2 = line[0]
        if (x2 == x1):
            # Vertical segment: +90 degrees; use x as the distance stand-in.
            angles.append(math.pi/2)
            distances.append(x1)
        elif (y2 == y1):
            # Horizontal segment: angle 0; y stored in place of a distance.
            angles.append(0)
            distances.append(y1)
        else:
            angles.append(math.atan((y2-y1)/(x2-x1)))
            # Distance calculation - find general form values (set a to 1).
            # Signed distance from the point (0, goal_height) to the line
            # x + b*y + c = 0.
            b = -(x2-x1)/(y2-y1)
            c = -y1*b - x1
            distances.append(((b*goal_height + c))/math.sqrt(1+b**2))
    # Average
    # Find number of data points to include in average:
    # Trimmed mean over roughly the middle half of the sorted values; the
    # parity tweak keeps num_lines - data_point_num even so the slice
    # angles[start:end] contains exactly data_point_num elements.
    data_point_num = num_lines - round(num_lines*0.5)
    if (num_lines % 2 == 0 and data_point_num % 2 == 1) or (num_lines % 2 == 1 and data_point_num % 2 == 0):
        data_point_num += 1
    start = int((num_lines - data_point_num)/2)
    end = num_lines - start
    angles.sort()
    average_angle = sum(angles[start:end])/data_point_num
    distances.sort()
    average_distance = sum(distances[start:end])/data_point_num
    # Check 2/3rds of angles are within pi/8 radians
    fail_num = 0
    for angle in angles:
        if angle - average_angle > math.pi/8 or angle - average_angle < -math.pi/8:
            fail_num += 1
    if fail_num > num_lines*2/3:
        return None
    # Same consistency check for distances, with a fixed 25-unit tolerance.
    fail_num = 0
    for distance in distances:
        if abs(distance - average_distance) > 25:
            fail_num += 1
    if fail_num > num_lines*2/3:
        return None
    #average_distance = distances[int(num_lines/2)]
    if average_angle == math.pi/2:  # Average still vertical:
        average_gradient = 1024  # idk seems like a large enough number?
        average_intercept = -average_distance * \
            math.sqrt(average_gradient**2 + 1) + goal_height
    elif average_angle == 0 or abs(average_angle) == math.pi:
        return None  # Probably should do better something for horizontal lines than just giving up
    else:
        # Convert (angle, signed distance) back to slope/intercept form.
        average_gradient = math.tan(average_angle)
        average_intercept = average_distance * \
            math.sqrt(average_gradient**2 + 1) * \
            (-1 if average_gradient < 0 else 1) + goal_height
    return average_gradient, average_intercept
def average_slope_intercept(frame, line_segments):
    """Average all Hough segments into one (slope, intercept) pair.

    Vertical segments (infinite slope) are skipped.

    :param frame: current video frame; kept for interface compatibility.
    :param line_segments: cv2.HoughLinesP output, or None.
    :return: np.ndarray [slope, intercept]; an empty list when
        line_segments is None; None when no usable segment exists.
    """
    if line_segments is None:
        logging.info('No line_segment segments detected')
        return []
    # The original also computed width-based region boundaries here, but
    # they were never used; removed.
    fit = []
    for line_segment in line_segments:
        for x1, y1, x2, y2 in line_segment:
            if x1 == x2:
                logging.info(
                    'skipping vertical line segment (slope=inf): %s' % line_segment)
                continue
            poly = np.polyfit((x1, x2), (y1, y2), 1)
            fit.append((poly[0], poly[1]))
    # BUG FIX: guard BEFORE averaging -- np.average on an empty sequence
    # raises ZeroDivisionError (the original averaged unconditionally).
    if len(fit) > 0:
        return np.average(fit, axis=0)
    return None
def detect_obstacle(frame):
    """Locate the purple obstacle and return its rotated-box corner points.

    :param frame: input colour frame (same colour space the thresholds use).
    :return: 4x2 array of box corners, or None when no contour passes the
        size/shape filters.
    """
    threshold = detect_edges(frame, thresh_purple_low, thresh_purple_high)
    edges = cv2.Canny(threshold, 200, 400)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
    # but (contours, hierarchy) in OpenCV 4 -- handle both arities.
    found = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours = found[1] if len(found) == 3 else found[0]
    best = filter_contours(contours)
    if best is None:
        # BUG FIX: the original unpacked the result unconditionally and
        # raised TypeError when filter_contours found nothing.
        return None
    _, _, rect = best
    return cv2.boxPoints(rect)
def filter_contours(contours):
    """Pick the best obstacle candidate among `contours`.

    A contour qualifies when its area, perimeter, and bounding-rect shape
    all fall inside the module-level obstacle_* limits; among qualifiers
    the one with the largest area wins.

    :return: (contour, area, rect) for the best candidate, or None.
    """
    candidates = []
    for contour in contours:
        area = cv2.contourArea(contour)
        if not (obstacle_area_min < area < obstacle_area_max):
            continue
        perimeter = cv2.arcLength(contour, True)
        if not (obstacle_perimeter_min < perimeter < obstacle_perimeter_max):
            continue
        rect = cv2.minAreaRect(contour)
        width = min(rect[1])
        height = max(rect[1])
        ratio = width/height
        if obstacle_ratio < ratio and area/(width*height) > obstacle_rect_fill:
            candidates.append((contour, area, rect))
    if len(candidates) == 0:
        return None
    # max() is O(n) and equivalent to the original's descending sort
    # followed by [0] (both return the first of equal-area candidates).
    return max(candidates, key=lambda c: c[1])
def compute_steering_angle(frame, lane_lines):
    """Derive a steering angle in degrees from the detected lane lines.

    Convention: 90 is straight ahead, <90 steers left, >90 steers right.
    Assumes the camera is calibrated to point at dead centre.
    """
    if len(lane_lines) == 0:
        logging.info('No lane lines detected, do nothing')
        return 90
    height, width, _ = frame.shape
    if len(lane_lines) == 1:
        # Single lane line: steer parallel to it.
        logging.debug(
            'Only detected one lane line, just follow it. %s' % lane_lines[0])
        x1, _, x2, _ = lane_lines[0][0]
        x_offset = x2 - x1
    else:
        # Two lane lines: aim at the midpoint between their far endpoints.
        _, _, left_x2, _ = lane_lines[0][0]
        _, _, right_x2, _ = lane_lines[1][0]
        # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
        camera_mid_offset_percent = 0.02
        mid = int(width / 2 * (1 + camera_mid_offset_percent))
        x_offset = (left_x2 + right_x2) / 2 - mid
    # Steering deviation is the angle between straight ahead and the line
    # from the bottom-centre to the aim point half a frame up.
    y_offset = int(height / 2)
    deviation_rad = math.atan(x_offset / y_offset)
    deviation_deg = int(deviation_rad * 180.0 / math.pi)
    steering_angle = 90 + deviation_deg
    logging.debug('new steering angle: %s' % steering_angle)
    return steering_angle
def stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines,
                             max_angle_deviation_two_lines=20, max_angle_deviation_one_lane=20):
    """Clamp the change between consecutive steering angles.

    With two lane lines detected the angle may move by at most
    max_angle_deviation_two_lines degrees per update; with fewer lines the
    (more conservative) max_angle_deviation_one_lane limit applies. Could
    be improved to smooth over the last N angles.
    """
    limit = (max_angle_deviation_two_lines if num_of_lane_lines == 2
             else max_angle_deviation_one_lane)
    deviation = new_steering_angle - curr_steering_angle
    if abs(deviation) > limit:
        # Step toward the new angle by at most `limit` degrees.
        step = limit if deviation > 0 else -limit
        stabilized_steering_angle = int(curr_steering_angle + step)
    else:
        stabilized_steering_angle = new_steering_angle
    logging.info('Proposed angle: %s, stabilized angle: %s' %
                 (new_steering_angle, stabilized_steering_angle))
    return stabilized_steering_angle
############################
# Utility Functions
############################
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=10):
    """Overlay `lines` (HoughLinesP-style nested coordinate arrays) on `frame`."""
    overlay = np.zeros_like(frame)
    if lines is not None:
        for line in lines:
            try:
                coords = line[0]
                p1 = (int(coords[0]), int(coords[1]))
                p2 = (int(coords[2]), int(coords[3]))
                cv2.line(overlay, p1, p2, line_color, line_width)
            except (OverflowError, ValueError) as e:
                # Skip lines whose endpoints cannot be represented as ints
                # (e.g. NaN/inf coordinates).
                print(e)
    return cv2.addWeighted(frame, 1, overlay, 0.8, 1)
def display_points(frame, points, point_color=(138, 43, 226), point_radius=6):
    """Overlay filled circles at `points` ((x, y) pairs) on `frame`."""
    overlay = np.zeros_like(frame)
    if points is not None:
        for point in points:
            try:
                center = (int(point[0]), int(point[1]))
                cv2.circle(overlay, center, point_radius, point_color,
                           thickness=point_radius)
            except (ValueError, OverflowError) as e:
                # Skip points with non-finite coordinates.
                print(e)
    return cv2.addWeighted(frame, 1, overlay, 0.8, 1)
def display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5):
    """Draw the current heading as a line from the bottom-centre of `frame`.

    Steering angle convention: 0-89 = turn left, 90 = straight,
    91-180 = turn right.

    NOTE(review): steering_angle of exactly 0 or 180 makes tan() zero and
    would raise ZeroDivisionError here -- confirm callers never produce it.
    """
    overlay = np.zeros_like(frame)
    height, width, _ = frame.shape
    # (x1, y1) is the bottom-centre of the frame; (x2, y2) follows from
    # trigonometry on the steering angle.
    angle_rad = steering_angle / 180.0 * math.pi
    x1 = int(width / 2)
    y1 = height
    x2 = int(x1 - height / 2 / math.tan(angle_rad))
    y2 = int(height / 3)
    cv2.line(overlay, (x1, y1), (x2, y2), line_color, line_width)
    return cv2.addWeighted(frame, 0.8, overlay, 1, 1)
def length_of_line_segment(line):
    """Return the Euclidean length of a (x1, y1, x2, y2) segment."""
    x1, y1, x2, y2 = line
    # math.hypot is the stdlib idiom and avoids overflow of the squared terms.
    return math.hypot(x2 - x1, y2 - y1)
def show_image(title, frame, show=_SHOW_IMAGE):
    """Display `frame` in a window named `title` when `show` is truthy.

    `show` defaults to the module-level _SHOW_IMAGE flag; the default is
    captured once at function-definition time.
    """
    if show:
        cv2.imshow(title, frame)
def make_points(frame, line):
    """Convert a (slope, intercept) pair into a drawable segment.

    The segment spans from the bottom of the frame to its vertical middle,
    with x coordinates clamped to [-width, 2*width].
    """
    height, width, _ = frame.shape
    slope, intercept = line
    bottom_y = height                    # bottom of the frame
    middle_y = int(bottom_y * 1 / 2)     # half-way up the frame
    try:
        def clamp_x(y):
            return max(-width, min(2 * width, int((y - intercept) / slope)))
        return [[clamp_x(bottom_y), bottom_y, clamp_x(middle_y), middle_y]]
    except OverflowError:
        # Near-zero slope pushes x far off-screen; fall back to a
        # degenerate segment at the left clamp.
        return [[-width, bottom_y, -width, bottom_y]]
class File_Inputter:
    """Interactive helper that advances a target frame counter from stdin."""

    def __init__(self):
        self.frame_counter = 0    # frames consumed so far
        self.frame_goal = 1       # next frame index to stop at
        self.prev_increment = 50  # step used when the user just presses enter

    def next_frame_counter(self):
        """Loop forever, bumping frame_goal from user input.

        An empty line repeats the previous increment; an integer sets a
        new increment; any other input is silently ignored.
        """
        while True:
            user_text = input("At frame {}: ".format(self.frame_goal))
            if user_text == "":
                self.frame_goal += self.prev_increment
            else:
                try:
                    step = int(user_text)
                    self.frame_goal += step
                    self.prev_increment = step
                except ValueError:
                    pass
#cap = cv2.VideoCapture("test5.mp4")
#video_file = 'test5'
#frame_input = File_Inputter()
#_thread.start_new_thread(frame_input.next_frame_counter, tuple())
# Module-level follower instance shared with main(); HandCodedLaneFollower
# is defined earlier in this file.
lane_follower = HandCodedLaneFollower()
print("Running...")
def main():
    """Grab RealSense frames in a loop until the user presses 'q'."""
    time.sleep(3)
    use_live_stream = True
    bag_path = "xyz.bag"
    pipe, config, profile = setupstream(use_live_stream, bag_path)
    while True:
        """if frame_input.frame_counter < frame_input.frame_goal:
            _, frame = cap.read()
            frame_input.frame_counter += 1"""
        frame, depth_frame, frameset = getframes(pipe)
        #combo_image = lane_follower.follow_lane(frame)
        # Quit on 'q'; waitKey also services the OpenCV window event loop.
        if cv2.waitKey(25) & 0xff == ord('q'):
            cv2.destroyAllWindows()
            break
    return
if __name__ == '__main__':
    # Script entry point: start the frame-grabbing loop.
    main()
| null |
Stolen_vision_2.py
|
Stolen_vision_2.py
|
py
| 38,501 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.sleep",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.pipeline",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.config",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.stream",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "pyrealsense2.format",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "pyrealsense2.stream",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "pyrealsense2.format",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_NEAREST",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "cv2.fillPoly",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 681,
"usage_type": "attribute"
},
{
"api_name": "cv2.HoughLinesP",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 713,
"usage_type": "attribute"
},
{
"api_name": "math.atan",
"line_number": 719,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 741,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 753,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 757,
"usage_type": "attribute"
},
{
"api_name": "math.tan",
"line_number": 760,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 762,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 791,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 808,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 809,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_LIST",
"line_number": 810,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 810,
"usage_type": "attribute"
},
{
"api_name": "cv2.boxPoints",
"line_number": 813,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 820,
"usage_type": "call"
},
{
"api_name": "cv2.arcLength",
"line_number": 822,
"usage_type": "call"
},
{
"api_name": "cv2.minAreaRect",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 841,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 846,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 862,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 864,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 893,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 902,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 910,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 914,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 919,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 923,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 927,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 932,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 943,
"usage_type": "attribute"
},
{
"api_name": "math.tan",
"line_number": 946,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 949,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 950,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 957,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 962,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 1008,
"usage_type": "call"
},
{
"api_name": "cam.setupstream",
"line_number": 1012,
"usage_type": "call"
},
{
"api_name": "cam.getframes",
"line_number": 1021,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 1025,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 1026,
"usage_type": "call"
}
] |
216779028
|
#!/usr/bin/env python3
import math
import sys
import argparse
import numpy as np
import tensorflow as tf
import multiprocessing as mp
from functools import partial
from Bio.PDB import *
# Atomic masses (amu) for the element symbols that can start an atom name
# in `allowed_atoms`; looked up by the first character of the atom id.
weights = {'C':12.011, 'N':14.007, 'O':15.999, 'S':32.06}
# Heavy protein atom names considered when computing the ligand centre of
# mass; anything outside this list is ignored.
allowed_atoms = ['N','CA','C','O','OXT','CB',
                 'CZ','CZ1','CZ2','CE','CE1','CE2',
                 'CD','CD1','CD2','CG','CG1','CG2','CG3',
                 'CH2','OE','OE1','OE2','OD','OD1','OD2',
                 'OH','OG','OG1','OG2','NZ','NE','NE1','NE2',
                 'NH','NH1','NH2','ND','ND1','ND2',
                 'SG','SD','SE']
def get_sep(struc):
    """Read the one-letter sequence of the first chain in a PDB file.

    Residues are collapsed on the residue-number field (columns 23-27);
    parsing stops at the first TER record or when the chain id changes.

    :param struc: path to a PDB file.
    :return: one-letter amino-acid sequence string.
    """
    three2one = {'ALA':'A','ARG':'R','ASN':'N','ASP':'D',
                 'CYS':'C','GLN':'Q','GLU':'E','GLY':'G',
                 'HIS':'H','ILE':'I','LEU':'L','LYS':'K',
                 'MET':'M','PHE':'F','PRO':'P','SER':'S',
                 'THR':'T','TRP':'W','TYR':'Y','VAL':'V',
                 'MSE':'M'}
    sequence = ''
    prev_resnum = ''
    first_chain = ''
    for record in open(struc):
        if record.startswith('ATOM'):
            chain_id = record[21]
            if first_chain == '':
                first_chain = chain_id
            elif first_chain != chain_id:
                break
            resnum = record[22:27].strip()
            if resnum != prev_resnum:
                sequence += three2one[record[17:20]]
                prev_resnum = resnum
        if record.startswith('TER'):
            break
    return sequence
def ligand_center(atom_list, geometric=False):
    """Return the centre of the allowed atoms in `atom_list`.

    :param atom_list: atoms exposing get_id() / get_coord() (Bio.PDB atoms).
    :param geometric: when True return the unweighted centroid; otherwise
        the centre of mass using the module-level element weights table.
    :return: [x, y, z] centre coordinates.
    """
    masses = []
    axes = [[], [], []]
    for atom in atom_list:
        if atom.get_id() not in allowed_atoms:
            continue
        # Element mass comes from the first character of the atom name.
        masses.append(weights[atom.get_id()[0]])
        coord = atom.get_coord()
        for axis_index in range(3):
            axes[axis_index].append(coord[axis_index])
    if geometric:
        return [sum(axis) / len(masses) for axis in axes]
    total_mass = sum(masses)
    return [sum(pos * m for pos, m in zip(axis, masses)) / total_mass
            for axis in axes]
def get_rototranslation(data):
    """Compute rotation/translation pairs for a batch of GRAMM poses.

    Runs the module-level `get_rt` TF graph once per (pose, angles, shift)
    triple; `config`, `r`, `t`, `gr`, `gt`, `lcm` and `cm` are module-level
    globals wired up in the __main__ section.
    """
    processed = 0
    out = []
    with tf.Session(graph=get_rt, config=config) as sess:
        for pose, angles, shift in data:
            r_mat, t_vec = sess.run([r, t],
                                    feed_dict={gr: angles, gt: shift, lcm: cm})
            out.append([pose, r_mat, t_vec])
            processed += 1
            if processed % 1000 == 0:
                print ('Processed {} rt matrixes!'.format(processed))
    return out
def rototranslate_coord(data, c_only=True):
    """Apply each roto-translation in `data` and score the resulting pose.

    `c_only` is accepted for interface compatibility but is not used.
    Relies on the module-level `rt_comp` graph and its `rtcoord`, `score`,
    `r_mat`, `t_vec` tensors defined in the __main__ section.
    """
    processed = 0
    out = []
    with tf.Session(graph=rt_comp, config=config) as sess:
        for entry in data:
            coords, pose_score = sess.run(
                [rtcoord, score],
                feed_dict={r_mat: entry[1], t_vec: entry[2]})
            out.append([entry[0], coords, pose_score])
            processed += 1
            if processed % 1000 == 0:
                print ('Roto-translated {} structures!'.format(processed))
    return out
def get_main_coord(res):
    """Pick the representative atom id of a residue.

    Preference order: CB, then CA, then the first atom present; None for
    an empty residue.
    """
    atom_ids = [atom.get_id() for atom in res]
    if 'CB' in atom_ids:
        return 'CB'
    if 'CA' in atom_ids:
        return 'CA'
    if atom_ids:
        return atom_ids[0]
    return None
def split_jobs(job_list, cores):
    """Split `job_list` into `cores` contiguous batches.

    Each of the first cores-1 batches holds floor(len/cores) jobs; the
    final batch absorbs the remainder.
    """
    batch_size = math.floor(len(job_list) / cores)
    batches = [job_list[i * batch_size:(i + 1) * batch_size]
               for i in range(cores - 1)]
    batches.append(job_list[batch_size * (cores - 1):])
    return batches
def mergesort_pred(linear):
    """Parallel bottom-up merge sort of `linear`, descending by item[2].

    Adjacent runs of length `elem` are paired and merged in parallel with
    a multiprocessing pool (pool size comes from the module-level `cores`)
    until a single sorted run remains.
    """
    elem = 1
    while len(linear) > elem:
        joblist = []
        # Walk the list in strides of 2*elem, pairing adjacent runs.
        for idx in range(0, len(linear)+elem*2, elem*2):
            ida = idx+elem
            idb = idx+elem*2
            if len(linear) >= idb:
                # Two full runs.
                a = linear[idx:ida]
                b = linear[ida:idb]
            elif len(linear) >= ida:
                # Tail: full left run, short right run.
                a = linear[idx:ida]
                b = linear[ida:]
            elif len(linear) >= idx:
                # Tail: only a left fragment remains.
                a = linear[idx:]
                b = []
            else: continue
            joblist.append([a, b])
        pool = mp.Pool(processes=cores)
        results = pool.map(merge, joblist)
        pool.close()
        pool.join()
        # Flatten the merged runs back into a single list, doubling the
        # sorted-run length for the next pass.
        linear = [ el for res in results for el in res ]
        elem *= 2
    return linear
def merge(job):
    """Merge two score-descending lists (job = [list_a, list_b]).

    Elements are compared on index 2 (the pose score); the result stays
    in descending score order.
    """
    left, right = job[0], job[1]
    merged = []
    i = j = 0
    while len(merged) < len(left) + len(right):
        if i == len(left):
            merged.extend(right[j:])
        elif j == len(right):
            merged.extend(left[i:])
        elif left[i][2] >= right[j][2]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged
if __name__ == "__main__":
    # Protocol: score GRAMM docking poses with predicted interface
    # probabilities and write the best-scoring models as PDB files.
    p = argparse.ArgumentParser(description = '- Plot PPV stats for a Cmap or a list of Cmaps')
    p.add_argument('-g', required= True, help='gramm output')
    p.add_argument('-i1', required= True, help='interface predictions 1')
    p.add_argument('-i2', required= True, help='interface predictions 2')
    p.add_argument('-s1', required= True, help='structure file 1')
    p.add_argument('-s2', required= True, help='structure file 2')
    p.add_argument('-c', required= True, type=int, help='label file column')
    p.add_argument('-n', required= False, default=0, type=int, help='number of cores to use')
    p.add_argument('-o', required= False, default=None, help='output file path and prefix to write models (no format)')
    ns = p.parse_args()
    ##### parse contacts #####
    # Per-residue interface scores for receptor (i1) and ligand (i2); their
    # outer product below forms a pseudo contact map.
    # NOTE(review): contactidxs is never used afterwards.
    contactidxs = []
    scores1 = [float(line.rstrip().split('\t')[ns.c]) for line in open(ns.i1)]
    scores2 = [float(line.rstrip().split('\t')[ns.c]) for line in open(ns.i2)]
    scores1 = np.array(scores1, dtype=np.float32)
    scores2 = np.array(scores2, dtype=np.float32)
    # Cap probabilities below 1.0 so -log(1-p) stays finite in the scorer.
    # (This reuses the name `p`; the parser object is not needed again
    # until it is rebound to a PDBParser below.)
    for p in range(scores1.shape[0]):
        #if scores1[p] == 0.0: scores1[p] = 0.01
        if scores1[p] == 1.0: scores1[p] = 0.99
    for p in range(scores2.shape[0]):
        #if scores2[p] == 0.0: scores2[p] = 0.01
        if scores2[p] == 1.0: scores2[p] = 0.99
    scores1 = np.expand_dims(scores1, axis=1)
    scores2 = np.expand_dims(scores2, axis=0)
    cmap = scores1*scores2
    sep = len(get_sep(ns.s1))
    # 1-based residue ids for every ligand column of the contact map.
    contactids = [y+1 for y in range(0, cmap.shape[1])]
    ##### parse structures #####
    p = PDBParser(QUIET=True)
    str1 = p.get_structure('', ns.s1)
    str2 = p.get_structure('', ns.s2)
    # str3 is a second copy of the ligand used for writing posed models.
    str3 = p.get_structure('', ns.s2)
    # Take the id of the (last) chain of the ligand model.
    for c in str2[0]: lchainid = c.get_id()
    rec_res = Selection.unfold_entities(str1, 'R')
    lig_res = Selection.unfold_entities(str2, 'R')
    ##### ligand real interface CB/CA coordinates #####
    lcoordinates = []
    if (len(contactids)==0):
        print("No contacts found ")
        sys.exit(0)
    for idx in contactids:
        atom = get_main_coord(str2[0][lchainid][idx])
        if atom is None: continue
        lcoordinates.append([c for c in str2[0][lchainid][idx][atom].get_coord()])
    lcoordinates = np.array(lcoordinates, dtype=np.float32)
    if (len(lcoordinates)==0):
        print("No ligand coordinates found ")
        sys.exit(0)
    ##### ligand CB/CA coordinates #####
    # All ligand atoms plus (residue id, atom id) bookkeeping for writing
    # the roto-translated models back out.
    full_lig = []
    full_lig_id = []
    for res in lig_res:
        resid = res.get_id()[1]
        for atom in res:
            atomid = atom.get_id()
            full_lig.append([c for c in atom.get_coord()])
            full_lig_id.append([resid, atomid])
    full_lig = np.array(full_lig, dtype=np.float32)
    ##### receptor CB/CA coordinates #####
    rcoordinates = []
    for res in rec_res:
        atom = get_main_coord(res)
        if atom is None: continue
        rcoordinates.append([c for c in res[atom].get_coord()])
    rcoordinates = np.array(rcoordinates, dtype=np.float32)
    if (len(rcoordinates)==0):
        print("No receptor coordinates found")
        sys.exit(0)
    ##### get contact probabilities #####
    # NOTE(review): np.int is deprecated in modern NumPy; works on the
    # NumPy contemporary with this TF1 code.
    contactids = np.array(contactids, dtype=np.int)
    lrprobs = cmap[:,contactids-1]
    ##### calculate lcm #####
    # Ligand centre of mass, used as the rotation origin.
    lig_atoms = Selection.unfold_entities(str2, 'A')
    cm = np.array(ligand_center(lig_atoms), dtype=np.float32)
    ##### graph to elaborate GRAMM output #####
    # Converts GRAMM Euler angles (gr, degrees) and translation (gt) into
    # a rotation matrix r and translation vector t about the ligand centre
    # of mass (lcm).
    with tf.Graph().as_default() as get_rt:
        with tf.name_scope('input0'):
            pi = tf.constant(math.pi/180.0)
            gr = tf.placeholder(dtype=tf.float32, shape=(3))
            gt = tf.placeholder(dtype=tf.float32, shape=(3))
            lcm = tf.placeholder(dtype=tf.float32, shape=(3))
            gr1 = pi*gr
            sina = tf.math.sin(gr1[0])
            sinb = tf.math.sin(gr1[1])
            sing = tf.math.sin(gr1[2])
            cosa = tf.math.cos(gr1[0])
            cosb = tf.math.cos(gr1[1])
            cosg = tf.math.cos(gr1[2])
            # Rotation matrix assembled from the three Euler angles.
            r = tf.stack([tf.stack([cosg*cosa,
                                    cosg*sina,
                                    -sing]),
                          tf.stack([sinb*sing*cosa-cosb*sina,
                                    sinb*sing*sina+cosb*cosa,
                                    cosg*sinb]),
                          tf.stack([cosb*sing*cosa+sinb*sina,
                                    cosb*sing*sina-sinb*cosa,
                                    cosg*cosb])])
            # Translation correcting for rotation about the centre of mass.
            r_lcm = tf.linalg.matvec(r, lcm)
            t = gt+(lcm-r_lcm)
            t = tf.expand_dims(t, axis=-1)
    ##### graph to roto-translate and score atom coordinates #####
    # Applies (r_mat, t_vec) to the ligand interface coordinates and scores
    # the pose: sum of -log(1-p) over receptor/ligand pairs closer than 12 A.
    with tf.Graph().as_default() as rt_comp:
        with tf.name_scope('input1'):
            pr = tf.constant(lrprobs)
            xyz = tf.constant(lcoordinates)
            rec = tf.constant(rcoordinates)
            t_vec = tf.placeholder(dtype=tf.float32, shape=(3, 1))
            r_mat = tf.placeholder(dtype=tf.float32, shape=(3, 3))
            xyz = tf.transpose(xyz, perm=[1,0])
            rtcoord = tf.math.add(t_vec, tf.linalg.matmul(r_mat, xyz))
            rtcoord = tf.transpose(rtcoord, perm=[1,0])
            ##### scoring #####
            pr = -tf.math.log(1-pr)
            A = tf.expand_dims(rec, axis=1)
            B = tf.expand_dims(rtcoord, axis=0)
            distances = tf.math.sqrt(tf.reduce_sum((A-B)**2, axis=-1))
            # pr = tf.math.log(pr+0.75)/distances
            zeros = tf.zeros(distances.shape, dtype=tf.float32)
            scores = tf.where(tf.math.less(distances, 12), pr, zeros)
            score = tf.math.reduce_sum(scores)
    ##### graph for full roto-translation #####
    # Same transform applied to every ligand atom (for writing models out).
    with tf.Graph().as_default() as full_rt:
        with tf.name_scope('input2'):
            full_xyz = tf.constant(full_lig)
            full_t = tf.placeholder(dtype=tf.float32, shape=(3, 1))
            full_r = tf.placeholder(dtype=tf.float32, shape=(3, 3))
            full_xyz = tf.transpose(full_xyz, perm=[1,0])
            full_rtcoord = tf.math.add(full_t, tf.linalg.matmul(full_r, full_xyz))
            full_rtcoord = tf.transpose(full_rtcoord, perm=[1,0])
    if ns.n != 0: cores = ns.n
    else: cores = int(mp.cpu_count()-1)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    ##### compute rototranslations #####
    # Parse pose lines after the '[match]' marker in the GRAMM output:
    # columns 2-4 are Euler angles, 5-7 the translation, 0 the pose id.
    mat_jobs = []
    poses = False
    count = 0
    for line in open(ns.g):
        if '[match]' in line:
            poses = True
            continue
        if not poses: continue
        line = [float(el.strip()) for el in line.split() if el.strip() != '']
        grammr = np.array([line[2], line[3], line[4]], dtype=np.float32)
        grammt = np.array([line[5], line[6], line[7]], dtype=np.float32)
        mat_jobs.append([int(line[0]), grammr, grammt])
        count += 1
        if count == 11: break
    pool = mp.Pool(processes = cores)
    job_list = split_jobs(mat_jobs, cores)
    results = pool.map(get_rototranslation, job_list)
    pool.close()
    pool.join()
    ##### rototranslate and score poses #####
    rtlist = []
    rtdict = {}
    for batch in results:
        rtlist.extend(batch)
        for result in batch: rtdict[result[0]] = result[1:]
    pool = mp.Pool(processes = cores)
    job_list = split_jobs(rtlist, cores)
    results = pool.map(rototranslate_coord, job_list)
    pool.close()
    pool.join()
    ##### sort out results #####
    scorelist = []
    for batch in results: scorelist.extend(batch)
    sortedlist = mergesort_pred(scorelist)
    ##### print out #####
    # Report the ten best poses; optionally write each as a PDB model with
    # ligand B-factors set to the predicted interface scores.
    io = PDBIO()
    with tf.Session(graph=full_rt, config=config) as sess:
        for n in range(10):
            pose = sortedlist[n][0]
            score = sortedlist[n][2]
            print ('# {} - Pose {} - Score {}'.format(n+1, pose, score))
            if not ns.o is None:
                rt_coord = sess.run(full_rtcoord, feed_dict = {full_r:rtdict[pose][0], full_t:rtdict[pose][1]})
                for coord, ids in zip(rt_coord, full_lig_id):
                    str3[0][lchainid][ids[0]][ids[1]].set_coord(coord)
                    str3[0][lchainid][ids[0]][ids[1]].set_bfactor(scores2[0,ids[0]-1])
                io.set_structure(str3)
                io.save('{}_{}.pdb'.format(ns.o, n+1))
    # Also write the receptor with per-residue scores in the B-factor column.
    for res in str1[0]['A']:
        resnum = res.get_id()[1]
        for atom in res:
            atomnum = atom.get_id()
            str1[0]['A'][resnum][atomnum].set_bfactor(scores1[resnum-1, 0])
    io.set_structure(str1)
    io.save('{}_r.pdb'.format(ns.o))
| null |
src/protocol_gramm.py
|
protocol_gramm.py
|
py
| 15,686 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.Session",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "numpy.expand_dims",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Graph",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.sin",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.sin",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.sin",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.cos",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.cos",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.cos",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.stack",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tensorflow.stack",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "tensorflow.stack",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "tensorflow.linalg.matvec",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "tensorflow.linalg",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "tensorflow.Graph",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.transpose",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.add",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.linalg.matmul",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorflow.linalg",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.transpose",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.log",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.sqrt",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.where",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.less",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.reduce_sum",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Graph",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.transpose",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.add",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.linalg.matmul",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "tensorflow.linalg",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.transpose",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 346,
"usage_type": "call"
}
] |
349289287
|
import torch
from H36M import Task
class Config(object):
def __init__(self):
self.annotation_path = "./Human3.6M/annot"
self.image_path = "./Human3.6M/images"
self.pretrained_path = "./pretrained/"
self.subjects = [1, 5, 6, 7, 8, 9, 11]
self.task = Task.Train
self.num_parts = 17
self.heatmap_xy_coefficient = 2
self.voxel_xy_res = 64
self.voxel_z_res = [1, 2, 4, 64]
self.batch = 4
self.workers = 8
self.epoch = 10
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
| null |
config.py
|
config.py
|
py
| 604 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "H36M.Task.Train",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "H36M.Task",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 20,
"usage_type": "attribute"
}
] |
602785111
|
from __future__ import print_function
import sys, os.path, glob, json
from time import sleep
import requests, urllib2, httplib
from io import BytesIO
from PIL import Image, ImageFile
import numpy as np
AGENT = {'Api-User-Agent': 'WikiFaces/1.0 (https://github.com/apoorvkh/WikiFaces; [email protected])'}
info_files = glob.glob("dataset/*/*/*/data.json")
images_scraped = []
for info_file in info_files:
with open(info_file, 'r') as fp:
info_json = json.load(fp)
if 'primary_images' not in info_json or info_json['primary_images'] == []:
continue
primary_images = map(lambda img : img.replace('_', ' ').replace("+", "%2B"), info_json['primary_images'])
folder = info_file[:len(info_file) - 9]
with open(folder + 'image_data.json', 'r') as fp:
image_data = json.load(fp)
print(info_file)
if 'images' not in image_data:
image_data['images'] = {}
for primary_image in primary_images:
if primary_image in image_data['images'] or primary_image == '':
continue
while True:
try:
image_query = requests.get("https://commons.wikimedia.org/w/api.php?action=query&prop=imageinfo&iilimit=1&iiprop=url|dimensions|mime|mediatype|commonmetadata&titles=File:" + primary_image + "&format=json", headers=AGENT).json()
break
except requests.exceptions.ConnectionError:
sleep(60)
image = list(image_query['query']['pages'].values())[0]
data = {}
try:
if image['imagerepository'] == 'shared':
data['pageid'] = int(image['imageinfo'][0]['descriptionshorturl'][48:])
else:
data['pageid'] = image['pageid']
data['page_src'] = info_json['wiki_site'] + '.wikipedia.org'
except KeyError as e:
print(e)
continue
data['title'] = image['title'][5:]
data['url'] = image['imageinfo'][0]['url']
data['size'] = image['imageinfo'][0]['size']
data['width'] = image['imageinfo'][0]['width']
data['height'] = image['imageinfo'][0]['height']
meta_obj = {}
for elm in image['imageinfo'][0]['commonmetadata']:
meta_obj[elm['name']] = elm['value']
if image['imageinfo'][0]['mime'] not in ['image/png', 'image/jpg', 'image/jpeg']:
continue
if image['imageinfo'][0]['mime'] == 'image/png':
primary_image = primary_image[:len(primary_image) - 3] + 'jpg'
info_json['primary_image'] = primary_image.replace(' ', '_')
with open(folder + 'info.json', 'w') as write:
json.dump(info_json, write)
image_data['images'][primary_image] = data
enc_path = folder + primary_image.encode('utf-8').decode('utf-8').encode('utf-8')
try:
im = Image.open(BytesIO(urllib2.urlopen(data['url']).read()))
if image['imageinfo'][0]['mime'] == 'image/png':
im = im.convert('RGB')
if data['width'] > 4800 or data['height'] > 3200:
# Resize image and update metadata accordingly
if 1.0 * data['width'] / 4800 > 1.0 * data['height'] / 3200:
image_data['images'][primary_image]['width'] = 4800
image_data['images'][primary_image]['height'] = int(data['height'] * 4800 / data['width'])
else:
image_data['images'][primary_image]['width'] = int(data['width'] * 3200 / data['height'])
image_data['images'][primary_image]['height'] = 3200
im = im.resize((image_data['images'][primary_image]['width'], image_data['images'][primary_image]['height']), Image.NEAREST)
print("Saving " + enc_path)
im.save(enc_path)
images_scraped.append(enc_path)
except (urllib2.URLError, httplib.IncompleteRead):
# URL probably changed, don't try downloading this file again
del image_data['images'][primary_image]
print("HTTP Error on " + enc_path)
sleep(10)
except IOError:
del image_data['images'][primary_image]
print("Image truncation: " + enc_path)
with open(folder + 'image_data.json', "w") as write:
json.dump(image_data, write)
with open("primary_images_scraped.txt", 'w') as w:
print(images_scraped, file=w)
| null |
scripts/wiki_scrape/primary_image_scrape.py
|
primary_image_scrape.py
|
py
| 4,444 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "glob.glob",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "PIL.Image.NEAREST",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "urllib2.URLError",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "httplib.IncompleteRead",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 105,
"usage_type": "call"
}
] |
299041657
|
"""The Flask App
"""
import os
from uuid import uuid4
from datetime import datetime
from flask import Flask, request, session, abort, redirect, g, url_for, flash
from flaskext.genshi import Genshi, render_response
from flask import json
from werkzeug.contrib.fixers import ProxyFix
from openhdi.mongo import get_db
import openhdi.model as model
import openhdi.aggregates as aggregates
from openhdi.api import api
app = Flask(__name__)
# fix for REMOTE_ADDR and HTTP_HOST on reverse proxies
# However causes problems with testing when running on localhost!
# app.wsgi_app = ProxyFix(app.wsgi_app)
def configure_app():
app.config.from_object('openhdi.settings_default')
here = os.path.dirname(os.path.abspath( __file__ ))
# parent directory
config_path = os.path.join(os.path.dirname(here), 'openhdi.cfg')
if 'OPENHDI_CONFIG' in os.environ:
app.config.from_envvar('OPENHDI_CONFIG')
elif os.path.exists(config_path):
app.config.from_pyfile(config_path)
ADMINS = ['[email protected]']
if not app.debug:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, 'yourtopia.net error')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
configure_app()
genshi = Genshi(app)
QUIZ = app.config['QUIZ']
app.register_module(api, url_prefix='/api')
@app.before_request
def make_session():
g.db = get_db()
if not 'id' in session:
session['id'] = str(uuid4())
g.user_id = session.get('id')
# generic stuff
@app.errorhandler(404)
def page_not_found(e):
values = dict(error='404 - Not Found',
message='Sorry, what you are looking for is not here'
)
return render_response('error.html', values)
@app.errorhandler(500)
def apperror(e):
values = dict(error='500 - Error',
message='Sorry, there was an error. We will be looking into it!'
)
return render_response('error.html', values)
## ======================
## Routes and Controllers
@app.route('/')
def home():
total_so_far = g.db.weighting.count()
return render_response('index.html', dict(
total_so_far=total_so_far
))
@app.route('/quiz')
def quiz():
# step = int(request.args.get('stage', '1'))
quiz = model.Quiz(QUIZ)
w = model.Weighting.load(QUIZ, g.user_id, create=True)
step = len(w['sets_done']) + 1
if step <= 4:
return redirect(url_for('quiz_question', step=step))
if request.args.get('compute', False):
agg = aggregates.Aggregator()
agg.compute(g.user_id)
complete = 1
return redirect(url_for('result_user', user_id=g.user_id))
return render_response('quiz.html', dict(
num_steps=4
))
@app.route('/quiz/<int:step>')
def quiz_question(step):
db = get_db()
quiz = model.Quiz(QUIZ)
query = {'id': g.user_id}
user = db.user.find_one(query)
if user is None:
_user = {
'id': g.user_id,
'ipaddr': request.remote_addr,
'created': datetime.now().isoformat()
}
db.user.insert(_user)
w = model.Weighting.load(QUIZ, g.user_id, create=True)
if step == 1:
dimension = '__dimension__'
questions = quiz['structure']
elif step > 4: # should not be here ..
return redirect(url_for('quiz'))
else:
# use order of dimensions in quiz
dimension = quiz['structure'][step-2]['id']
questions = quiz['structure'][step-2]['structure']
total = 0
for idx,qu in enumerate(questions):
_weight = w['question_sets'][dimension][idx][1]
# percentages
_weight = int(100*_weight)
total += _weight
qu['weight'] = _weight
# make sure we sum to 100
# add to random question?
if total < 100:
questions[0]['weight'] = questions[0]['weight'] + (100-total)
return render_response('quiz_question.html', dict(
questions=questions,
step=step,
dimension=dimension,
))
@app.route('/quiz', methods=['POST'])
def quiz_submit():
db = get_db()
def indicator(field_name):
return field_name.split('-')[1]
weightings = [
[indicator(x[0]), int(x[1])/float(100)]
for x in request.form.items()
if x[0].startswith('weighting-')
]
dimension = request.form['dimension']
# TODO: should be stricter about it existing already
w = model.Weighting.load(QUIZ, g.user_id, create=True)
w['question_sets'][dimension] = weightings
if dimension not in w['sets_done']:
w['sets_done'].append(dimension)
w.compute_weights()
w.save()
# flash('Saved your weightings')
# return redirect('quiz')
return quiz()
@app.route('/about')
def about():
return render_response('about.html')
@app.route('/how')
def how():
return render_response('how.html')
@app.route('/result')
def result(user_id=None):
import iso3166
from openhdi.hdi_gni import data as hdi_gni_data
def get_sorted(score_set):
if not score_set:
return []
s = score_set
s = sorted(s, cmp=lambda x,y: -cmp(x[1], y[1]))
# normalize too (avoid divide by 0)
ourmax = max(0.00000000001, s[0][1])
result = []
for x in s:
iso_country = iso3166.countries.get(x[0])
hdi_gni = hdi_gni_data.get(iso_country.alpha3, {"HDI": None, "GNI": None})
result.append((x[0], round(x[1]/ourmax, 3), iso_country.name, hdi_gni["HDI"], hdi_gni["GNI"]))
return result
agg = aggregates.Aggregator()
global_scores = agg.scores()
global_scores = get_sorted(global_scores)
if user_id:
user_scores = agg.scores(g.user_id)
user_scores = get_sorted(user_scores)
weights = agg.weights(g.user_id)
else:
weights = agg.weights()
user_scores = []
quiz = model.Quiz('yourtopia')
treeweights = {}
for dim in quiz['structure']:
subtree = {}
for ind in dim['structure']:
subtree[ind['label']] = weights[ind['id']]
treeweights[dim['id']] = subtree
# last_year='2007'
return render_response('result.html', dict(
user_scores=user_scores,
global_scores=global_scores,
user_scores_json=json.dumps(user_scores),
global_scores_json=json.dumps(global_scores),
weights=json.dumps(treeweights)
))
# DEPRECATED. Here for backwards compatibility
@app.route('/result/me')
def result_me():
return result(g.user_id)
@app.route('/result/<user_id>')
def result_user(user_id):
db = get_db()
if not db.weighting.find_one({'user_id': user_id}):
abort(404)
return result(user_id)
if __name__ == '__main__':
app.run()
| null |
openhdi/app.py
|
app.py
|
py
| 6,938 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.SMTPHandler",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flaskext.genshi.Genshi",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "openhdi.api.api",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "flask.g.db",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "openhdi.mongo.get_db",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.g.user_id",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "flask.g.db.weighting.count",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "openhdi.model.Quiz",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "openhdi.model",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "openhdi.model.Weighting.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "openhdi.model.Weighting",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "openhdi.model",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "openhdi.aggregates.Aggregator",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "openhdi.aggregates",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.g.user_id",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "openhdi.mongo.get_db",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "openhdi.model.Quiz",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "openhdi.model",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "flask.request.remote_addr",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "openhdi.model.Weighting.load",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "openhdi.model.Weighting",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "openhdi.model",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "openhdi.mongo.get_db",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "flask.request.form.items",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "openhdi.model.Weighting.load",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "openhdi.model.Weighting",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "openhdi.model",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "iso3166.countries.get",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "iso3166.countries",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "openhdi.hdi_gni.data.get",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "openhdi.hdi_gni.data",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "openhdi.aggregates.Aggregator",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "openhdi.aggregates",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "openhdi.model.Quiz",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "openhdi.model",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "flaskext.genshi.render_response",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "flask.json.dumps",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "flask.json.dumps",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "flask.json.dumps",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "flask.g.user_id",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "openhdi.mongo.get_db",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 226,
"usage_type": "call"
}
] |
609379353
|
#!/usr/bin/python2.7
"""This is script to check a lofasm files and output a file information file.
"""
if __name__=='__main__':
import numpy as np
import argparse
import os
from lofasm.file_info import LofasmFileInfo
parser = argparse.ArgumentParser(description="Lofasm check file tool")
parser.add_argument("-lf",help="Lofasm data files. ", default="", \
type=str, nargs='+')
parser.add_argument("-info",help="File information file ", default="")
parser.add_argument("-o",help="Output file name", default="lofasm_files_info.dat")
parser.add_argument("-d",help="A directory to check", default="", \
type=str, nargs='+')
args = parser.parse_args()
if args.lf == "" and args.info != "":
lif = LofasmFileInfo(info_file=args.info)
elif args.lf != "" and args.info == "":
lif = LofasmFileInfo(files=args.lf)
elif args.lf != "" and args.info != "":
lif = LofasmFileInfo(files=args.lf, info_file=args.info)
else:
if args.d == "":
wd = [os.getcwd(),]
else:
wd = args.d
for dd in wd:
lfs = [f for f in os.listdir(dd) if os.path.isfile(os.path.join(dd, f))]
os.chdir(dd)
# Try to file lofasm default file info file
if 'lofasm_files_info.dat' in lfs:
lif = LofasmFileInfo(files=lfs, info_file='lofasm_files_info.dat')
else:
lif = LofasmFileInfo(files=lfs)
lif.info_write(args.o)
| null |
bin/lofasm_check_files.py
|
lofasm_check_files.py
|
py
| 1,548 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lofasm.file_info.LofasmFileInfo",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "lofasm.file_info.LofasmFileInfo",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "lofasm.file_info.LofasmFileInfo",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "lofasm.file_info.LofasmFileInfo",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "lofasm.file_info.LofasmFileInfo",
"line_number": 38,
"usage_type": "call"
}
] |
539323002
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
"""A simulation of co-evolution of prey and predator(birth、death、competition
and de novo mutation) using Gillespie Algorithm.
* This version is for drawing a picture of stochastic co-evolutionary dynamics
of the prey and predator species.
* The new g and k value drawn randomly from an Uniform Distribution.
* Note: This computational model is based on Weini's work in 2017.
See at: https://github.com/WeiniHuangBW/DynamicalTradeoffandCoevolution
For more detailed information about this model, please visit:
https://www.nature.com/articles/s41467-017-01957-8#Sec8
"""
class Prey:
"""
A class to represent a type of prey species.
Attributes:
num: Number of this prey species.
g_value: Growth-defense trade off parameter, g ∈ (0,1).
num_list: A list that store the number of this species after a
reaction happen.
time_list: A list that store the time.
"""
def __init__(self, num, g_value):
"""Initialize attributes to describe a kind of prey."""
self.g_value = float('%.3f' % g_value)
self.num = num
self.num_list = []
self.time_list = []
def prey_reproduce(self):
"""Prey reproduces without mutation."""
self.num += 1
def prey_die(self):
"""Prey dies because of competition, predation or dies naturally."""
self.num -= 1
def record_data(self, time):
"""Record the number of prey and time after each reaction happen."""
self.time_list.append(time)
self.num_list.append(self.num)
class Predator:
"""
A class to represent a type of predator species.
Attributes:
num: Number of this predator species.
k_value: k value is the ratio of predator growth to predation
and represents the reproduction efficacy of a predator type.
num_list: A list that store the number of this species after a
reaction happen.
time_list: A list that store the time.
"""
def __init__(self, num, k_value):
"""Initialize attributes to describe a kind of predator."""
self.k_value = float('%.3f' % k_value)
self.num = num
self.num_list = []
self.time_list = []
def predator_reproduce(self):
"""Predator reproduces without mutation."""
self.num += 1
def predator_die(self):
"""Predator dies naturally."""
self.num -= 1
def record_data(self, time):
"""Record the number of prey and time after each reaction happen."""
self.time_list.append(time)
self.num_list.append(self.num)
def prey_mutation():
"""Mutation of parental prey to generate a new prey species.
When a prey reproduces, a mutation may occur with a small probability
μx. The mutant is characterized by a new g value(type specific rate)
drawn randomly from an uniform distribution between 0 and 1.
"""
new_prey = Prey(num=1, g_value=np.random.random_sample())
current_prey.append(new_prey)
def predator_mutation():
"""Mutation of parental predator to generate a new prey species.
With a probability μy, a predator produces a mutant with a new
k value drawn from an uniform distribution between 0 and kmax,
the upper limit of the reproduction efficacy.
"""
new_predator = Predator(num=1, k_value=np.random.random_sample())
current_predator.append(new_predator)
def extinction_check(current_type, extinct_type, s=0):
    """Move species whose population reached zero into the extinct list.

    Both lists are mutated in place.

    Args:
        current_type: Live prey or predator species.
        extinct_type: Destination list for species with zero individuals.
        s: Starting index of the scan (kept for backward compatibility;
            callers leave it at 0).
    """
    while s < len(current_type):
        species = current_type[s]
        if species.num == 0:
            extinct_type.append(species)
            current_type.pop(s)
        else:
            s += 1
def reaction_time(reaction_rate_array):
    """Draw exponential waiting times for each reaction channel.

    Gillespie algorithm: tau = (1 / rate) * ln(1 / r) with r drawn from
    the uniform distribution on [0, 1).

    Args:
        reaction_rate_array: Array of reaction rates.  Any shape is
            accepted (the original handled only 1-D and 2-D arrays; this
            generalizes by unpacking the shape into np.random.rand).

    Returns:
        Array of sampled reaction times with the same shape as the input.
    """
    # Zero rates yield +inf waiting times (the reaction never fires),
    # which the downstream min() selection handles naturally.
    random_draws = np.random.rand(*reaction_rate_array.shape)
    return -(1 / reaction_rate_array) * np.log(random_draws)
# 1. GLOBAL PARAMETERS
bx = 1.0 # baseline growth rate of prey
dx = 0.1 # intrinsic death rate of prey
dy = 0.5 # intrinsic death rate of predator
rc = 0.00005 # resource competition coefficient
ux = 0.0001 # mutation rate of prey per division
uy = 0.001 # mutation rate of predator per division
p = 0.005 # scaling coefficient of the predation rate
m = 10 # determines the initial shape of the growth-defense trade-off
# 2. Set the initial conditions.
t = 0 # Start of the simulation clock.
T = 500 # Total simulated time period.
ancestor_prey = Prey(num=1000, g_value=1.0)
ancestor_predator = Predator(num=100, k_value=0.3)
current_prey = [ancestor_prey] # Store the current prey types.
current_predator = [ancestor_predator] # Store the current predator types.
extinct_prey = [] # Store the extinct prey types.
extinct_predator = [] # Store the extinct predator types.
# 3. MAIN LOOP: The Gillespie Algorithm to simulate the co-evolution system.
while t <= T:
    # Remove species whose population hit zero before computing rates.
    extinction_check(current_prey, extinct_prey)
    extinction_check(current_predator, extinct_predator)
    if len(current_prey) == 0 or len(current_predator) == 0:
        print('Extinction Phenomenon')
        break
    # ❶Per-type population vectors and reaction propensities.
    prey_g_array = np.array([prey.g_value for prey in current_prey])
    prey_num_array = np.array([prey.num for prey in current_prey])
    total_prey_num = prey_num_array.sum()
    prey_birth_nonmutant = bx * (1 - ux) * prey_num_array * prey_g_array
    prey_birth_mutant = bx * ux * prey_g_array * prey_num_array
    prey_competition_death = rc * total_prey_num * prey_num_array
    prey_intrinsic_death = dx * prey_num_array
    predator_k_array = np.array(
        [predator.k_value for predator in current_predator])
    predator_num_array = np.array(
        [predator.num for predator in current_predator])
    # Predation rates form a (prey type) x (predator type) matrix via
    # broadcasting with np.newaxis.
    predation_rate = p * prey_g_array[:, np.newaxis] ** \
        (m * predator_k_array / 0.3)
    predation_no_birth = predator_num_array * (1 - predator_k_array) * \
        prey_num_array[:, np.newaxis] * predation_rate
    predation_birth_nonmutant = predator_num_array * predator_k_array * \
        (1 - uy) * prey_num_array[:, np.newaxis] * predation_rate
    predation_birth_mutant = predator_num_array * predator_k_array * \
        uy * prey_num_array[:, np.newaxis] * predation_rate
    predator_intrinsic_death = dy * predator_num_array
    # ❷Calculate the reaction time of every channel with a random number.
    t_prey_birth_nonmutant = reaction_time(prey_birth_nonmutant)
    t_prey_birth_mutant = reaction_time(prey_birth_mutant)
    t_prey_competition_death = reaction_time(prey_competition_death)
    t_prey_intrinsic_death = reaction_time(prey_intrinsic_death)
    t_predation_no_birth = reaction_time(predation_no_birth)
    t_predation_birth_nonmutant = reaction_time(predation_birth_nonmutant)
    t_predation_birth_mutant = reaction_time(predation_birth_mutant)
    t_predator_intrinsic_death = reaction_time(predator_intrinsic_death)
    # ❸Pick up the shortest time for the reaction.
    min_t_prey_birth_nonmutant = t_prey_birth_nonmutant.min()
    min_t_prey_birth_mutant = t_prey_birth_mutant.min()
    min_t_prey_competition_death = t_prey_competition_death.min()
    min_t_prey_intrinsic_death = t_prey_intrinsic_death.min()
    min_t_predation_no_birth = t_predation_no_birth.min()
    min_t_predation_birth_nonmutant = t_predation_birth_nonmutant.min()
    min_t_predation_birth_mutant = t_predation_birth_mutant.min()
    min_t_predator_intrinsic_death = t_predator_intrinsic_death.min()
    tau = min(
        min_t_prey_birth_nonmutant, min_t_prey_birth_mutant,
        min_t_prey_competition_death, min_t_prey_intrinsic_death,
        min_t_predation_no_birth, min_t_predation_birth_nonmutant,
        min_t_predation_birth_mutant, min_t_predator_intrinsic_death)
    t = t + tau # Advance the simulation clock.
    # ❹Select which reaction takes place and update the number.
    # NOTE(review): the branch test compares floats with ==; this works
    # here only because tau is copied unchanged from one of these minima.
    if tau == min_t_prey_birth_nonmutant:
        index_one = t_prey_birth_nonmutant.argmin()
        current_prey[index_one].prey_reproduce()
    elif tau == min_t_prey_birth_mutant:
        index_two = t_prey_birth_mutant.argmin()  # NOTE(review): unused — the mutant's parent type is not tracked
        prey_mutation()
    elif tau == min_t_prey_competition_death:
        index_three = t_prey_competition_death.argmin()
        current_prey[index_three].prey_die()
    elif tau == min_t_prey_intrinsic_death:
        index_four = t_prey_intrinsic_death.argmin()
        current_prey[index_four].prey_die()
    elif tau == min_t_predation_no_birth:
        # Row (prey index) of the matrix minimum: the prey that was eaten.
        index_five = t_predation_no_birth.min(1).argmin()
        current_prey[index_five].prey_die()
    elif tau == min_t_predation_birth_nonmutant:
        # Row = prey eaten, column = predator that reproduces.
        index_six = t_predation_birth_nonmutant.min(1).argmin()
        index_seven = t_predation_birth_nonmutant.min(0).argmin()
        current_prey[index_six].prey_die()
        current_predator[index_seven].predator_reproduce()
    elif tau == min_t_predation_birth_mutant:
        index_eight = t_predation_birth_mutant.min(1).argmin()
        index_nine = t_predation_birth_mutant.min(0).argmin()  # NOTE(review): unused — the mutant's parent is not tracked
        current_prey[index_eight].prey_die()
        predator_mutation()
    elif tau == min_t_predator_intrinsic_death:
        index_ten = t_predator_intrinsic_death.argmin()
        current_predator[index_ten].predator_die()
    # Record the time and number data after each reaction.
    for prey in current_prey:
        prey.record_data(t)
    for predator in current_predator:
        predator.record_data(t)
# 4. Visualization of the population dynamics.
# Skipped entirely if either side went extinct during the simulation.
if len(current_prey) != 0 and len(current_predator) != 0:
    all_prey = current_prey + extinct_prey
    all_predator = current_predator + extinct_predator
    plt.subplot(211)
    for prey in all_prey:
        # Only plot prey types that ever reached 5 individuals, to keep
        # the legend readable.
        if max(prey.num_list) >= 5:
            plt.plot(prey.time_list, prey.num_list,
                     linewidth=1, label='g='+str(prey.g_value))
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
    plt.subplot(212)
    for predator in all_predator:
        # Predator populations are smaller; use a lower threshold of 3.
        if max(predator.num_list) >= 3:
            plt.plot(predator.time_list, predator.num_list,
                     linewidth=1, label='k='+str(predator.k_value))
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
    plt.show()
| null |
Uniform Distribution Model/ud_pic.py
|
ud_pic.py
|
py
| 11,243 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.random_sample",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random_sample",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 305,
"usage_type": "name"
}
] |
327664636
|
from _ctypes import Structure, POINTER
from contextlib import contextmanager
from ctypes import cdll, c_char_p, c_int32
from pathlib import Path
# Locate the bundled snips-nlu dynamic library next to this module and load it.
dylib_dir = Path(__file__).parent / "dylib"
# Assumes exactly one matching build artefact is present; raises
# IndexError if the library is missing.
dylib_path = list(dylib_dir.glob("libsnips_nlu*"))[0]
lib = cdll.LoadLibrary(str(dylib_path))
@contextmanager
def string_pointer(ptr):
    """Yield a C string pointer from the snips-nlu FFI and free it on exit.

    The pointer is released through the library's own destructor so that
    memory allocated on the native side is never freed by Python.
    """
    try:
        yield ptr
    finally:
        lib.ffi_snips_nlu_engine_destroy_string(ptr)
class CStringArray(Structure):
    """ctypes mirror of the FFI's array-of-strings struct."""
    _fields_ = [
        ("data", POINTER(c_char_p)),  # pointer to the first of `size` C strings
        ("size", c_int32)  # number of entries behind `data`
    ]
| null |
platforms/python/snips_nlu_rust/utils.py
|
utils.py
|
py
| 539 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "ctypes.cdll.LoadLibrary",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ctypes.cdll",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "_ctypes.Structure",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "_ctypes.POINTER",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ctypes.c_char_p",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "ctypes.c_int32",
"line_number": 22,
"usage_type": "name"
}
] |
259110264
|
import numpy as np
import pytest
from simulator.circuit import Circuit
from simulator.backend.stabilizer import run as run_stabilizer
from simulator.backend.statevector import run as run_statevector
from simulator.backend.tableau import run as run_tableau
from simulator.backend.chtableau import run as run_chtableau
@pytest.mark.parametrize(
    "run", [run_statevector, run_stabilizer, run_tableau, run_chtableau]
)
def test_ghz_measurement(run):
    """GHZ-game check: each backend must reproduce the quantum strategy's
    expectation value for every allowed question triple."""
    questions = [(0, 0, 0), (1, 1, 0), (1, 0, 1), (0, 1, 1)]
    for a, b, c in questions:
        # Prepare |000> - |011> - |101> - |110> (with S-gate phases).
        circ = Circuit(3)
        circ.h(0)
        circ.h(1)
        circ.cx(0, 2)
        circ.cx(1, 2)
        circ.s(0)
        circ.s(1)
        circ.s(2)
        # The strategy measures in the X basis whenever a player's bit is 1.
        for qubit, bit in enumerate((a, b, c)):
            if bit:
                circ.h(qubit)
        state = run(circ)
        want = 1 if (a, b, c) == (0, 0, 0) else -1
        assert np.isclose(state.pauli_expectation([1, 1, 1, 0, 0, 0]), want)
| null |
test/integration/test_ghz_game.py
|
test_ghz_game.py
|
py
| 1,077 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "simulator.circuit.Circuit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "simulator.backend.statevector.run",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "simulator.backend.stabilizer.run",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "simulator.backend.tableau.run",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "simulator.backend.chtableau.run",
"line_number": 11,
"usage_type": "name"
}
] |
293104004
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import json, re
def json_load_as_str(file_handle):
    """Parse JSON from an open file, converting unicode values to UTF-8 byte strings."""
    decoded = json.load(file_handle, object_hook=byteify)
    return byteify(decoded, ignore_dicts=True)
def json_loads_as_str(json_text):
    """Parse a JSON string, converting unicode values to UTF-8 byte strings."""
    decoded = json.loads(json_text, object_hook=byteify)
    return byteify(decoded, ignore_dicts=True)
def byteify(data, ignore_dicts=False):
    """Recursively convert unicode strings inside *data* to UTF-8 byte strings.

    NOTE(review): Python 2 only — relies on the `unicode` builtin and
    `dict.iteritems`, neither of which exists under Python 3.

    ignore_dicts=True skips dict conversion; it is set when the dict was
    already converted by the json object_hook, to avoid double work.
    """
    if isinstance(data, unicode):
        return data.encode('utf-8')
    if isinstance(data, list):
        return [byteify(item, ignore_dicts=True) for item in data]
    if isinstance(data, dict) and not ignore_dicts:
        return dict([(byteify(key, ignore_dicts=True), byteify(value, ignore_dicts=True)) for key, value in data.iteritems()])
    return data
def title_key(title):
    """Return *title* with any leading English/German article removed.

    Intended as a sort key so "The Matrix" files under "Matrix".  The
    article check is case-insensitive; the returned text keeps the
    original casing.  None becomes the empty string.
    """
    try:
        if title is None:
            title = ''
        articles_en = ['the', 'a', 'an']
        articles_de = ['der', 'die', 'das']
        articles = articles_en + articles_de
        # Raw string avoids the invalid-escape-sequence warning for \w/\s.
        match = re.match(r'^((\w+)\s+)', title.lower())
        if match and match.group(2) in articles:
            offset = len(match.group(1))
        else:
            offset = 0
        return title[offset:]
    except Exception:
        # Defensive: sorting callers must never crash on odd input.
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        return title
| null |
script.module.placenta/lib/resources/lib/modules/utils.py
|
utils.py
|
py
| 1,781 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 42,
"usage_type": "call"
}
] |
116427682
|
from kivy.uix.screenmanager import Screen
from kivy.uix.boxlayout import BoxLayout
from kivy. uix.gridlayout import GridLayout
from kivy.uix.spinner import Spinner
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from kivy.uix.togglebutton import ToggleButton
from kivy.graphics import Color, Rectangle
import database as db
# Module-level UI construction for the client/order manager screen.
radio_bt = None
# Box creation
layout_pai = BoxLayout(orientation = 'vertical') # Root box that receives all the other boxes
box_label = BoxLayout() # Box that holds the title Label
box_label.size_hint=1,0.2
box_meio = BoxLayout(orientation = 'vertical') # Holds the radio-button grid and the spinner box
box_resumo = BoxLayout(orientation ='vertical') # Holds the Labels showing the selected client's summary
box_check = GridLayout() # Groups the radio buttons
box_check.cols=3
box_check.size_hint= 1,0.5
box_spin = BoxLayout() # Spinner box
box_spin.size_hint = 1,0.5
box_bt = BoxLayout() # Buttons box
box_bt.size_hint = 1,0.37
# Title label ========================================
label = Label()
label.size_hint=0.5,1.5
label.text='Gerenciador de clientes e pedidos'
# Summary label ========================================
lb_resumo = Label()
""" Armazenar os valores no Spinner"""
spin = Spinner()
spin.size_hint= 0.2,0.5
spin.pos_hint={'top':0.5}
spin.text='Clientes'
lb_cidade = Label()
lb_nuped = Label()
lb_data = Label()
lb_valor = Label()
""" --------------------------------------------------------------------------"""
# Buttons ====================================
size_bt = [0.5 , 1]
bt_ok = Button()
bt_ok.size_hint= size_bt
bt_ok.text='Confirmar'
bt_ok.disabled=True
bt_inserir = Button()
bt_inserir.size_hint= size_bt
bt_inserir.text='Novo'
bt_resumo = Button()
bt_resumo.size_hint = size_bt
bt_resumo.text='Detalhar'
#====================================================
# Checkbox box
size = 16
ch_plastico = CheckBox()
ch_plastico.group=True
lb_plastico = Label()
lb_plastico.text='Plástico'
lb_plastico.halign='right'
lb_plastico.font_size = size
ch_construcao = CheckBox()
ch_construcao.group=True
lb_construcao = Label()
lb_construcao.text='Construção'
lb_construcao.font_size = size
ch_tecido = CheckBox()
ch_tecido.group=True
lb_tecido = Label()
lb_tecido.text='Tecido'
lb_tecido.font_size = size
# Popup creation ======================================
pop_layout = BoxLayout(orientation = 'vertical')
pop_tg = BoxLayout()
txt_nome = TextInput()
txt_nome.multiline=False
txt_nome.hint_text='Nome'
txt_cidade = TextInput()
txt_cidade.multiline=False
txt_cidade.hint_text='Cidade'
txt_numpedido = TextInput()
txt_numpedido.multiline=False
txt_numpedido.hint_text='Número do pedido'
txt_pedido = TextInput()
txt_pedido.hint_text='Itens do pedido'
txt_valor = TextInput()
txt_valor.multiline=False
txt_valor.hint_text='Valor (exemplo: 1,99)'
bt_pop = Button(text='Ok')
pop_layout.add_widget(txt_nome)
pop_layout.add_widget(txt_cidade)
pop_layout.add_widget(txt_numpedido)
pop_layout.add_widget(txt_pedido)
pop_layout.add_widget(txt_valor)
pop_layout.add_widget(bt_pop)
pop = Popup()
pop.size_hint = 0.8, 0.7
pop.title='Novo registro'
pop.content = pop_layout
#=========================================================
# Add the widgets to their boxes
box_label.add_widget(label)
box_resumo.add_widget(lb_resumo)
box_resumo.add_widget(lb_cidade)
box_resumo.add_widget(lb_nuped)
box_resumo.add_widget(lb_data)
box_resumo.add_widget(lb_valor)
box_meio.add_widget(box_check)
box_check.add_widget(ch_plastico)
box_check.add_widget(ch_construcao)
box_check.add_widget(ch_tecido)
box_check.add_widget(lb_plastico)
box_check.add_widget(lb_construcao)
box_check.add_widget(lb_tecido)
box_meio.add_widget(box_spin)
box_meio.add_widget(box_resumo)
box_spin.add_widget(spin)
#box_bt.add_widget(bt_ok)
box_bt.add_widget(bt_resumo)
box_bt.add_widget(bt_inserir)
layout_pai.add_widget(box_label)
layout_pai.add_widget(box_meio)
layout_pai.add_widget(box_bt)
#==========================================================
# SCREEN
first_screen = Screen()
first_screen.add_widget(layout_pai)
first_screen.name='primeira'
# Create the database tables named after the radio buttons
x = [lb_construcao.text, lb_plastico.text, lb_tecido.text]
db.Create(x)
def cor(posi, tam):
    """Paint a translucent blue rectangle of size *tam* at position *posi*
    on the title label's canvas."""
    with label.canvas:
        Color(0,0,1,0.25)
        Rectangle(pos = posi, size = tam)
| null |
janelas/first_screen.py
|
first_screen.py
|
py
| 4,491 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "kivy.uix.gridlayout.GridLayout",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "kivy.uix.spinner.Spinner",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "kivy.uix.button.Button",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "kivy.uix.button.Button",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "kivy.uix.button.Button",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "kivy.uix.checkbox.CheckBox",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "kivy.uix.checkbox.CheckBox",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "kivy.uix.checkbox.CheckBox",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "kivy.uix.button.Button",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "kivy.uix.popup.Popup",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "database.Create",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Rectangle",
"line_number": 205,
"usage_type": "call"
}
] |
621066627
|
"""
Pre-training the place cell
Model
lstm with one hidden layer
I don't know if truncated BPTT or gradient clip are necessary here
"""
import argparse
import math
import sys
import time
import random
import pickle
import datetime
import numpy as np
import six
import matplotlib.pyplot as plt
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
from sklearn import datasets
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from dataset_generator import DatasetGenerator
# set parameters
n_epoch = 10000 # number of epochs
# n_units = 60 # number of units per layer
batchsize = 1 # minibatch size
bprop_len = 1 # length of truncated BPTT
valid_len = n_epoch // 25 # epoch on which accuracy and perp are calculated
grad_clip = 5 # gradient norm threshold to clip
maze_size = (9, 9) # (rows, cols) of the simulated maze
whole_len = 100 # sequence length of the training dataset
valid_iter = 20 # number of validation sequences
ev_iterations = 100 # number of sequences for the SVM dataset
list_n_units = [20, 30, 40, 50, 60] # hidden sizes to compare # list_n_units = [60]
# GPU selection: negative --gpu means CPU (numpy); otherwise cupy.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
mod = cuda.cupy if args.gpu >= 0 else np
# validation dataset: valid_iter sequences of length 100
valid_data_stack = []
for i in range(valid_iter):
    valid_data = DatasetGenerator(maze_size).generate_seq(100)
    valid_data_stack.append(valid_data)
# test dataset
test_data = DatasetGenerator(maze_size).generate_seq(100)
# one-step forward propagation
def forward_one_step(x, t, state, train=True):
    """Run one LSTM step: input -> hidden -> output.

    Uses the module-level `model` (loaded from the pickled file inside
    the main loop).

    Returns:
        (new state dict {'c','h'}, sigmoid cross-entropy loss,
         mean squared error of sigmoid(y) against t,
         raw hidden activations of the first batch element)
    """
    # if args.gpu >= 0:
    # data = cuda.to_gpu(data)
    # targets = cuda.to_gpu(targets)
    x = chainer.Variable(x, volatile=not train)
    t = chainer.Variable(t, volatile=not train)
    h_in = model.x_to_h(x) + model.h_to_h(state['h'])
    c, h = F.lstm(state['c'], h_in)
    y = model.h_to_y(h)
    state = {'c': c, 'h': h}
    # Manual sigmoid so the MSE is computed on probabilities, not logits.
    sigmoid_y = 1 / (1 + np.exp(-y.data))
    mean_squared_error = ((t.data - sigmoid_y) ** 2).sum() / t.data.size
    return state, F.sigmoid_cross_entropy(y, t), mean_squared_error, h.data[0]
# initialize hidden state
def make_initial_state(batchsize=batchsize, train=True):
    """Return a zeroed LSTM state {'c', 'h'} of shape (batchsize, n_units).

    Uses the module-level `mod` (numpy or cupy) and the loop-global
    `n_units` set in the main loop below.
    """
    global n_units
    return {name: chainer.Variable(mod.zeros((batchsize, n_units),
                                             dtype=np.float32),
                                   volatile=not train)
            for name in ('c', 'h')}
# evaluation
def evaluate(data, test=False):
    """Feed one sequence through the LSTM without training.

    Returns (mean squared error averaged over the sequence, list of raw
    hidden-state vectors, one per time step).
    Note: the `test` argument is currently unused inside this function.
    """
    sum_error = 0
    state = make_initial_state(batchsize=1, train=False)
    hh = []
    for i in six.moves.range(len(data['input'])):
        x_batch = mod.asarray([data['input'][i]], dtype = 'float32')
        t_batch = mod.asarray([data['output'][i]], dtype = 'int32')
        # `loss` is returned by forward_one_step but not used here.
        state, loss, mean_squared_error, h_raw = forward_one_step(x_batch, t_batch, state, train=False)
        hh.append(h_raw)
        sum_error += mean_squared_error
    return sum_error / len(data['input']), hh
# Build an (inputs, labels) dataset for the SVM from LSTM hidden states.
def generate_seq_sklearn(iterations, test):
    """Generate `iterations` fresh sequences and collect hidden states.

    When `test` is truthy, keep one (states, coordinates) pair per
    sequence; otherwise flatten everything into single flat lists.
    """
    input_data = []
    label = []
    for _ in range(iterations):
        seq = DatasetGenerator(maze_size).generate_seq(100)
        _, hidden_states = evaluate(seq, True)
        if test:
            label.append(seq['coordinates'])
            input_data.append(hidden_states)
        else:
            label.extend(seq['coordinates'])
            input_data.extend(hidden_states)
    return input_data, label
# Grid search using the stock sklearn module.
def grid_search_1(train_X, train_Y):
    """Fit a linear SVC, tuning C by 5-fold cross-validated accuracy."""
    param_grid = [{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
    searcher = GridSearchCV(SVC(C=1), param_grid, cv=5, scoring='accuracy')
    searcher.fit(train_X, train_Y)
    return searcher
# grid search function using scratched (hand-rolled) search
def grid_search_2(train_X, train_Y):
    """Hand-rolled grid search over C and gamma for a linear SVC.

    Returns the one-vs-rest classifier with the best *training*
    accuracy, or None if no fit ever scored above 0.0.
    """
    C = np.logspace(-4, 4, 10)
    Gamma = np.logspace(-4, 4, 10)
    max_score = 0.0
    # Fixed: previously unbound, causing a NameError on return when no
    # classifier ever beat the initial max_score of 0.0.
    max_classifier = None
    for g in Gamma:
        print(g)
        for c in C:
            # NOTE(review): gamma has no effect with a linear kernel.
            estimator = SVC(C=c, kernel='linear', gamma=g)
            classifier = OneVsRestClassifier(estimator)
            classifier.fit(train_X, train_Y)
            pred_train = classifier.predict(train_X)
            score = accuracy_score(train_Y, pred_train)
            if max_score < score:
                max_score = score
                max_classifier = classifier
    return max_classifier
# SVM with fixed hyper parameters
def svm_fixed(train_X, train_Y):
    """Fit a one-vs-rest linear SVC with fixed hyper-parameters."""
    estimator = SVC(C=1., kernel='linear', gamma=0.01)
    classifier = OneVsRestClassifier(estimator)
    classifier.fit(train_X, train_Y)
    return classifier
# Accumulate mean/SE of LSTM and SVM errors for each hidden size.
lstm_errors_mean = np.zeros(len(list_n_units))
lstm_errors_se = np.zeros(len(list_n_units))
svm_errors_mean = np.zeros(len(list_n_units))
svm_errors_se = np.zeros(len(list_n_units))
# Evaluate each pretrained model in turn.
for j in range(len(list_n_units)):
    n_units = list_n_units[j]
    print('n_units = {}'.format(n_units))
    # Load the pretrained LSTM for this hidden size (with-block closes
    # the file even if unpickling fails).
    with open('pretrained_model_' + str(n_units) + '.pkl', 'rb') as f:
        model = pickle.load(f)
    print('calculate LSTM errors')
    # LSTM accuracy on the validation dataset
    valid_perp_stack = np.zeros(valid_iter)
    for i in range(valid_iter):
        valid_perp, hh = evaluate(valid_data_stack[i])
        valid_perp_stack[i] = valid_perp
    valid_perp_mean = np.mean(valid_perp_stack, axis=0)
    valid_perp_se = np.std(valid_perp_stack, axis=0) / np.sqrt(valid_iter)
    lstm_errors_mean[j] = valid_perp_mean
    lstm_errors_se[j] = valid_perp_se
    # SVM
    print('start SVM')
    # generate dataset for SVM; // keeps the count an int on Python 3
    # (it was /, which yields a float and breaks range/np.zeros below;
    # identical result on Python 2 ints).
    svm_X_train, svm_y_train = generate_seq_sklearn(ev_iterations, False)
    svm_X_test, svm_y_test = generate_seq_sklearn(ev_iterations // 5, True)
    clf = svm_fixed(svm_X_train, svm_y_train)
    # clf = grid_search_1(svm_X_train, svm_y_train)
    # clf = grid_search_2(svm_X_train, svm_y_train)
    print('SVM test')
    n_test = ev_iterations // 5  # integer count of held-out sequences
    svm_perp_stack = np.zeros(n_test)
    for i in range(n_test):
        y_true, y_pred = svm_y_test[i], clf.predict(svm_X_test[i])
        svm_test_accuracy = accuracy_score(y_true, y_pred) # accuracy, not error
        svm_perp_stack[i] = svm_test_accuracy
    svm_perp_mean = np.mean(svm_perp_stack, axis=0)
    svm_perp_se = np.std(svm_perp_stack, axis=0) / np.sqrt(n_test)
    svm_errors_mean[j] = 1 - svm_perp_mean # error
    svm_errors_se[j] = svm_perp_se
    print('svm error: {:.2f} '.format(svm_errors_mean[j]))
# plot: LSTM and SVM error against hidden size, with standard-error bars.
x = np.array(list_n_units)
plt.errorbar(x, lstm_errors_mean, yerr = lstm_errors_se, fmt='bo-')
# NOTE(review): plt.hold was deprecated and removed in matplotlib 3.x;
# this call will fail on modern matplotlib. Harmless on the old
# chainer-era versions this script targets — confirm before upgrading.
plt.hold(True)
plt.errorbar(x, svm_errors_mean, yerr = svm_errors_se, fmt='go-')
plt.title('LSTM and SVM error versus hidden size')
plt.xlabel('size of hidden units')
plt.ylabel('errors')
plt.xlim([15, 65])
plt.legend(['LSTM', 'SVM'], loc =1)
"""
fig, ax1 = plt.subplots()
ax1. errorbar(x, lstm_errors_mean, yerr = lstm_errors_se, fmt='bo-')
ax2 = ax1.twinx()
ax2. errorbar(x, svm_errors_mean, yerr = svm_errors_se, fmt='go-')
ax1.set_title('LSTM and SVM error versus hidden size')
ax1.set_xlabel('size of hidden units')
ax1.set_xlim([15, 65])
ax1.set_ylabel('LSTM error', color='b')
ax2.set_ylabel('SVM error', color='g')
"""
# Timestamp used to name all output files of this run.
d = datetime.datetime.today()
# save plots in PNG and SVG
plt.savefig('plot_' + d.strftime("%Y%m%d%H%M%S") + '.svg')
plt.savefig('plot_' + d.strftime("%Y%m%d%H%M%S") + '.png')
# save variables (pickle protocol 2 for Python 2 compatibility)
f = open('plot_' + d.strftime("%Y%m%d%H%M%S") + '_x.pkl', 'wb')
pickle.dump(x, f, 2)
f.close()
f = open('plot_' + d.strftime("%Y%m%d%H%M%S") + '_lstm_errors_mean.pkl', 'wb')
pickle.dump(lstm_errors_mean, f, 2)
f.close()
f = open('plot_' + d.strftime("%Y%m%d%H%M%S") + '_lstm_errors_se.pkl', 'wb')
pickle.dump(lstm_errors_se, f, 2)
f.close()
f = open('plot_' + d.strftime("%Y%m%d%H%M%S") + '_svm_errors_mean.pkl', 'wb')
pickle.dump(svm_errors_mean, f, 2)
f.close()
# save SE of valid errors
f = open('plot_' + d.strftime("%Y%m%d%H%M%S") + '_svm_errors_se.pkl', 'wb')
pickle.dump(svm_errors_se, f, 2)
f.close()
plt.show()
| null |
trainer/visual_predictive_place_cell/train_practice_diff_layers_shortened_plot_fig2.py
|
train_practice_diff_layers_shortened_plot_fig2.py
|
py
| 8,366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "chainer.cuda.cupy",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "chainer.cuda",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "dataset_generator.DatasetGenerator",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dataset_generator.DatasetGenerator",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "chainer.Variable",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "chainer.Variable",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "chainer.functions.lstm",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "chainer.functions",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "chainer.functions.sigmoid_cross_entropy",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "chainer.functions",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "chainer.Variable",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "six.moves.range",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "six.moves",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "dataset_generator.DatasetGenerator",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "sklearn.grid_search.GridSearchCV",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sklearn.multiclass.OneVsRestClassifier",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "sklearn.multiclass.OneVsRestClassifier",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 264,
"usage_type": "name"
}
] |
541617645
|
import sys
import logging
import pymysql
import json
import requests
import time
from datetime import datetime
hostname = 'ejercicio2-instancia.cryssnu9ajdn.us-east-2.rds.amazonaws.com'
username = 'ejercicio2admin'
password = '8XLY3NHHU6WH6ODZMCDj'
dbname = 'ejercicio2db'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
try:
conn = pymysql.connect(host=hostname, user=username, passwd=password, db=dbname, connect_timeout=5)
except pymysql.MySQLError as e:
logger.error("ERROR: No es posible conectarse a instancia MySQL.")
logger.error(e)
sys.exit()
logger.info("CORRECTO: Acceso Exitoso a instancia MySQL.")
def lambda_handler(event, context):
url = "https://dweet.io:443/get/latest/dweet/for/thecore"
webhook = "https://webhook.site/4ed54cff-41ba-423e-9f46-b2c87408daf9"
for i in range(15):
response = requests.request("GET", url)
print("Respuesta : " + str(response.status_code))
if response.status_code == 200:
response_json = response.json()
if(response_json["with"]):
temperatura = response_json["with"][0]['content']['temperature']
humedad = response_json["with"][0]['content']['humidity']
fecha = datetime.today().strftime('%Y-%m-%d %H:%M')
with conn.cursor() as cur:
sql = 'insert into ejercicio2_table (fecha, temperatura, humedad) values(%s, %s, %s)'
val = (fecha, temperatura, humedad)
cur.execute(sql, val)
conn.commit()
print('Agregado registro ' + str(i + 1) + ' satisfactoriamente.')
time.sleep(59)
cur = conn.cursor()
sql = "select json_object('fecha',`fecha`,'temperatura',`temperatura`,'humedad',`humedad`) from `ejercicio2_table`"
cur.execute(sql)
data = cur.fetchall()
response = requests.request("POST", webhook, data=json.dumps(data), headers={'Content-Type': 'application/json'})
print("Respuesta Webhook : " + str(response.status_code))
return {
'statusCode': 200
}
| null |
ejercicio_2/lambda_function.py
|
lambda_function.py
|
py
| 2,241 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pymysql.MySQLError",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 53,
"usage_type": "call"
}
] |
18281825
|
from django.contrib.auth import get_user_model
from actstream import action
from rest_framework import serializers
from control.models import Control
from .models import UserProfile
User = get_user_model()
class RemoveControlSerializer(serializers.Serializer):
control = serializers.PrimaryKeyRelatedField(queryset=Control.objects.all())
class UserProfileSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(source='user.pk', read_only=True)
controls = serializers.PrimaryKeyRelatedField(many=True, queryset=Control.objects.all())
first_name = serializers.CharField(source='user.first_name')
last_name = serializers.CharField(source='user.last_name')
email = serializers.EmailField(source='user.email')
class Meta:
model = UserProfile
fields = (
'id', 'first_name', 'last_name', 'email', 'profile_type',
'organization', 'controls', 'is_audited', 'is_inspector')
extra_kwargs = {'controls': {'write_only': True}}
def create(self, validated_data):
profile_data = validated_data
controls_data = profile_data.pop('controls')
user_data = profile_data.pop('user')
user_data['username'] = user_data['email']
profile = UserProfile.objects.filter(user__username=user_data.get('email')).first()
action_details = {}
action_details['sender'] = self.context['request'].user
should_receive_email_report = False
if profile_data.get('profile_type') == UserProfile.INSPECTOR:
should_receive_email_report = True
if profile:
profile.user.first_name = user_data.get('first_name')
profile.user.last_name = user_data.get('last_name')
profile.organization = profile_data.get('organization')
profile.profile_type = profile_data.get('profile_type')
profile.send_files_report = should_receive_email_report
profile.user.save()
profile.save()
action_details['verb'] = 'update user'
else:
user = User.objects.create(**user_data)
profile_data['user'] = user
profile_data['send_files_report'] = should_receive_email_report
profile = UserProfile.objects.create(**profile_data)
action_details['verb'] = 'add user'
action_details['action_object'] = profile
controls_to_be_added = [c for c in controls_data if c not in profile.controls.all()]
session_user = self.context['request'].user
for control in controls_to_be_added:
if control not in session_user.profile.controls.all():
raise serializers.ValidationError(
f"{session_user} n'est pas authorisé à modifier ce contrôle: {control}")
profile.controls.add(control)
action_details['verb'] = 'add user'
action_details['target'] = control
action.send(**action_details)
return profile
| null |
user_profiles/serializers.py
|
serializers.py
|
py
| 2,998 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "control.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "control.models.Control.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "control.models.Control.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "control.models.Control",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.IntegerField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "control.models.Control.objects.all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "control.models.Control.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "control.models.Control",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.EmailField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.UserProfile",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.UserProfile.objects.filter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.UserProfile.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "models.UserProfile",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.UserProfile.INSPECTOR",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.UserProfile",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "models.UserProfile.objects.create",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models.UserProfile.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "models.UserProfile",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "control.models",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "control.models",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ValidationError",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "control.models",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "control.models",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "control.models",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "actstream.action.send",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "actstream.action",
"line_number": 68,
"usage_type": "name"
}
] |
226655453
|
from django.conf.urls import patterns, include, url
from core import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^impressum/$', views.static, {'site' : 'impressum.html'}, name='impressum'),
url(r'^presse/$', views.static, {'site' : 'presse.html'}, name='presse'),
url(r'^konzept/$', views.static, {'site' : 'konzept.html'}, name='konzept'),
url(r'^send_contactmail/$', views.send_contactmail, name='send_contactmail'),
url(r'^subscribe/$', views.subscribeToNewsletter, name='subscribe'),
url(r'^confirm/(?P<id>.+)/$', views.confirmNewsletter, name='confirm'),
url(r'^unsubscribe/(?P<id>.+)/$', views.unsubscribeFromNewsletter, name='unsubscribe'),
url(r'^robots\.txt/$', views.static, {'site' : 'robots.txt', 'content_type' : 'text/plain'}, name='robots'),
url(r'^sitemap\.xml/$', views.static, {'site' : 'sitemap.xml', 'content_type' : 'text/xml'}, name='sitemap'),
url(r'^google2ba41cbba49d5958\.html/$', views.static, {'site' : 'google2ba41cbba49d5958.html'}, name='google_verification'),
)
| null |
core/urls.py
|
urls.py
|
py
| 1,044 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "core.views.index",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "core.views.send_contactmail",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "core.views.subscribeToNewsletter",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "core.views.confirmNewsletter",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "core.views.unsubscribeFromNewsletter",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "core.views.static",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 19,
"usage_type": "name"
}
] |
230098156
|
from django.http import HttpResponseBadRequest
class ModelView(object):
u"""
A generic view for models which can recieve GET and POST requests
The __init__ method of subclasses should set the default response
variable.
"""
template_file = None
response = None
def __call__(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
method_name = "handle_%s" % request.method.upper()
model = self.get_model()
try:
getattr(self, method_name)(model)
except AttributeError:
return HttpResponseBadRequest()
return self.response
def handle_GET(self, model):
u"""Default implementation of model view is to do nothing."""
pass
def handle_POST(self, model):
u"""
Handle a POST request to this resource.
This will forward on request to a method of form "do_%s" where the
second part needs to be specified as an "action" name within the
request.
If you don't want to handle POSTs this way, just override this method
"""
if 'action' in self.request.POST:
getattr(self, "do_%s" % self.request.POST['action'].lower())(model)
def get_model(self):
u"""Responsible for loading the model that is being acted on"""
return None
| null |
oscar/view/generic.py
|
generic.py
|
py
| 1,472 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 25,
"usage_type": "call"
}
] |
283917762
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, NumericProperty, ListProperty, BooleanProperty, OptionProperty, \
ReferenceListProperty, StringProperty
from kivy.graphics import Color, Triangle, Rectangle, Ellipse, Line, InstructionGroup
from kivy.uix.image import Image
from kivy.core.audio import SoundLoader
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.popup import Popup
from kivy.lang import Builder
from random import randint
Builder.load_string("""
#:kivy 2.0.0
<Playground>
canvas:
Color:
rgba:[0.3,0.6,0.04,1]
Rectangle:
pos: self.pos
size: self.size
snake: snake_id
fruit: fruit_id
Snake:
id: snake_id
width: root.width/root.col_number
height: root.height/root.row_number
Fruit:
id: fruit_id
width: root.width/root.col_number
height: root.height/root.row_number
Label:
font_size: 70
center_x: root.x + root.width/root.col_number*2
top: root.top - root.height/root.row_number
text: str(root.score)
<Snake>
head: snake_head_id
tail: snake_tail_id
SnakeHead:
id: snake_head_id
width: root.width
height: root.height
SnakeTail:
id: snake_tail_id
width: root.width
height: root.height
# Добавления экранов управления игрой (3-я часть, 6 commit)
<WelcomeScreen>
canvas:
Color:
rgba:[0,0.4,0,1]
Rectangle:
pos: self.pos
size: self.size
AnchorLayout:
anchor_x: "center"
BoxLayout:
orientation: "vertical"
size_hint: (1, 1)
spacing: 10
Label:
size_hint_y: .4
text: "Змейка"
valign: "bottom"
bold: False
font_size: 0.2*self.height
padding: 0, 0
AnchorLayout:
anchor_x: "center"
size_hint_y: .6
BoxLayout:
size_hint: .5, .5
orientation: "vertical"
spacing: 10
Button:
halign: "center"
valign: "middle"
text: "Играть"
font_size: 0.3*self.height
on_press: root.manager.current = "playground_screen"
Button:
halign: "center"
valign: "middle"
text: "Настройки"
font_size: 0.3*self.height
on_press: root.show_popup()
<PlaygroundScreen>:
game_engine: playground_widget_id
Playground:
id: playground_widget_id
# Добавление виджетов для настроек отключенния границ экрана
# и скорости змейки (3-я часть, 7 commit)
<OptionsPopup>
border_option_widget: border_option_widget_id
speed_option_widget: speed_option_widget_id
title: "Настройки"
size_hint: .75, .75
BoxLayout:
orientation: "vertical"
spacing: 20
GridLayout:
size_hint_y: .8
cols: 2
Label:
text: "Без границ"
halign: "center"
Switch:
id: border_option_widget_id
Label:
text: "Скорость Змейки"
halign: "center"
Slider:
id: speed_option_widget_id
max: 10
min: 1
step: 1
value: 1
AnchorLayout:
anchor_x: "center"
size_hint: 1, .25
Button:
size_hint: 0.6, 0.8
text: "Сохранить"
font_size: 0.3*self.height
on_press: root.dismiss()
<VictoryScreen>
canvas:
Color:
rgba:[0,0.4,0,1]
Rectangle:
pos: self.pos
size: self.size
AnchorLayout:
anchor_x: "center"
BoxLayout:
orientation: "vertical"
size_hint: (1, 1)
spacing: 10
Label:
size_hint_y: .4
text: "Ура!!! Змейка объелась!!!"
font_size: 0.2*self.height
valign: "bottom"
bold: False
padding: 0, 0
AnchorLayout:
anchor_x: "center"
size_hint_y: .6
Button:
size_hint: 0.5, 0.3
halign: "center"
valign: "middle"
text: "Продолжить"
font_size: 0.3*self.height
on_press: root.manager.current = "welcome_screen"
""")
class Playground(Widget):
# root and children widget containers (корневой контейнер и контейнер дочерних виджетов)
fruit = ObjectProperty(None)
snake = ObjectProperty(None)
# настройки пользователя - скорости и отключения границ
start_speed = NumericProperty(1)
border_option = BooleanProperty(False)
# параметры сетки (выбраны 32-ширина, 18 высота)
col_number = 32
row_number = 18
# game variables (игровые переменные)
score = NumericProperty(0)
turn_counter = NumericProperty(0)
fruit_rhythm = NumericProperty(0)
start_time_coeff = NumericProperty(1)
running_time_coeff = NumericProperty(1)
fr = NumericProperty(0) # переменная события съедения фрукта
# user input handling (обработка пользовательского ввода)
touch_start_pos = ListProperty()
action_triggered = BooleanProperty(False)
def start(self):
# если опция border_option False, рисуем прямоугольник вокруг игровой области
if self.border_option == False:
with self.canvas.after:
self.l1 = Line(width=2., rectangle=(self.x, self.y, self.width, self.height))
else:
with self.canvas.after:
self.l1 = Line(width=0.1, rectangle=(self.x, self.y, self.width, self.height))
# вычислить временной коэф-т (time coeff),используемый как частота обновления для игры,
# используя предоставленные параметры (default 1.1, max 2)
# мы сохраняем значение дважды, чтобы сохранить ссылку в случае
# сброса (действительно, running_time_coeff будет увеличиваться в игре, если
# фрукт был съеден)
self.start_time_coeff += (self.start_speed / 10)
self.running_time_coeff = 1
# нарисовать новую змейку
self.new_snake()
# запустить цикл обновления
self.update()
def reset(self):
# сбросить игровые переменные
self.turn_counter = 0
self.score = 0
self.running_time_coeff = self.start_time_coeff
# удаляем виджет змейки и фрукт
self.snake.remove()
self.fruit.remove()
# непланируем все события в случае сброса
# (они будут перенесены в механизм перезапуска)
Clock.unschedule(self.pop_fruit)
Clock.unschedule(self.fruit.remove)
Clock.unschedule(self.update)
def new_snake(self):
# генерируем случайные координаты
start_coord = (
randint(10, self.col_number - 10), randint(6, self.row_number - 6))
# установливаем случайные координаты исходного положения змеи
self.snake.set_position(start_coord)
# генерируем случайное направление
rand_index = randint(0, 3)
start_direction = ["Up", "Down", "Left", "Right"][rand_index]
# устанавливаем случайное исходное направление змейки
self.snake.set_direction(start_direction)
def pop_fruit(self, *args):
# получаем случайные координаты фрукта
random_coord = [
randint(2, self.col_number - 1), randint(2, self.row_number - 1)]
# получаем позиции всех ячеек, занятых змеей
snake_space = self.snake.get_full_position()
# если координаты находятся в ячейке, занятой змейкой, переопределяем координаты на незанятые
while random_coord in snake_space:
random_coord = [
randint(2, self.col_number - 1), randint(2, self.row_number - 1)]
# вставляем фруктовый виджет в сгенерированные координаты
self.fruit.pop(random_coord)
def is_defeated(self):
"""
Функция используется для проверки, соответствует ли текущее положение змеи поражению.
"""
snake_position = self.snake.get_position()
# если змея кусает себя за хвост: поражение
if snake_position in self.snake.tail.blocks_positions:
return True
# если змейка вышла за границы и опция нет границ выключена - False, то Конец Игры
if self.border_option == False:
if snake_position[0] > self.col_number \
or snake_position[0] < 1 \
or snake_position[1] > self.row_number \
or snake_position[1] < 1:
return True
return False
def handle_outbound(self):
"""
Используется для замены змеи на противоположной стороне, если она выходит за пределы
(вызывается только в том случае, если для параметра границы установлено значение False)
"""
position = self.snake.get_position()
direction = self.snake.get_direction()
if position[0] == 1 and direction == "Left":
# добавить текущую позицию головы как хвостовой блок
# иначе один блок будет пропущен обычной подпрограммой
self.snake.tail.add_block(list(position))
self.snake.set_position([self.col_number + 1, position[1]])
elif position[0] == self.col_number and direction == "Right":
self.snake.tail.add_block(list(position))
self.snake.set_position([0, position[1]])
elif position[1] == 1 and direction == "Down":
self.snake.tail.add_block(list(position))
self.snake.set_position([position[0], self.row_number + 1])
elif position[1] == self.row_number and direction == "Up":
self.snake.tail.add_block(list(position))
self.snake.set_position([position[0], 0])
def update(self, *args):
"""
Функция используется для перехода игры к новому ходу.
"""
# регистрирование фруктов в возрастающей последовательности в планировщике событий
if self.turn_counter == 0:
self.fruit_rythme = self.fruit.interval + self.fruit.duration
Clock.schedule_interval(self.fruit.remove, self.fruit_rythme / self.running_time_coeff)
elif self.turn_counter == self.fruit.interval:
self.fruit.remove()
self.pop_fruit()
Clock.schedule_interval(self.pop_fruit, self.fruit_rythme / self.running_time_coeff)
elif self.fr == 1:
self.fruit.remove()
self.pop_fruit()
self.fr = 0
Clock.unschedule(self.pop_fruit)
Clock.unschedule(self.fruit.remove)
Clock.schedule_interval(self.fruit.remove, self.fruit_rythme / self.running_time_coeff)
Clock.schedule_interval(self.pop_fruit, self.fruit_rythme / self.running_time_coeff)
# если игра без границ - border_option=True, проверьте, собирается ли змейка покинуть экран
# если да, замените на соответствующую противоположную границу
if self.border_option:
self.handle_outbound()
# переместить змейку в следующую позицию
self.snake.move()
# проверка на поражение
# если это так, сбросить и перезапустить игру
if self.is_defeated():
self.reset()
# переход на экран приветствия
sound = SoundLoader.load('sound/ups.wav')
sound.play()
SnakeApp.screen_manager.current = "welcome_screen"
return
# проверяем, наличие и съеден ли фрукт
if self.fruit.is_on_board():
if self.snake.get_position() == self.fruit.pos:
# если это так, играем звук, удаляем плод, увеличиваем счет и размер хвоста, увеличиваем темп на 5%
self.fr = 1 # событие, что фрукт съеден
sound = SoundLoader.load('sound/eat.wav')
sound.play()
self.fruit.remove()
self.score += 1
self.snake.tail.size += 1
self.running_time_coeff *= 1.05
# Проверка условия победы - съедено установленное кол-во фруктов
if self.score > 5:
sound = SoundLoader.load('sound/victory.wav')
sound.play()
self.reset()
SnakeApp.screen_manager.current = "victory_screen"
return
# увеличиваем счетчик
self.turn_counter += 1
# расписание обновляется каждую секунду (1'')
Clock.schedule_once(self.update, 0.35 / self.running_time_coeff)
def on_touch_down(self, touch):
self.touch_start_pos = touch.spos
def on_touch_move(self, touch):
# вычисляем перевод из начальной позиции в текущую позицию
delta = Vector(*touch.spos) - Vector(*self.touch_start_pos)
# проверяем, не была ли команда еще отправлена, и если перевод
# составляет > 10% от размера экрана
if not self.action_triggered and (abs(delta[0]) > 0.1 or abs(delta[1]) > 0.1):
# если это так, установливаем соответствующее направление для змеи
if abs(delta[0]) > abs(delta[1]):
if delta[0] > 0:
self.snake.set_direction("Right")
else:
self.snake.set_direction("Left")
else:
if delta[1] > 0:
self.snake.set_direction("Up")
else:
self.snake.set_direction("Down")
# регистрируем, что действие было инициировано, чтобы
# оно не повторилось дважды за один ход
self.action_triggered = True
def on_touch_up(self, touch):
# готовы принять новую инструкцию касания экрана
self.action_triggered = False
class Fruit(Widget):
# константы, используемые для вычисления fruit_rhythme - периода появления фрукта
# значения выражают количество ходов
duration = NumericProperty(10)
interval = NumericProperty(1)
# представление на холсте
object_on_board = ObjectProperty(None)
state = BooleanProperty(False)
def is_on_board(self):
return self.state
def remove(self, *args):
# мы принимаем * args, потому что этот метод будет
# передан диспетчеру событий, поэтому он получит dt аргумент.
if self.is_on_board():
self.canvas.remove(self.object_on_board)
self.object_on_board = ObjectProperty(None)
self.state = False
def pop(self, pos):
self.pos = pos # используется для проверки начала употребления плода
# Рисунок фруктов
# (который просто круг, поэтому предполагаю , что это яблоко)
with self.canvas:
Color(1, 1, 1)
x = (pos[0] - 1) * self.size[0]
y = (pos[1] - 1) * self.size[1]
coord = (x, y)
# сохранение представления и обновление состояния объекта с фоном картинки яблоко
self.object_on_board = Ellipse(source='images/apple.png', pos=coord, size=self.size)
self.state = True
Color(1, 1, 1)
class Snake(Widget):
# children widgets containers (контейнер дочерних виджетов)
head = ObjectProperty(None)
tail = ObjectProperty(None)
def move(self):
"""
Перемещение змейки состоит из 3 шагов:
- сохранить текущее положение головы, так как оно будет использовано для добавления блока к хвосту.
- переместить голову на одну клетку в текущем направлении.
- добавляем новый хвостовой блок к хвосту.
"""
next_tail_pos = list(self.head.position)
self.head.move()
self.tail.add_block(next_tail_pos)
def remove(self):
"""
В нашей текущей змейке удаление всего объекта сводится к удалению его головы и хвоста, поэтому нам просто нужно вызвать соответствующие методы. Как они занимаются этим - их проблема, а не Змеи. Это просто происходит вниз по команде.
"""
self.head.remove()
self.tail.remove()
def set_position(self, position):
self.head.position = position
def get_position(self):
"""
Мы рассматриваем положение Змеи как положение, занимаемое головой.
"""
return self.head.position
def get_full_position(self):
"""
Но иногда нам нужно знать весь набор ячеек, занятых
змейкой.
"""
return self.head.position + self.tail.blocks_positions
def set_direction(self, direction):
self.head.direction = direction
def get_direction(self):
return self.head.direction
class SnakeHead(Widget):
nn = 0 # используется для переключения картинки с открытым и закрытым ртом
# представление в «сетке» игрового поля
direction = OptionProperty(
"Right", options=["Up", "Down", "Left", "Right"])
x_position = NumericProperty(0)
y_position = NumericProperty(0)
position = ReferenceListProperty(x_position, y_position)
# представление на холсте
points = ListProperty([0] * 2)
object_on_board = ObjectProperty(None)
state = BooleanProperty(False)
def is_on_board(self):
return self.state
def remove(self):
if self.is_on_board():
self.canvas.remove(self.object_on_board)
self.object_on_board = ObjectProperty(None)
self.state = False
def show(self):
"""
Фактический рендеринг головы змеи. Представление - это просто
Треугольник ориентирован в соответствии с направлением объекта
"""
with self.canvas:
Color(1, 1, 0)
if not self.is_on_board():
self.object_on_board = Ellipse(source='images/head.png', pos=self.points, size=self.size)
self.state = True # object is on board
else:
# если объект уже на борту, удалить старое представление
# перед рисованием нового
# рисуем с фоном головы в соответствии со значением nn
if self.nn == 0:
if self.direction == "Right":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head0R.png', pos=self.points, size=self.size)
elif self.direction == "Left":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head0L.png', pos=self.points, size=self.size)
elif self.direction == "Up":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head0Up.png', pos=self.points, size=self.size)
elif self.direction == "Down":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head0D.png', pos=self.points, size=self.size)
else:
if self.direction == "Right":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head1R.png', pos=self.points, size=self.size)
elif self.direction == "Left":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head1L.png', pos=self.points, size=self.size)
elif self.direction == "Up":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head1Up.png', pos=self.points, size=self.size)
elif self.direction == "Down":
self.canvas.remove(self.object_on_board)
self.object_on_board = Ellipse(source='images/head1D.png', pos=self.points, size=self.size)
Color(1, 1, 1)
def move(self):
"""
Это решение не очень изящно. Но оно работает.
Положение обновляется в соответствии с текущим направлением. Набор из
точки, представляющие треугольник, повернутый в направлении объекта,
вычисляется и сохраняется как свойство.
Затем вызывается метод show () для визуализации треугольника.
"""
# переключатель nn для смены картинки головы
if self.nn == 0:
self.nn = 1
else:
self.nn = 0
# обновление позиции
if self.direction == "Right":
self.position[0] += 1
# вычисление множества точек
x0 = (self.position[0] - 1) * self.width
y0 = (self.position[1] - 1) * self.height
elif self.direction == "Left":
self.position[0] -= 1
x0 = (self.position[0] - 1) * self.width
y0 = (self.position[1] - 1) * self.height
elif self.direction == "Up":
self.position[1] += 1
x0 = (self.position[0] - 1) * self.width
y0 = (self.position[1] - 1) * self.height
elif self.direction == "Down":
self.position[1] -= 1
x0 = (self.position[0] - 1) * self.width
y0 = (self.position[1] - 1) * self.height
# сохранение точек как свойства(списка)
self.points = [x0, y0]
# представление треугольника/эллепса
self.show()
class SnakeTail(Widget):
    """Snake tail: a bounded queue of blocks drawn behind the head."""

    # tail length, in number of blocks
    # NOTE(review): 'size' shadows Widget's own size property -- confirm
    # nothing relies on the inherited one.
    size = NumericProperty(3)
    # block positions on the Playground's grid
    blocks_positions = ListProperty()
    # canvas instructions currently drawn for the tail
    tail_blocks_objects = ListProperty()

    def remove(self):
        """Erase the whole tail from the canvas and reset its length."""
        # restore the default snake length
        self.size = 3
        # Every object in tail_blocks_objects is guaranteed to be on the
        # canvas: a block that was never drawn is never stored, so each
        # entry can be removed unconditionally.
        for drawn in self.tail_blocks_objects:
            self.canvas.remove(drawn)
        # Drop both the grid coordinates and the canvas instructions.
        self.blocks_positions = []
        self.tail_blocks_objects = []

    def add_block(self, pos):
        """Append a tail block at *pos* and redraw the tail.

        Three things happen here:
        - the new block position is appended to the position list;
        - the list is trimmed to ``self.size`` by popping the oldest entry;
        - the blocks are redrawn on the canvas, with the drawn-object list
          trimmed the same way so it keeps a constant size.
        """
        self.blocks_positions.append(pos)
        # keep the position list bounded by the tail length
        if len(self.blocks_positions) > self.size:
            self.blocks_positions.pop(0)
        with self.canvas:
            Color(0, 0, 1)
            for grid_pos in self.blocks_positions:
                corner = ((grid_pos[0] - 1) * self.width,
                          (grid_pos[1] - 1) * self.height)
                drawn = Ellipse(pos=corner, size=(self.width, self.height))
                self.tail_blocks_objects.append(drawn)
                # Keep the drawn-object list bounded too, removing the
                # evicted instruction from the canvas.
                if len(self.tail_blocks_objects) > self.size:
                    oldest = self.tail_blocks_objects.pop(0)
                    self.canvas.remove(oldest)
            Color(1, 1, 1)
class WelcomeScreen(Screen):
    """First screen shown to the player; hosts the options popup."""

    options_popup = ObjectProperty(None)

    def show_popup(self):
        """Instantiate the options popup and display it."""
        popup = OptionsPopup()
        self.options_popup = popup
        popup.open()
class PlaygroundScreen(Screen):
    """Screen that hosts the running game."""

    game_engine = ObjectProperty(None)

    def on_enter(self):
        """The screen became visible: start the game."""
        self.game_engine.start()
class OptionsPopup(Popup):
    """Popup exposing the game options (speed and border)."""

    border_option_widget = ObjectProperty(None)
    speed_option_widget = ObjectProperty(None)

    def on_dismiss(self):
        """Persist the chosen options on the Playground class."""
        chosen_speed = self.speed_option_widget.value
        chosen_border = self.border_option_widget.active
        Playground.start_speed = chosen_speed
        Playground.border_option = chosen_border
class VictoryScreen(Screen):
    """Screen displayed when the player wins; layout defined in kv."""
    pass
class SnakeApp(App):
    """Application entry point: builds and wires up the screens."""

    screen_manager = ObjectProperty(None)

    def build(self):
        """Create the ScreenManager, register all screens and return it."""
        # Store the ScreenManager as a class-level attribute so other
        # parts of the app can reach it through SnakeApp directly.
        # NOTE(review): this overwrites the ObjectProperty descriptor on
        # the class -- confirm nothing relies on kivy property binding.
        SnakeApp.screen_manager = ScreenManager()
        # Create and register every screen with the manager.
        for screen in (WelcomeScreen(name="welcome_screen"),
                       PlaygroundScreen(name="playground_screen"),
                       VictoryScreen(name="victory_screen")):
            self.screen_manager.add_widget(screen)
        return self.screen_manager
# Launch the kivy application when executed as a script.
if __name__ == '__main__':
    SnakeApp().run()
| null |
main.py
|
main.py
|
py
| 30,829 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "kivy.lang.Builder.load_string",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "kivy.properties.BooleanProperty",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ListProperty",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "kivy.properties.BooleanProperty",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Line",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Line",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock.unschedule",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.unschedule",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.unschedule",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.unschedule",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.unschedule",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "kivy.core.audio.SoundLoader.load",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "kivy.core.audio.SoundLoader",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "kivy.core.audio.SoundLoader.load",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "kivy.core.audio.SoundLoader",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "kivy.core.audio.SoundLoader.load",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "kivy.core.audio.SoundLoader",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.schedule_once",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "kivy.vector.Vector",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "kivy.properties.BooleanProperty",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 472,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "kivy.properties.OptionProperty",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ReferenceListProperty",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ListProperty",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "kivy.properties.BooleanProperty",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 624,
"usage_type": "name"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ListProperty",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ListProperty",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Ellipse",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 685,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 686,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 694,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "kivy.uix.popup.Popup",
"line_number": 702,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 703,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 711,
"usage_type": "name"
},
{
"api_name": "kivy.app.App",
"line_number": 715,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.ScreenManager",
"line_number": 720,
"usage_type": "call"
}
] |
38670664
|
from django.conf.urls import url
from django.conf.urls import include
from linkmanager import views
from django.contrib.auth import views as auth_views
# URL routes for the linkmanager app.
# NOTE(review): most patterns lack a terminating '$' anchor, so e.g.
# r'^about' also matches '/aboutanything' -- confirm this is intentional.
urlpatterns =[
        # Dashboard and static/informational pages.
        url(r'^dashboard/', views.Dashboard, name='dashboard'),
        url(r'^about', views.about, name='about'),
        url(r'^contact', views.contact, name='contact'),
        url(r'^termsandconditions', views.terms_and_conditions, name='terms_and_conditions'),
        url(r'^how_to/use', views.how_to_use, name='how_to_use'),
        url(r'^pa/listofcommands', views.pa_list_of_commands, name='pa_list_of_commands'),
        # Link CRUD; ids are slug-like ([0-9A-Za-z_-]+).
        url(r'^link/create/', views.LinkCreateView, name='link_create'),
        url(r'^link/update/(?P<id>[0-9A-Za-z_\-]+)/$', views.LinkUpdateView, name='link_update'),
        url(r'^link/delete/(?P<id>[0-9A-Za-z_\-]+)/$', views.LinkDeleteView, name='link_delete'),
        # Note CRUD; (?P<id>.*) is greedy and unanchored, so it matches anything.
        url(r'^note/create/', views.NoteCreateView, name='note_create'),
        url(r'^note/update/(?P<id>.*)', views.NoteUpdateView, name='note_update'),
        url(r'^note/delete/(?P<id>.*)', views.NoteDeleteView, name='note_delete'),
        # Authentication and user settings.
        # NOTE(review): auth_views.login is the function-based view removed in
        # Django 2.1 (LoginView replaced it) -- confirm the Django version.
        url(r'^user/login/', auth_views.login, name='login'),
        url(r'^user/registration/', views.signup_view, name='register'),
        url(r'^user/logout/', views.logout_view, name='logout'),
        # NOTE(review): 'setttings' looks like a typo in the live URL path;
        # fixing it would change published URLs, so it is only flagged here.
        url(r'^user/setttings/', views.user_settings_menu, name='user_settings_menu'),
        url(r'^user/info/', views.user_info, name='user_info'),
        url(r'^user/infoedit/profile/', views.user_info_edit_profile, name='user_info_edit_profile'),
        url(r'^user/infoedit/password/', views.user_info_edit_password, name='user_info_edit_password'),
        # Note detail page, addressed by id + slug.
        url(r'^note/show/(?P<id>.*)/(?P<note_slug>.*)', views.note_detail, name='note_detail'),
]
| null |
linkmanager/urls.py
|
urls.py
|
py
| 1,688 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.Dashboard",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.about",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.contact",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.terms_and_conditions",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.how_to_use",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.pa_list_of_commands",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.LinkCreateView",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.LinkUpdateView",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.LinkDeleteView",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.NoteCreateView",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.NoteUpdateView",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.NoteDeleteView",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.login",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.signup_view",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.logout_view",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.user_settings_menu",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.user_info",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.user_info_edit_profile",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.user_info_edit_password",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "linkmanager.views.note_detail",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "linkmanager.views",
"line_number": 34,
"usage_type": "name"
}
] |
614137568
|
import sys
import os
import re
import subprocess
import argparse
# Command-line interface: one flag per navigation directive.
parser = argparse.ArgumentParser(description='Gitforward helps you step easily from one commit to another, using a few simple directives.')
parser.add_argument('-n', '--next', dest='next', action='store_const', const='next', help='Next commit')
parser.add_argument('-p', '--prev', dest='prev', action='store_const', const='prev', help='Previous commit')
parser.add_argument('-s', '--start', dest='start', action='store_const', const='start', help='First commit')
parser.add_argument('-e', '--end', dest='end', action='store_const', const='end', help='Last commit')
parser.add_argument('-b', '--branch', dest='branch', help='Branch to checkout')
parser.add_argument('-i', '--index', dest='index', type=int, help='Index of the commit in Gitforward\'s list to checkout')
parser.add_argument('-l', '--list', dest='list', action='store_const', const='list', help='Display all commits')
parser.add_argument('-r', '--reset', dest='reset', action='store_const', const='reset', help='Reset data')
parser.add_argument('-t', '--tests', dest='tests', action='store_const', const='tests', help='Run tests')
parser.add_argument('-o', '--repository', dest='repository', help='The git repository to work with')
cmdargs = parser.parse_args(sys.argv[1:])
# Collapse the four mutually exclusive movement flags into one value
# ('next' / 'prev' / 'start' / 'end', or None if none was given).
cmdargs.direction = cmdargs.next or cmdargs.prev or cmdargs.start or cmdargs.end
# A repository is mandatory for everything except the self-test mode.
if not cmdargs.tests and not cmdargs.repository:
    print("Please specify a repository.")
    sys.exit(0)
git_repo = cmdargs.repository
# State file kept in the CWD, named after the repository directory.
git_log_data = (os.path.basename(git_repo) + ".gitfwd") if git_repo else None
def to_blob(data):
    '''
    Serialize the db dict into the flat text format used on disk.

    >>> to_blob({'current': 0, 'commits': [{'name': '123', 'comment': '456'}]})
    'current:0\\n123 456'
    '''
    parts = []
    if 'current' in data:
        parts.append('current:' + str(data['current']) + '\n')
    if 'commits' in data:
        commit_lines = ['%s %s' % (c['name'], c['comment']) for c in data['commits']]
        parts.append('\n'.join(commit_lines))
    return ''.join(parts).strip()
def from_blob(data):
    '''
    Parse the on-disk text format back into the db dict.

    >>> from_blob('current:0\\n123 456')
    {'current': 0, 'commits': [{'comment': '456', 'name': '123'}]}
    '''
    result = {}
    lines = [raw.strip() for raw in data.split('\n')]
    start = 0
    # A leading "current:<n>" line stores the checked-out commit index.
    if lines[0].startswith('current:'):
        result['current'] = int(lines[0].split(':')[1])
        start = 1
    # Every remaining line is "<sha> <comment>".
    pairs = [entry.split(' ', 1) for entry in lines[start:]]
    result['commits'] = [{'name': p[0], 'comment': p[1]} for p in pairs]
    return result
def read_db():
    """Load the state file for the current repository, or {} if absent."""
    if not os.path.exists(git_log_data):
        return {}
    with open(git_log_data) as fh:
        return from_blob(fh.read())
def write_db(data):
    """Serialize *data* and overwrite the state file."""
    with open(git_log_data, 'w+') as fh:
        return fh.write(to_blob(data))
def write_db_data(key, value):
    """Set a single *key* in the state file (read-modify-write)."""
    state = read_db()
    state[key] = value
    write_db(state)
def read_db_data(key, default=None):
    """Return one value from the state file, or *default* if missing."""
    return read_db().get(key, default)
def del_db_data(key):
    """Remove *key* from the state file if present."""
    state = read_db()
    state.pop(key, None)
    write_db(state)
def execute_cmd(cmd, cwd=None):
    """
    Run *cmd* (a list of arguments) in *cwd* and return its decoded stdout.

    On a non-zero exit code the command's output and error streams are
    printed and the program terminates; an OSError (e.g. executable not
    found) is also reported and terminates the program.
    """
    try:
        proc = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() drains both pipes while waiting.  The original
        # wait()-then-read() could deadlock once either pipe buffer filled,
        # and printed raw bytes objects on the error path.
        out, err = proc.communicate()
        if proc.returncode != 0:
            print("Error running command " + str.join(',', cmd))
            print("Return code: " + str(proc.returncode))
            print("Standout output:")
            print(out.decode())
            print("")
            print("Standard error:")
            print(err.decode())
            sys.exit(0)
        return out.decode()
    except OSError as e:
        print("Execution failed: " + str(e))
        sys.exit(0)
def get_commits_from_repo():
    """Run `git log` in the configured repository and parse it into
    [{'name': <sha>, 'comment': <subject>}, ...], oldest commit first.

    NOTE(review): assumes the default `git log` layout (a "commit <sha>"
    header line, a blank line, then the indented subject) -- confirm
    against locale/config variations.
    """
    commits = []
    lines = []
    # Strip every raw log line; lines are grouped per commit below.
    rawlogs = [l.strip() for l in [l.strip() for l in execute_cmd(['git', 'log'], git_repo).split("\n")]]
    for line in rawlogs:
        # A "commit <sha>" header starts a new entry: flush the previous one.
        if re.match(r'^commit', line):
            if len(lines) > 0: commits.append(lines)
            lines = []
        lines.append(line)
    # Flush the trailing entry.
    if len(lines) > 0: commits.append(lines)
    # name = second word of the header line; comment = first line after the
    # blank separator.  Reversed so index 0 is the oldest commit.
    return [{'name': commit[0].split(' ')[1], 'comment': commit[commit.index("") + 1].strip() } for commit in commits][::-1]
def write_commits_to_index(commits):
    """Cache the parsed commit list in the state file."""
    write_db_data('commits', commits)
def parse_commit_data(data):
    """Parse "<sha> <comment>" lines into commit dicts, skipping blank lines."""
    non_blank = [ln for ln in data.split('\n') if ln.strip()]
    result = []
    for ln in non_blank:
        parts = ln.split(' ', 1)
        result.append({'name': parts[0], 'comment': parts[1].strip()})
    return result
def get_commits_from_index():
    """Return the commit list cached in the state file ({} if absent)."""
    return read_db_data('commits', {})


def get_current_index(db_data, default):
    """Return the checked-out commit index from *db_data*, or *default*."""
    if 'current' not in db_data:
        return default
    return int(db_data['current'])


def write_current_index(index):
    """Persist the checked-out commit index in the state file."""
    write_db_data('current', str(index))
def unless_no_commits(commits, fn):
    '''
    Apply *fn* to *commits*, or return an error dict if the list is empty.

    >>> commits = [{'comment': 'testcomment', 'name': 'testname'}, {'comment': 'testcomment2', 'name': 'testname2'}]
    >>> unless_no_commits([], lambda x: x)
    {'message': 'No commit found.', 'type': 'error'}
    >>> unless_no_commits(commits, lambda x: x)
    [{'comment': 'testcomment', 'name': 'testname'}, {'comment': 'testcomment2', 'name': 'testname2'}]
    '''
    if commits:
        return fn(commits)
    return {'type': 'error', 'message': 'No commit found.'}
def to_commit_index(index):
    '''
    Wrap *index* in a commit-index result dict.

    >>> to_commit_index(10)
    {'index': '10', 'type': 'commitindex'}
    '''
    return {'type': 'commitindex', 'index': str(index)}


def error_msg(message):
    '''
    Wrap *message* in an error result dict.

    >>> error_msg('hello world')
    {'message': 'Error: hello world', 'type': 'error'}
    '''
    return {'type': 'error', 'message': 'Error: ' + message}


def within_bounds(commits, index):
    '''
    Validate *index* against the commit list, returning either a
    commit-index result or an error result.

    >>> commits = [{'comment': 'testcomment', 'name': 'testname'}, {'comment': 'testcomment2', 'name': 'testname2'}]
    >>> within_bounds(commits, 0)
    {'index': '0', 'type': 'commitindex'}
    >>> within_bounds(commits, 10)
    {'message': 'Error: Index 10 is greater than the largest commit index.', 'type': 'error'}
    >>> within_bounds(commits, -1)
    {'message': 'Error: Index -1 is less than 0.', 'type': 'error'}
    '''
    if index < 0:
        return error_msg('Index %(index)s is less than 0.' % {'index': index})
    last = len(commits) - 1
    if index > last:
        return error_msg('Index %(index)s is greater than the largest commit index.' % {'index': index})
    return to_commit_index(index)
def checkout(treeish):
    """Run `git checkout <treeish>` in the configured repository."""
    execute_cmd(['git', 'checkout', treeish], git_repo)
def to_treeish(val, db_data):
    '''
    Resolve a command-line value (directive, integer index, or branch
    name) into a commit-index result, an error result, or a branch result.

    >>> db_data = {'commits': [{'comment': 'testcomment', 'name': 'testname'}, {'comment': 'testcomment2', 'name': 'testname2'}, {'comment': 'testcomment3', 'name': 'testname3'}], 'current': '1'}
    >>> def to_treeish_test(command): return to_treeish(command, db_data)
    >>> to_treeish_test('start')
    {'index': '0', 'type': 'commitindex'}
    >>> to_treeish_test('end')
    {'index': '2', 'type': 'commitindex'}
    >>> to_treeish_test('next')
    {'index': '2', 'type': 'commitindex'}
    >>> to_treeish_test('prev')
    {'index': '0', 'type': 'commitindex'}
    >>> db_data['current'] = 0
    >>> to_treeish_test('prev')
    {'message': 'Error: Index -1 is less than 0.', 'type': 'error'}
    >>> db_data['current'] = 2
    >>> to_treeish_test('next')
    {'message': 'Error: Index 3 is greater than the largest commit index.', 'type': 'error'}
    '''
    commits = db_data['commits']

    def checked(index):
        # Validate the candidate index against the commit list.
        return unless_no_commits(commits, lambda cs: within_bounds(cs, int(index)))

    directives = {
        'start': 0,
        'end': len(commits) - 1,
        'next': get_current_index(db_data, -1) + 1,
        'prev': get_current_index(db_data, 1) - 1
    }
    if val in directives:
        return checked(directives[val])
    try:
        return checked(str(int(str(val))))
    except ValueError:
        # Not a directive and not an integer: treat it as a branch name.
        return {'type': 'branch', 'name': val}
def format_commit(commits, index, prefix=' '):
    '''
    Format one commit as "<prefix><index>: <comment>".

    >>> format_commit([{'comment': 'testcomment', 'name': 'testname'}], 0)
    ' 0: testcomment'
    '''
    # Width of the largest index, so all indices right-align in listings.
    index_str_length = len(str(len(commits) - 1))
    # NOTE(review): 'format' shadows the builtin of the same name.
    format = "%(num)" + str(index_str_length) + "s"
    line = prefix + format % {'num': str(index) } + ": " + commits[index]['comment']
    return line
def format_current_commit(commits, index):
    '''
    Format one commit marked with '> ' as the currently checked-out one.

    >>> format_current_commit([{'comment': 'testcomment', 'name': 'testname'}], 0)
    '> 0: testcomment'
    '''
    return format_commit(commits, index, '> ')
def point_to_commit(db_data, commit_index):
    """Check out the commit at *commit_index* and record it as current."""
    idx = int(commit_index)
    commits = db_data['commits']
    target = commits[idx]
    # Echo the commit being checked out, marked as current.
    print(format_current_commit(commits, idx))
    checkout(target['name'])
    db_data['current'] = idx
    write_db(db_data)
if __name__ == '__main__' and cmdargs.tests:
    # --tests: run the embedded doctests instead of touching a repository.
    import doctest
    doctest.testmod()
elif __name__ == '__main__':
    git_repo = cmdargs.repository
    git_log_data = os.path.basename(git_repo) + ".gitfwd"
    if not os.path.exists(git_repo):
        print("Directory " + git_repo + " does not exist.")
        sys.exit(0)
    elif not os.path.exists(git_repo + "/.git"):
        print("Directory " + git_repo + "/.git is not a git repository.")
        # BUGFIX: previously this branch fell through and the script went
        # on to run git commands in a non-repository directory.
        sys.exit(0)
    # (Re)build the cached commit index on first run or on --reset.
    if not os.path.exists(git_log_data) or cmdargs.reset:
        write_commits_to_index(get_commits_from_repo())
    db_data = read_db()
    commits = db_data['commits']
    if cmdargs.list:
        # Print every commit, marking the currently checked-out one.
        current_index = get_current_index(db_data, -1)
        for i in range(len(commits)):
            print((format_current_commit if i == current_index else format_commit)(commits, i))
    elif cmdargs.reset:
        del_db_data('current')
    elif cmdargs.direction:
        treeish = to_treeish(cmdargs.direction, db_data)
        if treeish['type'] == 'error':
            print(treeish['message'])
        else:
            point_to_commit(db_data, treeish['index'])
    # BUGFIX: `elif cmdargs.index:` silently ignored a valid `-i 0`,
    # because 0 is falsy; test for None explicitly.
    elif cmdargs.index is not None:
        treeish = to_treeish(cmdargs.index, db_data)
        if treeish['type'] == 'error':
            print(treeish['message'])
        else:
            point_to_commit(db_data, treeish['index'])
    elif cmdargs.branch:
        treeish = to_treeish(cmdargs.branch, db_data)
        if treeish['type'] == 'error':
            print(treeish['message'])
        else:
            print("Checking out branch " + treeish['name'])
            checkout(treeish['name'])
    else:
        print('No command specified.')
| null |
gitforward.py
|
gitforward.py
|
py
| 9,717 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "doctest.testmod",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 267,
"usage_type": "attribute"
}
] |
509941835
|
#import pygtk
#pygtk.require('2.0')
#import gtk
import gobject
import pygst
pygst.require('0.10')
gobject.threads_init()
import gst
import time
class Listen(object):
    """Pocketsphinx speech listener driven by a GStreamer pipeline."""

    def __init__(self):
        self.voice = False
        self.lock = False
        self.init_gst()

    def init_gst(self):
        """Initialize the speech components.

        NOTE: the lm= flag selects the language model used for the
        phoneme lookup; see "pocketsphinx lm flag".
        """
        self.pipeline = gst.parse_launch(
            'gconfaudiosrc ! audioconvert ! audioresample '
            + '! vader name=vad auto-threshold=true '
            + '! pocketsphinx lm=/home/raul/downloads/en-us.lm name=asr ! fakesink')
        asr = self.pipeline.get_by_name('asr')
        asr.connect('partial_result', self.asr_partial_result)
        asr.connect('result', self.asr_result)
        asr.set_property('configured', True)
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::application', self.application_message)
        self.pipeline.set_state(gst.STATE_PAUSED)

    def asr_partial_result(self, asr, text, uttid):
        """Forward partial result signals on the bus to the main thread."""
        payload = gst.Structure('partial_result')
        payload.set_value('hyp', text)
        payload.set_value('uttid', uttid)
        asr.post_message(gst.message_new_application(asr, payload))

    def asr_result(self, asr, text, uttid):
        """Forward result signals on the bus to the main thread."""
        payload = gst.Structure('result')
        payload.set_value('hyp', text)
        print(text)
        payload.set_value('uttid', uttid)
        asr.post_message(gst.message_new_application(asr, payload))

    def application_message(self, bus, msg):
        """Receive application messages from the bus and dispatch them."""
        kind = msg.structure.get_name()
        if kind == 'partial_result':
            self.partial_result(msg.structure['hyp'], msg.structure['uttid'])
        elif kind == 'result':
            self.final_result(msg.structure['hyp'], msg.structure['uttid'])
            self.pipeline.set_state(gst.STATE_PAUSED)

    def partial_result(self, hyp, uttid):
        """A partial hypothesis arrived: hold the lock while decoding."""
        self.lock = True

    def final_result(self, hyp, uttid):
        """The final hypothesis arrived: release the lock."""
        self.lock = False

    def listen(self, voice):
        """Start or stop listening depending on the *voice* flag."""
        if voice:
            self.pipeline.set_state(gst.STATE_PLAYING)
            self.lock = True
        else:
            vader = self.pipeline.get_by_name('vad')
            vader.set_property('silent', True)
            self.lock = False
i = Listen()
def listening(voice):
    """Toggle the shared listener on/off and return the new voice state."""
    if voice:
        print('im listening')
        i.listen(True)
        i.voice = True
    else:
        print('not hearing you')
        i.listen(False)
        i.voice = False
    return i.voice
# NOTE(review): imports placed mid-file, after the class definitions --
# conventionally these belong at the top of the module.
import audioop
import pyaudio
# Size of each audio capture buffer, in frames.
chunk = 1024
p = pyaudio.PyAudio()
# Open a mono 44.1 kHz 16-bit input stream from the default device.
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=44100,
                input=True,
                frames_per_buffer=chunk)
# RMS amplitude threshold separating "voice" from silence.
# NOTE(review): presumably hand-tuned for one microphone -- confirm.
THRESH=1500
# Main capture loop: watch the input level and toggle the listener when it
# crosses the threshold, unless the recognizer currently holds the lock.
while True:
    data = stream.read(chunk)
    rms = audioop.rms(data, 2) #width=2 for format=paInt16
    if i.voice is True:
        # Listening: stop once the level drops below the threshold,
        # but never while a decode is in progress (lock held).
        if i.lock is True:
            pass
        else:
            if rms < THRESH:
                i.voice=listening(False)
    elif i.voice is False:
        # Idle: start listening when the level rises above the threshold.
        if i.lock is False:
            if rms > THRESH:
                print('rms: %s' % (str(rms)))
                i.voice=listening(True)
| null |
listen_test.py
|
listen_test.py
|
py
| 4,031 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygst.require",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gobject.threads_init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gst.parse_launch",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "gst.STATE_PAUSED",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "gst.Structure",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "gst.message_new_application",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "gst.Structure",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "gst.message_new_application",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "gst.STATE_PAUSED",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "gst.STATE_PLAYING",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pyaudio.paInt16",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "audioop.rms",
"line_number": 120,
"usage_type": "call"
}
] |
582657459
|
from django.utils import timezone
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render, get_object_or_404
from django.views import View
from django.views.generic import TemplateView
from users.models import UserProfile
from .models import Tweet
class TimelineView(TemplateView, View):
    """Home timeline: shows recent tweets and creates new ones on POST."""

    template_name = 'tweety/home.html'

    def get(self, request, *args, **kwargs):
        # Authenticated users see tweets from the profiles they follow;
        # anonymous visitors get a shorter global feed.
        if request.user.is_authenticated:
            followed_profiles = request.user.userprofile.follows.all()
            latest_tweets = (Tweet.objects
                             .filter(profile__in=followed_profiles)
                             .order_by('-pub_datetime')[:25])
        else:
            latest_tweets = Tweet.objects.all().order_by('-pub_datetime')[:10]
        return self.render_to_response({"latest_tweets": latest_tweets})

    def post(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect('login')
        message = request.POST['message']
        Tweet(profile=request.user.userprofile,
              message=message,
              pub_datetime=timezone.now()).save()
        return redirect('home')
class ProfileView(LoginRequiredMixin, TemplateView, View):
    """A user's profile page with their latest tweets and a follow toggle."""

    template_name = "tweety/profile.html"

    def get(self, request, *args, **kwargs):
        profile = get_object_or_404(UserProfile, pk=kwargs['id'])
        recent = profile.tweet_set.all().order_by('-pub_datetime')[:10]
        context = {
            'latest_tweets': recent,
            'profile_user': profile.user,
            'follows': request.user.userprofile.is_following(profile.user.id),
        }
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        target_pk = kwargs['id']
        # Toggle: unfollow when already following, otherwise follow.
        already_following = request.user.userprofile.follows.filter(pk=target_pk).exists()
        if already_following:
            request.user.userprofile.follows.remove(target_pk)
        else:
            request.user.userprofile.follows.add(target_pk)
        return redirect(request.path)
| null |
tweety/views.py
|
views.py
|
py
| 2,029 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.views.generic.TemplateView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.Tweet.objects.filter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Tweet.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "models.Tweet",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "models.Tweet.objects.all",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Tweet.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.Tweet",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.Tweet",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "users.models.UserProfile",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 53,
"usage_type": "call"
}
] |
426311405
|
#! python
# -*- coding:utf-8 -*-
import os
import sys
import json
from xml.dom.minidom import Document
# Python 2/3 compatibility shims: provide the Python 2 names on Python 3.
try:
    basestring
except NameError:
    basestring = str
try:
    long
except NameError:
    long = int
# Python 3 has no ``unicode``; alias it to str.
try:
    unicode
except NameError:
    unicode = str
# Target line width for the Lua writer; 60 leaves headroom for the
# (unknown in advance) nesting indentation.
BASE_LENGTH = 60
BASE_INDENT = "    "
INDENT_LIST = {}  # memoized indent strings keyed by nesting level
class Writer(object):
    """Base class for sheet exporters; subclasses supply suffix/context."""

    def __init__(self, doc_name, sheet_name):
        # File names may contain non-ASCII (e.g. Chinese) characters.  The
        # original code always called ``unicode(doc_name, "utf-8")``, which
        # works on Python 2 but raises TypeError on Python 3 (where the
        # module's shim makes ``unicode`` an alias of ``str``).  Decode only
        # when we are actually handed bytes.
        if isinstance(doc_name, bytes):
            doc_name = doc_name.decode("utf-8")
        self.doc_name = doc_name
        self.sheet_name = sheet_name

    # Output file suffix, e.g. ".json"; implemented by subclasses.
    def suffix(self):
        pass

    # Serialized file content for ``ctx``; implemented by subclasses.
    def context(self,ctx):
        pass

    # Comment opening marker; implemented by subclasses.
    def comment_start(self):
        pass

    # Comment closing marker; implemented by subclasses.
    def comment_end(self):
        pass

    # File header comment.  Deliberately contains no timestamp: one would
    # create spurious diffs under version control.
    def comment(self):
        # The redundant ``format(...)`` wrapper around the %-expression was
        # removed; it returned its argument unchanged.
        where = "from %s_%s" % (self.doc_name, self.sheet_name)
        comment = [
            self.comment_start(),
            'DO NOT MODITY! Auto generated by py_exceltools',
            'https://www.python.org/',
            'http://www.python-excel.org/',
            '',
            where,
            self.comment_end(),
            '\n\n'
        ]
        return "\n".join( comment )
class JsonWriter(Writer):
    """Writer that serializes the sheet context as pretty-printed JSON."""

    def suffix(self):
        # Output file extension.
        return ".json"

    def context(self, ctx):
        """Return ``ctx`` rendered as a sorted, indented JSON string."""
        return json.dumps(ctx, ensure_ascii=False, indent=4,
                          sort_keys=True, separators=(',', ':'))
class XmlWriter(Writer):
    """Writer that serializes the sheet context as XML."""
    # file suffix
    def suffix(self):
        return ".xml"
    # comment start marker
    def comment_start(self):
        return "<!--"
    # comment end marker
    def comment_end(self):
        return "-->"
    # create the root element, named "<doc>_<sheet>"
    def root_element(self):
        root = self.doc.createElement( self.doc_name + "_" + self.sheet_name )
        return root
    # convert a dict into child elements of ``root``
    def dict_to_xml(self,root,value):
        # Sort the keys; otherwise the exported field order varies between
        # runs, which is unfriendly to version control.
        for k in sorted( value ) :
            v = value[k]
            sub_root = self.doc.createElement( k )
            self.to_xml( sub_root,v )
            root.appendChild( sub_root )
    # convert a list into child elements of ``root``
    def list_to_xml(self,root,value):
        for k,v in enumerate( value ) :
            # XML has no native array type: emit <item> nodes carrying an
            # explicit "index" attribute instead.
            sub_root = self.doc.createElement( "item" )
            sub_root.setAttribute( "index",str( k ) )
            self.to_xml( sub_root,v )
            root.appendChild( sub_root )
    # convert a Python value into XML nodes under ``root``
    def to_xml(self,root,value):
        sub_node = None
        val_type_str = None
        val_type = type( value )
        if int == val_type :
            # Python 3 has no separate long type; int64 is also a plain int
            val_type_str = "int64"
            sub_node = self.doc.createTextNode( str( value ) )
        elif long == val_type :
            val_type_str = "int64"
            sub_node = self.doc.createTextNode( str( value ) )
        elif float == val_type :
            val_type_str = "number"
            # strip a trailing ".0": 100.0 ==>> 100
            if long( value ) == float( value ) :
                sub_node = self.doc.createTextNode( str( long( value ) ) )
            else:
                sub_node = self.doc.createTextNode( str( value ) )
        elif str == val_type or unicode == val_type :
            val_type_str = "string"
            sub_node = self.doc.createTextNode( value )
        elif dict == val_type :
            self.dict_to_xml( root,value )
        elif list == val_type :
            self.list_to_xml( root,value )
        else :
            raise Exception( "invalid type",val_type )
        # dict/list nodes carry no "type" attribute
        if val_type_str : root.setAttribute( "type",val_type_str )
        if sub_node : root.appendChild( sub_node )
    # file content
    def context(self,ctx):
        # create the DOM document object; kept on self for the helpers above
        self.doc = Document()
        root = self.root_element()
        self.to_xml( root,ctx )
        self.doc.appendChild( root )
        return self.comment() + self.doc.toprettyxml( indent="    " )
class LuaWriter(Writer):
    """Writer that serializes the sheet context as a Lua module."""
    # file suffix
    def suffix(self):
        return ".lua"
    # comment start marker
    def comment_start(self):
        return "--[["
    # comment end marker
    def comment_end(self):
        return "]]"
    # get (and memoize in INDENT_LIST) the indent string for a nesting level
    def indent_ctx( self,indent ):
        if indent <= 0: return ""
        if indent not in INDENT_LIST:
            ctx = BASE_INDENT*indent
            INDENT_LIST[indent] = ctx
        return INDENT_LIST[indent]
    # convert a dict to a Lua table literal; returns (wrapped?, text)
    def dict_to_lua(self,value,indent):
        dict_ctx_list = []
        cur_indent = self.indent_ctx(indent)
        next_indent = self.indent_ctx(indent + 1)
        total_len = 0
        any_indent = False
        # Sort the keys; otherwise the exported field order varies between
        # runs, which is unfriendly to version control.
        for k in sorted( value ) :
            k_indent,lk = self.to_lua( k,indent )
            is_indent,lv = self.to_lua( value[k],indent + 1 )
            # wrap keys in [] so numeric keys are legal Lua
            key = "".join( ["[",lk,"]"] )
            # if the child value was indented it must start on its own line
            if is_indent :
                any_indent = True
                val = "".join( [key," =","\n",lv] )
            else :
                val = "".join( [key," = ",lv] )
            dict_ctx_list.append( val )
            if not any_indent : total_len += len(val)
        # decide whether the table needs to be broken across lines
        if any_indent or total_len > BASE_LENGTH :
            sep = ",\n" + next_indent
            dict_str = sep.join( dict_ctx_list )
            return True,"".join(
                [cur_indent,"{\n",next_indent,dict_str,"\n",cur_indent,"}"])
        else :
            dict_str = ",".join( dict_ctx_list )
            return False,"".join( ["{",dict_str,"}"] )
    # convert a list to a Lua table literal; returns (wrapped?, text)
    def list_to_lua(self,value,indent):
        list_ctx_list = []
        cur_indent = self.indent_ctx(indent)
        next_indent = self.indent_ctx(indent + 1)
        total_len = 0
        any_indent = False
        for v in value :
            is_indent,lv = self.to_lua( v,indent + 1 )
            if is_indent : any_indent = True
            if not any_indent :
                total_len = total_len + len( lv )
            list_ctx_list.append( lv )
        if any_indent :
            # Handle mixed content like {99,{a = 1,b = 2},"abc"} where some
            # entries wrapped and some did not: pad the unwrapped ones.
            for k,v in enumerate( list_ctx_list ) :
                if not v.startswith( BASE_INDENT ) :
                    list_ctx_list[k] = next_indent + v
            # once any child wrapped, every element goes on its own line
            list_str = ",\n".join( list_ctx_list )
            return True,"".join(
                [cur_indent,"{\n",list_str,"\n",cur_indent,"}"] )
        elif total_len > BASE_LENGTH :
            # Too many elements for one line (e.g. thousands of short values
            # configured in one cell): split into lines of suitable width.
            cur_len = 0
            cur_ctx = []
            line_ctx = []
            for ctx in list_ctx_list :
                # +1 accounts for the "," appended later
                one_len = len(ctx) + 1
                if cur_len + one_len > BASE_LENGTH :
                    line_ctx.append( ",".join( cur_ctx ) )
                    cur_len = 0
                    cur_ctx = []
                cur_len += one_len
                cur_ctx.append( ctx )
            if any(cur_ctx) : line_ctx.append( ",".join( cur_ctx ) )
            sep = ",\n" + next_indent
            list_str = sep.join( line_ctx )
            return True,"".join(
                [cur_indent,"{\n",next_indent,list_str,"\n",cur_indent,"}"] )
        else :
            # short list: emit the single-line form {a,b,c}
            list_str = ",".join( list_ctx_list )
            return False,"".join( ["{",list_str,"}"] )
    # convert a Python value to Lua source; returns (wrapped?, text)
    def to_lua(self,value,indent):
        val_type = type( value )
        if int == val_type :
            return False,str( value )
        elif long == val_type :
            return False,str( value )
        elif float == val_type :
            # 1001.0 -->> 1001: drop a redundant trailing ".0"
            if int( value ) == value :
                return False,str( int(value) )
            return False,str( value )
        elif str == val_type or unicode == val_type:
            # Single quotes: in the project's Lua style they take precedence
            # over double quotes.
            return False,"".join(["'",value,"'"])
        elif dict == val_type :
            return self.dict_to_lua(value,indent)
        elif list == val_type :
            return self.list_to_lua(value,indent)
        else :
            raise Exception( "invalid type",val_type )
    # file content
    def context(self,ctx):
        is_indent,str_ctx = self.to_lua( ctx,0 )
        return "".join( [self.comment(),"return\n",str_ctx] )
| null |
writer.py
|
writer.py
|
py
| 9,202 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dumps",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.Document",
"line_number": 147,
"usage_type": "call"
}
] |
538449215
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 1 15:45:34 2017
Triadiagonal matrix algorithm/Thomas algorithm
Tx = b
Same as Gauss elimination ,
except coefficient matrix is tridiagonal
@author: Nikhil Yewale
"""
from IPython import get_ipython
get_ipython().magic('reset -sf')
import numpy as np
def TDMA(d, c, a, b):
    """Solve the tridiagonal system T x = b in place (Thomas algorithm).

    Argument meanings match the caller below:
    - d: main diagonal (modified in place)
    - c: super-diagonal, len(d) - 1 entries (the original comment wrongly
      called this the sub-diagonal)
    - a: sub-diagonal, len(d) - 1 entries
    - b: right-hand side; overwritten with the solution and returned
    """
    n = len(d)
    # Forward elimination: zero out the sub-diagonal.
    for i in range(1, n):
        lam = a[i - 1] / d[i - 1]
        d[i] = d[i] - lam * c[i - 1]
        b[i] = b[i] - lam * b[i - 1]
    # Backward substitution.  Row i couples x[i] to x[i+1] through c[i];
    # the original used c[i-1], an off-by-one that also wrapped to c[-1]
    # on the first back-substituted row.
    b[-1] = b[-1] / d[-1]
    for i in range(n - 2, -1, -1):
        b[i] = (b[i] - c[i] * b[i + 1]) / d[i]
    return b
import scipy.sparse
# spsolve lives in the linalg submodule, which is NOT imported automatically
# by ``import scipy.sparse`` — the original code raised AttributeError here.
import scipy.sparse.linalg
# Build a tridiagonal test matrix with spdiags (see
# http://hplgit.github.io/bumpy/doc/pub/lectures-basics-1.html#___sec29 )
N = 6
diago = np.zeros((3, N))  # the three diagonals, one per row
diago[0, :] = -1
diago[1, :] = 3
diago[2, :] = -1
A = scipy.sparse.spdiags(diago, [-1, 0, 1], N, N, format='csc')
# d = main diagonal, c = super-diagonal (+1), a = sub-diagonal (-1)
sol = TDMA(A.diagonal(), A.A.diagonal(1), A.A.diagonal(-1), np.ones((len(diago[1, :]), 1)))
sparsealgsolution = scipy.sparse.linalg.spsolve(A, np.ones((len(diago[1, :]), 1)))  # compare with solve command
| null |
TDMA algorithm.py
|
TDMA algorithm.py
|
py
| 1,264 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "IPython.get_ipython",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse.spdiags",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse.linalg.spsolve",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 36,
"usage_type": "call"
}
] |
222099727
|
# --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import random
import socket
import struct
import six
import threading
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from meta.MetaService import Client
from meta.ttypes import EdgeItem
from meta.ttypes import ErrorCode
from meta.ttypes import GetEdgeReq
from meta.ttypes import GetEdgeResp
from meta.ttypes import GetPartsAllocReq
from meta.ttypes import GetPartsAllocResp
from meta.ttypes import GetTagReq
from meta.ttypes import GetTagResp
from meta.ttypes import ListHostsReq
from meta.ttypes import ListHostsResp
from meta.ttypes import ListEdgesReq
from meta.ttypes import ListEdgesResp
from meta.ttypes import ListSpacesReq
from meta.ttypes import ListSpacesResp
from meta.ttypes import ListTagsReq
from meta.ttypes import ListTagsResp
from meta.ttypes import TagItem
if six.PY3:
Timer = threading.Timer
else:
Timer = threading._Timer
class RepeatTimer(Timer):
    """A Timer that keeps firing ``function`` every ``interval`` seconds."""
    def __init__(self, interval, function):
        Timer.__init__(self, interval, function)
        self.daemon = True # set the RepeatTimer thread as a daemon thread, so it can end when main thread ends
    def run(self):
        # wait() returns False on timeout (fire again) and True once
        # cancel() sets the finished event, ending the loop.
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)
class MetaClient:
    """Caching client for the nebula meta service.

    Connects to one of the given meta servers, loads space/tag/edge schema
    information into local caches and refreshes them periodically via a
    RepeatTimer.
    """

    def __init__(self, addresses, timeout=1000,
                 connection_retry=3):
        """Initializer
        Arguments:
            - addresses: meta server addresses
            - timeout: maximum connection timeout
            - connection_retry: maximum number of connection retries
        Returns: empty
        """
        self._addresses = addresses
        self._timeout = timeout
        self._connection_retry = connection_retry
        self._space_name_map = {}  # map<space_name, space_id>
        self._space_part_location = {}  # map<space_name, map<part_id, list<address>>>
        self._space_part_leader = {}  # map<space_name, map<part_id, leader's address>>
        self._space_tag_items = {}  # map<space_name, map<tag_item.tag_name, tag_item>>
        self._space_edge_items = {}  # map<space_name, map<edge_item.edge_name, edge_item>>
        self._tag_name_map = {}  # map<space_name, map<tag_item.tag_id, tag_item.tag_name>>
        self._edge_name_map = {}  # map<space_name, map<edge_item.edge_type, edge_item.edge_name>>
        self._client = None

    def connect(self):
        """connect to meta servers
        Arguments: emtpy
        Returns:
            - error_code: the code indicates whether the connection is successful
        """
        while self._connection_retry > 0:
            code = self.do_connect(self._addresses)
            if code == 0:
                return ErrorCode.SUCCEEDED
            self._connection_retry -= 1
        return ErrorCode.E_FAIL_TO_CONNECT

    def do_connect(self, addresses):
        """Connect to one randomly chosen address.

        Returns 0 on success, -1 on any failure (the exception is printed).
        """
        address = addresses[random.randint(0, len(addresses) - 1)]
        host = address[0]
        port = address[1]
        try:
            transport = TSocket.TSocket(host, port)
            transport.setTimeout(self._timeout)
            transport = TTransport.TBufferedTransport(transport)
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            transport.open()
            self._client = Client(protocol)
            self.update_schemas()
            RepeatTimer(2, self.update_schemas).start()  # call update_schemas() every 2 seconds
            return 0
        except Exception as x:
            print(x)
            return -1

    def update_schemas(self):
        """Reload all space/tag/edge caches from the meta service.

        NOTE(review): list_spaces() returns None on error, which would make
        this loop raise TypeError — left unchanged to preserve behavior.
        """
        for space_id_name in self.list_spaces():
            space_name = space_id_name.name  # class IdName
            self._space_name_map[space_name] = space_id_name.id.get_space_id()
            self._space_part_location[space_name] = self.get_parts_alloc(space_name)
            self._space_part_leader[space_name] = {}
            # Loading tag schema's cache
            tags = {}
            tags_name = {}
            for tag_item in self.get_tags(space_name):
                tags[tag_item.tag_name] = tag_item
                tags_name[tag_item.tag_id] = tag_item.tag_name
            self._space_tag_items[space_name] = tags
            self._tag_name_map[space_name] = tags_name
            # Loading edge schema's cache
            edges = {}
            edges_name = {}
            for edge_item in self.get_edges(space_name):
                edges[edge_item.edge_name] = edge_item
                edges_name[edge_item.edge_type] = edge_item.edge_name
            self._space_edge_items[space_name] = edges
            self._edge_name_map[space_name] = edges_name
            # Update leader of partions
            self.set_space_part_leader()

    def get_space_id_from_cache(self, space_name):
        """get space id of the space
        Arguments:
            - space_name: name of the space
        Returns:
            - space_id: id of the space, or -1 when unknown
        """
        if space_name not in self._space_name_map.keys():
            return -1
        else:
            return self._space_name_map[space_name]

    def get_space_part_leader_from_cache(self, space_name, part_id):
        """get leader of the partion
        Arguments:
            - space_name: name of the space
            - part_id: id of the partition
        Returns:
            - leader address of the partition, or None when unknown
        """
        if space_name not in self._space_part_leader.keys():
            return None
        if part_id not in self._space_part_leader[space_name].keys():
            return None
        return self._space_part_leader[space_name][part_id]

    def update_space_part_leader(self, space_name, part_id, leader):
        """Record ``leader`` as the current leader of one partition."""
        self._space_part_leader[space_name][part_id] = leader

    def set_space_part_leader(self):
        """Refresh the partition-leader cache from ListHosts."""
        list_hosts_req = ListHostsReq()
        list_hosts_resp = self._client.listHosts(list_hosts_req)
        if list_hosts_resp.code != ErrorCode.SUCCEEDED:
            print('set_space_part_leader error, error code: ', list_hosts_resp.code)
            return None
        for host_item in list_hosts_resp.hosts:
            # ip is stored as a host-order int; render it dotted-quad
            host = socket.inet_ntoa(struct.pack('I', socket.htonl(host_item.hostAddr.ip & 0xffffffff)))
            port = host_item.hostAddr.port
            leader = (host, port)
            for space, part_ids in host_item.leader_parts.items():
                for part_id in part_ids:
                    self._space_part_leader[space][part_id] = leader

    def list_spaces(self):
        """list all of the spaces
        Arguments: empty
        Returns:
            - spaces: IdName of all spaces, or None on error
            IdName's attributes:
                - id
                - name
        """
        list_spaces_req = ListSpacesReq()
        list_spaces_resp = self._client.listSpaces(list_spaces_req)
        if list_spaces_resp.code == ErrorCode.SUCCEEDED:
            return list_spaces_resp.spaces  # IdName
        else:
            print('list spaces error, error code: ', list_spaces_resp.code)
            return None

    def get_part_alloc_from_cache(self, space_name, part_id):
        """get addresses of the partition
        Arguments:
            - space_name: name of the space
            - part_id: id of the partition
        Returns:
            - addresses: addresses of the partition, or None when unknown
        """
        if space_name in self._space_part_location.keys():
            parts_alloc = self._space_part_location[space_name]
            if part_id in parts_alloc.keys():
                # BUGFIX: was ``parts_alloc[part]`` — an undefined name that
                # raised NameError on every cache hit.
                return parts_alloc[part_id]
        return None

    def get_parts_alloc(self, space_name):
        """Fetch the partition allocation map of a space from the service."""
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        get_parts_alloc_req = GetPartsAllocReq(space_id)
        get_parts_alloc_resp = self._client.getPartsAlloc(get_parts_alloc_req)
        if get_parts_alloc_resp.code == ErrorCode.SUCCEEDED:
            address_map = {}
            for part_id, host_addrs in get_parts_alloc_resp.parts.items():
                addresses = []
                for host_addr in host_addrs:
                    host = socket.inet_ntoa(struct.pack('I', socket.htonl(host_addr.ip & 0xffffffff)))
                    port = host_addr.port
                    addresses.append((host, port))
                address_map[part_id] = addresses
            return address_map
        else:
            # BUGFIX: was ``getParts_alloc_resp.code`` — a typo that raised
            # NameError instead of printing the error code.
            print("get parts alloc error, error code: ", get_parts_alloc_resp.code)
            return None

    def get_parts_alloc_from_cache(self):
        """ get addresses of partitions of spaces
        Arguments: empty
        Returns:
            - space_part_location: map<space_name, map<part_id, list<address>>>
        """
        return self._space_part_location

    def get_tag_item_from_cache(self, space_name, tag_name):
        """ get TagItem of the tag
        Arguments:
            - space_name: name of the space
            - tag_name: name of the tag
        Returns:
            - TagItem, or None when unknown
            TagItem's attributes:
                - tag_id
                - tag_name
                - version
                - schema
        """
        if space_name in self._space_tag_items.keys() and tag_name in self._space_tag_items[space_name].keys():
            return self._space_tag_items[space_name][tag_name]
        return None

    def get_tag_name_from_cache(self, space_name, tag_id):
        """ get tag_name of the tag
        Arguments:
            - space_name: name of the space
            - tag_id: id of the tag
        Returns:
            - tag_name: name of the tag, or None when unknown
        """
        if space_name in self._tag_name_map.keys():
            tag_names = self._tag_name_map[space_name]
            if tag_id in tag_names.keys():
                return tag_names[tag_id]
        return None

    def get_tags(self, space_name):
        """ get TagItems of the space
        Arguments:
            - space_name: name of the space
        Returns:
            - tags: TagItems, or None on error
        """
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        list_tags_req = ListTagsReq(space_id)
        list_tags_resp = self._client.listTags(list_tags_req)
        if list_tags_resp.code == ErrorCode.SUCCEEDED:
            return list_tags_resp.tags
        else:
            print('get tags error, error code: ', list_tags_resp.code)
            return None

    def get_tag(self, space_name, tag_name, version=0):
        """ get tag schema of the given version
        Arguments:
            - space_name: name of the space
            - tag_name: name of the tag
            - version: version of the tag schema
        Returns:
            - Schema: tag schema of the given version, or None on error
        """
        space_id = self.get_space_id_from_cache(space_name)
        get_tag_req = GetTagReq(space_id, tag_name, version)
        get_tag_resp = self._client.getTag(get_tag_req)
        if get_tag_resp.code == ErrorCode.SUCCEEDED:
            return get_tag_resp.schema
        else:
            return None

    def get_tag_schema(self, space_name, tag_name, version=0):
        """ get tag schema columns of the given version
        Arguments:
            - space_name: name of the space
            - tag_name: name of the tag
            - version: version of the tag schema
        Returns:
            - result: map of column name to column type
        """
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        get_tag_req = GetTagReq(space_id, tag_name, version)
        get_tag_resp = self._client.getTag(get_tag_req)
        result = {}
        # NOTE(review): no error check on the response; a failed request
        # would raise here — left unchanged to preserve behavior.
        for column_def in get_tag_resp.schema.columns:
            result[column_def.name] = column_def.type.type
        return result

    def get_edge_item_from_cache(self, space_name, edge_name):
        """ get EdgeItem of the edge
        Arguments:
            - space_name: name of the space
            - edge_name: name of the edge
        Returns:
            - EdgeItem, or None when unknown
            EdgeItem's attributes:
                - edge_type
                - edge_name
                - version
                - schema
        """
        if space_name not in self._space_edge_items.keys():
            edges = {}
            # BUGFIX: was ``self.getEdges(space_name)`` — the method is named
            # ``get_edges``; the old call raised AttributeError.
            for edge_item in self.get_edges(space_name):
                edges[edge_item.edge_name] = edge_item
            self._space_edge_items[space_name] = edges
        edge_items = self._space_edge_items[space_name]
        if edge_name in edge_items.keys():
            return edge_items[edge_name]
        else:
            return None

    def get_edge_name_from_cache(self, space_name, edge_type):
        """ get edge name of the edge
        Arguments:
            - space_name: name of the space
            - edge_type: edge type of the edge
        Returns:
            - edge_name: name of the edge, or None when unknown
        """
        if space_name in self._edge_name_map.keys():
            edge_names = self._edge_name_map[space_name]
            if edge_type in edge_names.keys():
                return edge_names[edge_type]
        return None

    def get_edges(self, space_name):
        """ get EdgeItems of the space
        Arguments:
            - space_name: name of the space
        Returns:
            - edges: EdgeItems, or None on error
        """
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        list_edges_req = ListEdgesReq(space_id)
        list_edges_resp = self._client.listEdges(list_edges_req)
        if list_edges_resp.code == ErrorCode.SUCCEEDED:
            return list_edges_resp.edges
        else:
            # BUGFIX: message said 'get tags error' (copy/paste)
            print('get edges error, error code: ', list_edges_resp.code)
            return None

    def get_edge(self, space_name, edge_name, version=0):
        """ get edge schema of the given version
        Arguments:
            - space_name: name of the space
            - edge_name: name of the edge
            - version: version of the edge schema
        Returns:
            - schema of the edge, or None on error
        """
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        get_edge_req = GetEdgeReq(space_id, edge_name, version)
        get_edge_resp = self._client.getEdge(get_edge_req)
        if get_edge_resp.code == ErrorCode.SUCCEEDED:
            # BUGFIX: was ``get_edge_resp.Schema`` — the attribute is
            # lowercase ``schema`` (matches get_edge_schema below).
            return get_edge_resp.schema
        else:
            print('get edge error, error code: ', get_edge_resp.code)
            return None

    def get_edge_schema(self, space_name, edge_name, version=0):
        """ get edge schema columns of the given version
        Arguments:
            - space_name: name of the space
            - edge_name: name of the edge
            - version: version of the edge schema
        Returns:
            - result: map of column name to column type
        """
        space_id = self.get_space_id_from_cache(space_name)
        if space_id == -1:
            return None
        get_edge_req = GetEdgeReq(space_id, edge_name, version)
        get_edge_resp = self._client.getEdge(get_edge_req)
        result = {}
        # NOTE(review): no error check on the response; a failed request
        # would raise here — left unchanged to preserve behavior.
        for column_def in get_edge_resp.schema.columns:
            result[column_def.name] = column_def.type.type
        return result
| null |
nebula/ngMeta/MetaClient.py
|
MetaClient.py
|
py
| 15,921 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "six.PY3",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "threading._Timer",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.ErrorCode.E_FAIL_TO_CONNECT",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "thrift.transport.TSocket.TSocket",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "thrift.transport.TSocket",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "thrift.transport.TTransport.TBufferedTransport",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "thrift.transport.TTransport",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "thrift.protocol.TBinaryProtocol.TBinaryProtocol",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "thrift.protocol.TBinaryProtocol",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "meta.MetaService.Client",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ListHostsReq",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "socket.inet_ntoa",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "socket.htonl",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ListSpacesReq",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.GetPartsAllocReq",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "socket.inet_ntoa",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "socket.htonl",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ListTagsReq",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.GetTagReq",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.GetTagReq",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ListEdgesReq",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.GetEdgeReq",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "meta.ttypes.ErrorCode.SUCCEEDED",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "meta.ttypes.ErrorCode",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "meta.ttypes.GetEdgeReq",
"line_number": 432,
"usage_type": "call"
}
] |
554855756
|
import logging
import time
import traceback
from datetime import datetime, timedelta
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
from glob import glob
from motu_utils.utils_cas import authenticate_CAS_for_URL
from motu_utils.utils_http import open_url
from siphon import http_util
from siphon.catalog import TDSCatalog
from xarray.backends import NetCDF4DataStore
from check_connection import CheckConnection
from config import config
import utils
from utils import FileFailedException, Failed_Files, check_dir
logger = logging.getLogger(__name__)
WIND_VAR_LIST = ['surface_downward_eastward_stress', 'wind_stress_divergence', 'northward_wind', 'sampling_length',
'wind_speed_rms', 'wind_vector_curl',
'northward_wind_rms', 'eastward_wind', 'wind_speed', 'wind_vector_divergence', 'wind_stress',
'wind_stress_curl', 'eastward_wind_rms', 'surface_type',
'surface_downward_northward_stress']
WAVE_VAR_LIST = ['VHM0_WW', 'VMDR_SW2', 'VMDR_SW1', 'VMDR', 'VTM10', 'VTPK', 'VPED',
'VTM02', 'VMDR_WW', 'VTM01_SW2', 'VHM0_SW1',
'VTM01_SW1', 'VSDX', 'VSDY', 'VHM0', 'VTM01_WW', 'VHM0_SW2']
DAILY_PHY_VAR_LIST = ['thetao', 'so', 'uo', 'vo', 'zos', 'mlotst', 'bottomT', 'siconc', 'sithick', 'usi', 'vsi']
GFS_25_VAR_LIST = ['Temperature_surface', 'Wind_speed_gust_surface', 'u-component_of_wind_maximum_wind',
'v-component_of_wind_maximum_wind', 'Dewpoint_temperature_height_above_ground',
'U-Component_Storm_Motion_height_above_ground_layer',
'V-Component_Storm_Motion_height_above_ground_layer', 'Relative_humidity_height_above_ground']
GFS_50_VAR_LIST = ['Temperature_surface', 'u-component_of_wind_maximum_wind',
'v-component_of_wind_maximum_wind', 'U-Component_Storm_Motion_height_above_ground_layer',
'V-Component_Storm_Motion_height_above_ground_layer',
'Relative_humidity_height_above_ground']
def get_global_wave(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points):
    """
    Retrieve all wave variables (WAVE_VAR_LIST) for the given spatio-temporal
    bounding box and interpolate them onto the requested sample points.

    The data comes from CMEMS: the near-real-time analysis/forecast product for
    dates from 2019-01-01 06:00 on, and the reanalysis product from
    1993-01-01 06:00 on.  Data is read from the local VM mirror when present,
    otherwise downloaded through the Motu web service.

    :param date_lo: lower bound of the requested time range (datetime).
    :param date_hi: upper bound of the requested time range (datetime).
    :param lat_lo: lower latitude bound.
    :param lat_hi: upper latitude bound.
    :param lon_lo: lower longitude bound.
    :param lon_hi: upper longitude bound.
    :param time_points: xr.DataArray of timestamps to interpolate at.
    :param lat_points: xr.DataArray of latitudes to interpolate at.
    :param lon_points: xr.DataArray of longitudes to interpolate at.
    :return: pandas DataFrame with one row per sample point, columns WAVE_VAR_LIST.
    :raises ValueError: if date_lo is earlier than the supported range.
    """
    logger.debug('obtaining GLOBAL_REANALYSIS_WAV dataset for DATE [%s, %s] LAT [%s, %s] LON [%s, %s]' % (
        str(date_lo), str(date_hi), str(lat_lo), str(lat_hi), str(lon_lo), str(lon_hi)))
    dataset_temporal_resolution = 180  # minutes between dataset snapshots (3-hourly)
    if date_lo >= datetime(2019, 1, 1, 6):
        CheckConnection.set_url('nrt.cmems-du.eu')
        base_url = 'https://nrt.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'GLOBAL_ANALYSIS_FORECAST_WAV_001_027-TDS'
        product = 'global-analysis-forecast-wav-001-027'
        VM_FOLDER = '/eodata/CMEMS/NRT/GLO/WAV/GLOBAL_ANALYSIS_FORECAST_WAV_001_027'
        offset = 0.1
    elif date_lo >= datetime(1993, 1, 1, 6):
        CheckConnection.set_url('my.cmems-du.eu')
        base_url = 'https://my.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'GLOBAL_REANALYSIS_WAV_001_032-TDS'
        product = 'global-reanalysis-wav-001-032'
        VM_FOLDER = '/eodata/CMEMS/REP/GLO/WAV/GLOBAL_REANALYSIS_WAV_001_032'
        offset = 0.2
    else:
        # BUG FIX: previously dates before the supported range fell through with
        # base_url/service/product/VM_FOLDER undefined, producing a confusing
        # NameError further down.  Fail fast with an explicit error instead.
        raise ValueError('Wave data is not available before 1993-01-01 06:00, got %s' % date_lo)
    # snap the lower time bound down and the upper bound up onto the 3-hourly grid
    time_in_min = (date_lo.hour * 60) + date_lo.minute
    rest = time_in_min % dataset_temporal_resolution
    t_lo = date_lo - timedelta(minutes=rest)
    time_in_min = (date_hi.hour * 60) + date_hi.minute
    rest = time_in_min % dataset_temporal_resolution
    t_hi = date_hi + timedelta(minutes=dataset_temporal_resolution - rest)
    # widen the bounding box by the dataset resolution so interpolation has support
    y_lo = float(lat_lo) - offset
    y_hi = float(lat_hi) + offset
    x_lo = float(lon_lo) - offset
    x_hi = float(lon_hi) + offset
    if Path(VM_FOLDER).exists():
        logger.debug('Accessing local data %s' % VM_FOLDER)
        datasets_paths = []
        for day in range((t_hi - t_lo).days + 1):
            dt = t_lo + timedelta(day)
            path = Path(VM_FOLDER, '%s' % dt.year, '%.2d' % dt.month, '%.2d' % dt.day, '*.nc')
            dataset = list(glob(str(path)))
            if len(dataset) > 0:
                datasets_paths.append(sorted(dataset)[0])
        ds_nc = xr.open_mfdataset(datasets_paths)
        # coordinates may be stored in descending order; swap the slice bounds to match
        if ds_nc.coords['latitude'].values[0] == ds_nc.coords['latitude'].max():
            y_lo, y_hi = y_hi, y_lo
        if ds_nc.coords['longitude'].values[0] == ds_nc.coords['longitude'].max():
            x_lo, x_hi = x_hi, x_lo
        dataset = ds_nc.sel(longitude=slice(x_lo, x_hi), latitude=slice(y_lo, y_hi),
                            time=slice(t_lo, t_hi)).compute()
    else:
        url = base_url + '&service=' + service + '&product=' + product + \
              '&x_lo={0}&x_hi={1}&y_lo={2}&y_hi={3}&t_lo={4}&t_hi={5}&mode=console'.format(
                  x_lo, x_hi, y_lo, y_hi, utils.date_to_str(t_lo), utils.date_to_str(t_hi))
        dataset = try_get_data(url)
    return dataset.interp(longitude=lon_points, latitude=lat_points, time=time_points).to_dataframe()[
        WAVE_VAR_LIST].reset_index(drop=True)
def try_get_data(url):
    """
    Download a Motu product from the given URL and open it as an xarray Dataset.

    Authenticates against CMEMS CAS with the credentials from config, checking
    connectivity before and after each network step so transient outages are
    detected as early as possible.

    :param url: fully-built Motu product-download URL.
    :return: xarray Dataset parsed from the downloaded bytes.
    :raises ValueError: wrapping any underlying failure; the original exception
        is preserved as __cause__ and the request URL is included in the message.
    """
    try:
        CheckConnection.is_online()
        url_auth = authenticate_CAS_for_URL(url, config['UN_CMEMS'], config['PW_CMEMS'])
        response = open_url(url_auth)
        CheckConnection.is_online()
        read_bytes = response.read()
        CheckConnection.is_online()
        return xr.open_dataset(read_bytes)
    except Exception as e:
        logger.error(traceback.format_exc())
        # BUG FIX: previously raised ValueError('Error:', e, 'Request: ', url),
        # a tuple of args with no exception chaining; build one readable
        # message and keep the cause attached.
        raise ValueError('Error: %s Request: %s' % (e, url)) from e
def get_global_wind(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points):
    """
    Retrieve blended wind L4 variables (WIND_VAR_LIST) for the given bounding
    box and interpolate them onto the requested time/lat/lon sample points.

    Reads from the local VM mirror when present, otherwise downloads via the
    CMEMS Motu service (NRT product from 2018-01-01 06:00, reprocessed product
    from 1992-01-01 06:00).

    NOTE(review): if date_lo is earlier than 1992-01-01 06:00 neither branch
    below runs and base_url/service/product/VM_FOLDER stay undefined, which
    causes a NameError further down — confirm such dates cannot reach here.

    :return: pandas DataFrame with one row per sample point, columns WIND_VAR_LIST.
    """
    logger.debug('obtaining WIND_GLO_WIND_L4_NRT_OBSERVATIONS dataset for DATE [%s, %s] LAT [%s, %s] LON [%s, %s]' % (
        str(date_lo), str(date_hi), str(lat_lo), str(lat_hi), str(lon_lo), str(lon_hi)))
    # offset according to the dataset resolution
    offset = 0.25
    dataset_temporal_resolution = 360  # minutes between snapshots (6-hourly)
    if date_lo >= datetime(2018, 1, 1, 6):
        CheckConnection.set_url('nrt.cmems-du.eu')
        base_url = 'https://nrt.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'WIND_GLO_WIND_L4_NRT_OBSERVATIONS_012_004-TDS'
        product = 'CERSAT-GLO-BLENDED_WIND_L4-V6-OBS_FULL_TIME_SERIE'
        VM_FOLDER = '/eodata/CMEMS/NRT/GLO/WIN/WIND_GLO_WIND_L4_NRT_OBSERVATIONS_012_004'
    elif date_lo >= datetime(1992, 1, 1, 6):
        CheckConnection.set_url('my.cmems-du.eu')
        base_url = 'https://my.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'WIND_GLO_WIND_L4_REP_OBSERVATIONS_012_006-TDS'
        product = 'CERSAT-GLO-BLENDED_WIND_L4_REP-V6-OBS_FULL_TIME_SERIE'
        VM_FOLDER = '/eodata/CMEMS/REP/GLO/WIN/WIND_GLO_WIND_L4_REP_OBSERVATIONS_012_006'
    # time range: snap the lower bound down and the upper bound up onto the grid
    time_in_min = (date_lo.hour * 60) + date_lo.minute
    rest = time_in_min % dataset_temporal_resolution
    t_lo = date_lo - timedelta(minutes=rest)  # extract the lower bound
    time_in_min = (date_hi.hour * 60) + date_hi.minute
    rest = time_in_min % dataset_temporal_resolution
    t_hi = date_hi + timedelta(minutes=dataset_temporal_resolution - rest)
    # coordinates bbox, widened by one grid step so interpolation has support
    y_lo = float(lat_lo) - offset
    y_hi = float(lat_hi) + offset
    x_lo = float(lon_lo) - offset
    x_hi = float(lon_hi) + offset
    if Path(VM_FOLDER).exists():
        logger.debug('Accessing local data %s' % VM_FOLDER)
        datasets_paths = []
        # collect one day of NetCDF files per day in the snapped range
        for day in range((t_hi - t_lo).days + 1):
            dt = t_lo + timedelta(day)
            path = Path(VM_FOLDER, '%s' % dt.year, '%.2d' % dt.month, '%.2d' % dt.day, '*.nc')
            dataset = list(glob(str(path)))
            datasets_paths.extend(dataset)
        ds_nc = xr.open_mfdataset(datasets_paths)
        # coordinates may be stored in descending order; swap slice bounds to match
        if ds_nc.coords['lat'].values[0] == ds_nc.coords['lat'].max():
            tmp = y_lo
            y_lo = y_hi
            y_hi = tmp
        if ds_nc.coords['lon'].values[0] == ds_nc.coords['lon'].max():
            tmp = x_lo
            x_lo = x_hi
            x_hi = tmp
        dataset = ds_nc.sel(lon=slice(x_lo, x_hi), lat=slice(y_lo, y_hi),
                            time=slice(t_lo, t_hi)).compute()
    else:
        # no local mirror: build the Motu download URL and fetch over HTTP
        url = base_url + '&service=' + service + '&product=' + product + '&x_lo={0}&x_hi={1}&y_lo={2}&y_hi={3}&t_lo={4}&t_hi={5}&mode=console'.format(
            x_lo, x_hi, y_lo,
            y_hi,
            utils.date_to_str(
                t_lo),
            utils.date_to_str(
                t_hi))
        dataset = try_get_data(url)
    return dataset.interp(lon=lon_points, lat=lat_points, time=time_points).to_dataframe()[WIND_VAR_LIST].reset_index(
        drop=True)
def get_GFS(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points):
    """
    Retrieve GFS 0.25-degree forecast variables (GFS_25_VAR_LIST) from the RDA
    THREDDS server and interpolate them onto the requested sample points.

    Falls back to get_GFS_50 for dates before the 0.25 product starts
    (2015-01-15).  Also fetches the previous day's 18Z+6h file so that
    interpolation across midnight has data on both sides.

    :return: pandas DataFrame indexed like the sample points, columns GFS_25_VAR_LIST.
    """
    logger.debug('obtaining GFS 0.25 dataset for DATE [%s, %s] LAT [%s, %s] LON [%s, %s]' % (
        str(date_lo), str(date_hi), str(lat_lo), str(lat_hi), str(lon_lo), str(lon_hi)))
    start_date = datetime(date_lo.year, date_lo.month, date_lo.day) - timedelta(days=1)
    # offset according to the dataset resolution
    offset = 0.25
    # consider the supported time range
    if start_date < datetime(2015, 1, 15):
        logger.debug('GFS 0.25 DATASET is out of supported range')
        return get_GFS_50(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points)
    x_arr_list = []
    base_url = 'https://rda.ucar.edu/thredds/catalog/files/g/ds084.1'
    CheckConnection.set_url('rda.ucar.edu')
    # calculate a day prior for midnight interpolation
    http_util.session_manager.set_session_options(auth=(config['UN_RDA'], config['PW_RDA']))
    start_cat = TDSCatalog(
        "%s/%s/%s%.2d%.2d/catalog.xml" % (base_url, start_date.year, start_date.year, start_date.month, start_date.day))
    name = 'gfs.0p25.%s%.2d%.2d18.f006.grib2' % (start_date.year, start_date.month, start_date.day)
    ds_subset = start_cat.datasets[name].subset()
    query = ds_subset.query().lonlat_box(north=lat_hi + 0.25, south=lat_lo - 0.25, east=lon_hi + 0.25,
                                         west=lon_lo - 0.25).variables(
        *GFS_25_VAR_LIST)
    CheckConnection.is_online()
    try:
        data = ds_subset.get_data(query)
        x_arr = xr.open_dataset(NetCDF4DataStore(data))
        # some files name the time coordinate 'time1'; normalize it
        if 'time1' in list(x_arr.coords):
            x_arr = x_arr.rename({'time1': 'time'})
        x_arr_list.append(x_arr)
    except Exception as e:
        # NOTE(review): failure of the prior-day file is swallowed and only
        # logged; interpolation near midnight may then extrapolate — confirm OK.
        logger.warning('dataset %s is not complete' % name)
    # fetch every available +3h/+6h forecast of every 6-hourly cycle per day
    for day in range((date_hi - date_lo).days + 1):
        end_date = datetime(date_lo.year, date_lo.month, date_lo.day) + timedelta(days=day)
        end_cat = TDSCatalog(
            "%s/%s/%s%.2d%.2d/catalog.xml" % (base_url, end_date.year, end_date.year, end_date.month, end_date.day))
        for cycle in [0, 6, 12, 18]:
            for hours in [3, 6]:
                name = 'gfs.0p25.%s%.2d%.2d%.2d.f0%.2d.grib2' % (
                    end_date.year, end_date.month, end_date.day, cycle, hours)
                if name in list(end_cat.datasets):
                    ds_subset = end_cat.datasets[name].subset()
                    query = ds_subset.query().lonlat_box(north=lat_hi + offset, south=lat_lo - offset, east=lon_hi + offset,
                                                         west=lon_lo - offset).variables(*GFS_25_VAR_LIST)
                    CheckConnection.is_online()
                    try:
                        data = ds_subset.get_data(query)
                        x_arr = xr.open_dataset(NetCDF4DataStore(data))
                        if 'time1' in list(x_arr.coords):
                            x_arr = x_arr.rename({'time1': 'time'})
                        x_arr_list.append(x_arr)
                    except Exception as e:
                        print(e)
                        logger.warning('dataset %s is not complete' % name)
                else:
                    logger.warning('dataset %s is not found' % name)
    dataset = xr.combine_by_coords(x_arr_list, coords=['time'], combine_attrs='override', compat='override').squeeze()
    # NOTE(review): this maps query longitudes from [-180, 180] into [180, 540);
    # only the negative half lands in the usual 0-360 GFS range — confirm this
    # matches the dataset's longitude convention for positive input longitudes.
    lon_points = ((lon_points + 180) % 360) + 180
    b = xr.DataArray([1] * len(lon_points))
    res = dataset.interp(longitude=lon_points, latitude=lat_points, time=time_points, bounds_dim=b).to_dataframe()[
        GFS_25_VAR_LIST]
    return res
def get_GFS_50(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points):
    """
    Retrieve GFS 0.50-degree analysis variables (GFS_50_VAR_LIST) from the
    NCEI THREDDS server and interpolate them onto the requested sample points.

    Used as the fallback for dates before the 0.25 product exists.  Each file
    download is retried up to 15 times with a 2-second pause on failure.

    :return: pandas DataFrame with one row per sample point, columns GFS_50_VAR_LIST.
    """
    logger.debug('obtaining GFS 0.50 dataset for DATE [%s, %s] LAT [%s, %s] LON [%s, %s]' % (
        str(date_lo), str(date_hi), str(lat_lo), str(lat_hi), str(lon_lo), str(lon_hi)))
    base_url = 'https://www.ncei.noaa.gov/thredds/model-gfs-g4-anl-files-old/'
    CheckConnection.set_url('ncei.noaa.gov')
    # offset according to the dataset resolution
    offset = 0.5
    x_arr_list = []
    # start a day earlier so interpolation across midnight has support
    start_date = datetime(date_lo.year, date_lo.month, date_lo.day) - timedelta(days=1)
    for day in range((date_hi - start_date).days + 1):
        dt = datetime(start_date.year, start_date.month, start_date.day) + timedelta(days=day)
        catalog = TDSCatalog(
            '%s%s%.2d/%s%.2d%.2d/catalog.xml' % (base_url, dt.year, dt.month, dt.year, dt.month, dt.day))
        for hour in [3, 6]:
            for cycle in [0, 6, 12, 18]:
                attempts = 0
                # retry loop: break on success or missing file, re-raise after 15 failures
                while True:
                    try:
                        attempts += 1
                        name = 'gfsanl_4_%s%.2d%.2d_%.2d00_00%s.grb2' % (dt.year, dt.month, dt.day, cycle, hour)
                        if name in list(catalog.datasets):
                            ds_subset = catalog.datasets[name].subset()
                            query = ds_subset.query().lonlat_box(north=lat_hi + offset, south=lat_lo - offset,
                                                                 east=lon_hi + offset, west=lon_lo - offset).variables(
                                *GFS_50_VAR_LIST)
                            CheckConnection.is_online()
                            data = ds_subset.get_data(query)
                            x_arr = xr.open_dataset(NetCDF4DataStore(data))
                            # some files name the time coordinate 'time1'; normalize it
                            if 'time1' in list(x_arr.coords):
                                x_arr = x_arr.rename({'time1': 'time'})
                            x_arr_list.append(x_arr)
                        else:
                            logger.warning('dataset %s is not found' % name)
                        break
                    except Exception as e:
                        logger.error(traceback.format_exc())
                        CheckConnection.is_online()
                        logger.error(e)
                        logger.error('Filename %s - Failed connecting to GFS Server - number of attempts: %d' % (
                            name, attempts))
                        if attempts > 15:
                            raise e
                        time.sleep(2)
    dataset = xr.combine_by_coords(x_arr_list, coords=['time'], combine_attrs='override', compat='override').squeeze()
    # NOTE(review): same longitude remap as get_GFS — maps [-180, 180] into
    # [180, 540); confirm it matches the dataset's longitude convention.
    lon_points = ((lon_points + 180) % 360) + 180
    res = dataset.interp(lon=lon_points, lat=lat_points, time=time_points).to_dataframe()[GFS_50_VAR_LIST]
    # res[['Wind_speed_gust_surface', 'Dewpoint_temperature_height_above_ground']] = [[np.nan, np.nan]] * len(res)
    return res.reset_index(drop=True)
def get_global_phy_daily(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points, lon_points):
    """
    Retrieve daily ocean-physics variables (DAILY_PHY_VAR_LIST) for the given
    bounding box and interpolate them onto the requested sample points.

    Reads from the local VM mirror when present, otherwise downloads via the
    CMEMS Motu service (analysis/forecast product from 2019-01-02, reanalysis
    product from 1993-01-02).  Only the surface depth layer (~0.49 m) is used.

    NOTE(review): if date_lo is earlier than 1993-01-02, neither branch below
    runs and base_url/service/product/VM_FOLDER/NRT_FLAG stay undefined,
    causing a NameError later — confirm such dates cannot reach here.

    :return: pandas DataFrame with one row per sample point, columns DAILY_PHY_VAR_LIST.
    """
    logger.debug('obtaining GLOBAL_ANALYSIS_FORECAST_PHY Daily dataset for DATE [%s, %s] LAT [%s, %s] LON [%s, %s]' % (
        str(date_lo), str(date_hi), str(lat_lo), str(lat_hi), str(lon_lo), str(lon_hi)))
    # offset according to the dataset resolution
    offset = 0.1
    if date_lo >= datetime(2019, 1, 2):
        CheckConnection.set_url('nrt.cmems-du.eu')
        base_url = 'https://nrt.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS'
        product = 'global-analysis-forecast-phy-001-024'
        VM_FOLDER = '/eodata/CMEMS/NRT/GLO/PHY/GLOBAL_ANALYSIS_FORECAST_PHY_001_024'
        NRT_FLAG = True  # near-real-time product: different local file naming
    elif date_lo >= datetime(1993, 1, 2):
        CheckConnection.set_url('my.cmems-du.eu')
        base_url = 'https://my.cmems-du.eu/motu-web/Motu?action=productdownload'
        service = 'GLOBAL_REANALYSIS_PHY_001_030-TDS'
        product = 'global-reanalysis-phy-001-030-daily'
        VM_FOLDER = '/eodata/CMEMS/REP/GLO/PHY/GLOBAL_REANALYSIS_PHY_001_030'
        NRT_FLAG = False
    # time range: daily values are stamped at 12:00; pad by one day on each side
    t_lo = datetime(date_lo.year, date_lo.month, date_lo.day, 12) - timedelta(days=1)
    t_hi = datetime(date_hi.year, date_hi.month, date_hi.day, 12) + timedelta(days=1)
    # coordinates bbox, widened by one grid step so interpolation has support
    y_lo = float(lat_lo) - offset
    y_hi = float(lat_hi) + offset
    x_lo = float(lon_lo) - offset
    x_hi = float(lon_hi) + offset
    # depth: select only the top model layer
    z_hi = 0.50
    z_lo = 0.49
    if Path(VM_FOLDER).exists():
        logger.debug('Accessing local data %s' % VM_FOLDER)
        datasets_paths = []
        for day in range((t_hi - t_lo).days + 1):
            dt = t_lo + timedelta(day)
            # the NRT and reanalysis mirrors use different file name prefixes
            path = Path(VM_FOLDER, '%s' % dt.year, '%.2d' % dt.month, '%.2d' % dt.day,
                        'mercatorpsy4v3r1_gl12_mean_%s%.2d%.2d_*.nc' % (dt.year, dt.month, dt.day) if NRT_FLAG
                        else 'mercatorglorys12v1_gl12_mean_%s%.2d%.2d_*.nc' % (dt.year, dt.month, dt.day))
            dataset = list(glob(str(path)))
            if len(dataset) > 0:
                datasets_paths.append(dataset[0])
        ds_nc = xr.open_mfdataset(datasets_paths)
        # coordinates may be stored in descending order; swap slice bounds to match
        if ds_nc.coords['latitude'].values[0] == ds_nc.coords['latitude'].max():
            tmp = y_lo
            y_lo = y_hi
            y_hi = tmp
        if ds_nc.coords['longitude'].values[0] == ds_nc.coords['longitude'].max():
            tmp = x_lo
            x_lo = x_hi
            x_hi = tmp
        dataset = ds_nc.sel(longitude=slice(x_lo, x_hi), latitude=slice(y_lo, y_hi),
                            time=slice(t_lo, t_hi), depth=slice(z_lo, z_hi)).compute()
    else:
        # no local mirror: build the Motu download URL and fetch over HTTP
        url = base_url + '&service=' + service + '&product=' + product + \
              '&x_lo={0}&x_hi={1}&y_lo={2}&y_hi={3}&t_lo={4}&t_hi={5}&z_lo={6}&z_hi={7}&mode=console'.format(x_lo, x_hi,
                                                                                                             y_lo,
                                                                                                             y_hi,
                                                                                                             utils.date_to_str(
                                                                                                                 t_lo)
                                                                                                             ,
                                                                                                             utils.date_to_str(
                                                                                                                 t_hi),
                                                                                                             z_lo, z_hi)
        dataset = try_get_data(url)
    return dataset.interp(longitude=lon_points, latitude=lat_points, time=time_points).to_dataframe()[
        DAILY_PHY_VAR_LIST].reset_index(drop=True)
def append_to_csv(in_path: Path, out_path: Path) -> None:
    """
    Read the CSV at in_path in chunks, augment each chunk with weather / ocean
    variables (GFS, daily physics, wind, waves) interpolated at every row's
    timestamp and position, and append the result to out_path.

    On any failure the partially-written out_path is deleted so the file can
    be regenerated cleanly on a later run, and a FileFailedException carrying
    the file name and the underlying error is raised.

    :param in_path: source CSV with BaseDateTime, LAT and LON columns.
    :param out_path: destination CSV; chunks are appended in order.
    :raises FileFailedException: if any chunk fails to process.
    """
    logger.debug('append_environment_data in file %s' % in_path)
    header = True  # write the CSV header only with the first chunk
    try:
        for df_chunk in pd.read_csv(in_path, parse_dates=['BaseDateTime'], date_parser=utils.str_to_date,
                                    chunksize=utils.CHUNK_SIZE):
            # NOTE(review): chunks with a single row are skipped entirely — confirm intended
            if len(df_chunk) > 1:
                # remove index column
                df_chunk.drop(['Unnamed: 0'], axis=1, errors='ignore', inplace=True)
                # retrieve the data for each file once
                lat_hi = df_chunk.LAT.max()
                lon_hi = df_chunk.LON.max()
                lat_lo = df_chunk.LAT.min()
                lon_lo = df_chunk.LON.min()
                date_lo = df_chunk.BaseDateTime.min()
                date_hi = df_chunk.BaseDateTime.max()
                # query parameters: per-row sample points for interpolation
                time_points = xr.DataArray(list(df_chunk['BaseDateTime'].values))
                lat_points = xr.DataArray(list(df_chunk['LAT'].values))
                lon_points = xr.DataArray(list(df_chunk['LON'].values))
                # reset so the column-wise concats below align row for row
                df_chunk.reset_index(drop=True, inplace=True)
                df_chunk = pd.concat([df_chunk, get_GFS(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points,
                                                        lat_points, lon_points)], axis=1)
                df_chunk = pd.concat(
                    [df_chunk, get_global_phy_daily(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points,
                                                    lat_points, lon_points)], axis=1)
                df_chunk = pd.concat(
                    [df_chunk,
                     get_global_wind(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points,
                                     lon_points)], axis=1)
                df_chunk = pd.concat(
                    [df_chunk,
                     get_global_wave(date_lo, date_hi, lat_lo, lat_hi, lon_lo, lon_hi, time_points, lat_points,
                                     lon_points)], axis=1)
                df_chunk.to_csv(out_path, mode='a', header=header, index=False)
                header = False
    except Exception as e:
        # discard the file in case of an error to resume later properly
        out_path.unlink(missing_ok=True)
        raise FileFailedException(out_path.name, e)
def append_environment_data_to_year(filtered_dir: Path, merged_dir: Path) -> None:
    """Augment every CSV found in filtered_dir with environment data, skipping
    files already present in merged_dir and files known to have failed."""
    for csv_name in check_dir(filtered_dir):
        already_merged = Path(merged_dir, csv_name).exists()
        if already_merged or csv_name in Failed_Files:
            continue
        append_to_csv(Path(filtered_dir, csv_name), Path(merged_dir, csv_name))
def append_environment_data_to_file(file_name, filtered_dir, merged_dir):
    """Augment a single CSV: read file_name from filtered_dir and write the
    environment-enriched copy to merged_dir."""
    source = Path(filtered_dir, file_name)
    target = Path(merged_dir, file_name)
    append_to_csv(source, target)
| null |
weather.py
|
weather.py
|
py
| 22,576 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "xarray.open_mfdataset",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "motu_utils.utils_cas.authenticate_CAS_for_URL",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "config.config",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "motu_utils.utils_http.open_url",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "xarray.open_dataset",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "xarray.open_mfdataset",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "siphon.http_util.session_manager.set_session_options",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "siphon.http_util.session_manager",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "siphon.http_util",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "config.config",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "siphon.catalog.TDSCatalog",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "xarray.open_dataset",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "xarray.backends.NetCDF4DataStore",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "siphon.catalog.TDSCatalog",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "xarray.open_dataset",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "xarray.backends.NetCDF4DataStore",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "xarray.combine_by_coords",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "xarray.DataArray",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "siphon.catalog.TDSCatalog",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "xarray.open_dataset",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "xarray.backends.NetCDF4DataStore",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.is_online",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "xarray.combine_by_coords",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection.set_url",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "check_connection.CheckConnection",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "xarray.open_mfdataset",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "utils.date_to_str",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "utils.str_to_date",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "utils.CHUNK_SIZE",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "xarray.DataArray",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "xarray.DataArray",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "xarray.DataArray",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "utils.FileFailedException",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "utils.check_dir",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "utils.Failed_Files",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 449,
"usage_type": "call"
}
] |
520903612
|
import scrapy
class ContributorsSpider(scrapy.Spider):
    """Crawl OpenIDEO challenge research listings and emit, for every
    contribution found, its author's display name and geolocation."""
    name = "contributors"

    def start_requests(self):
        """Kick off one crawl per challenge research listing."""
        challenge_urls = (
            'https://challenges.openideo.com/challenge/combatzikafuturethreats/research',
            'https://challenges.openideo.com/challenge/future-of-highered/research',
            'https://challenges.openideo.com/challenge/financial-longevity/research',
        )
        for challenge_url in challenge_urls:
            yield scrapy.Request(url=challenge_url, callback=self.traverse)

    def traverse(self, response):
        """Fan out over every paginated page of the listing."""
        last_page = int(response.css("span.js-page-count::text").extract_first())
        for page in range(1, last_page + 1):
            page_url = response.url + "?page=" + str(page)
            yield scrapy.Request(url=page_url, callback=self.parse)

    def parse(self, response):
        """Follow the detail link of every contribution on this page."""
        for column in response.css('div.col-keep-distance'):
            for entry in column.css('article'):
                detail_href = entry.css('h1.listing-title a::attr(href)').extract_first()
                yield scrapy.Request(url=response.urljoin(detail_href), callback=self.contribution)

    def contribution(self, response):
        """From a contribution page, follow the author's profile link."""
        profile_href = response.css('div.details h1.secondary-text a::attr(href)').extract_first()
        yield scrapy.Request(url=response.urljoin(profile_href), callback=self.author)

    def author(self, response):
        """Emit the author's display name and geolocation."""
        yield {
            "author": response.css('h1.headline-text::text').extract_first().strip(),
            "geolocation": response.css('p.geolocation::text').extract_first().strip(),
        }
| null |
OpenIDEOSpider/spiders/Contributors.py
|
Contributors.py
|
py
| 1,735 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.Spider",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 37,
"usage_type": "call"
}
] |
646772027
|
import unittest
from ..api.model import Model, Field, DatetimeField
from ..api.model.utils import from_fields
import datetime
import gc
class ModelInheritanceCase(unittest.TestCase):
    """Construction behaviour of a minimal Model subclass with one str field."""
    def setUp(self):
        # fresh single-field model class for every test
        class User(Model):
            name = Field(str)
        self.User = User
    def test_simple_model(self):
        """Constructing with the declared field stores its value."""
        user = self.User(name="John F. Bar")
        self.assertIsInstance(user, self.User)
        self.assertEqual(user.name, "John F. Bar")
    def test_invalid_attribute(self):
        """An undeclared keyword raises KeyError."""
        with self.assertRaises(KeyError):
            user = self.User(name="L. Dongs", bad_field="Bang!")
    def test_missing_attribute(self):
        """Omitting a required field raises AttributeError."""
        with self.assertRaises(AttributeError):
            user = self.User()
class ModelCase(unittest.TestCase):
    """Field configuration options (nullable, default), JSON round-trips and
    attribute get/set/delete semantics on Model subclasses."""
    def test_field_options(self):
        """test different field configs on model"""
        class User(Model):
            name = Field(str)
            age = Field(int)
            brothers_name = Field(str, nullable=True)
            catchphrase = Field(str, default="To be, or not to run unittests on all your shitty code")
        u1 = User(
            name="John F. Bar",
            age=22,
            brothers_name="Also John" #set the nullable
            #default the catchphrase
        )
        u2 = User(
            name="John F. Bar",
            age=23,
            #default the nullable (to null, ofc)
            catchphrase="You're a wizzard, Harry!"
        )
        #failing cases:
        with self.assertRaises(AttributeError):
            u3 = User(
                name="John F. Bar"
                #missing age
            )
        with self.assertRaises(ValueError):
            u4 = User(
                name="John F. Bar",
                age=200,
                catchphrase=None #field is not nullable, so ValueError, since None is not a valid field value
            )
        with self.assertRaises(TypeError):
            u5 = User(
                name="John F. Bar",
                age=200,
                catchphrase=10 #invalid field type on a defaulting field
            )
        with self.assertRaises(TypeError):
            u6 = User(
                name="John F. Bar",
                age=200,
                brothers_name=10 #invalid field type on a nullable field
            )
        #test that we can do a from_fields on the u2
        d = from_fields(getattr(User, "__modelfields__"), getattr(u2, "__modeldata"), False)
    def test_to_json_dict(self):
        """to_json_dict works on instances and rejects a plain dict."""
        class User(Model):
            name = Field(str)
        u = User.from_json_dict({
            "name": "John"
        })
        j = User.to_json_dict(u)
        with self.assertRaises(ValueError):
            User.to_json_dict({"foo":"bar"})
    def test_attribute_setters(self):
        """Attribute get/set/delete enforces declared fields, types and defaults."""
        class User(Model):
            name = Field(str)
            age = Field(int)
            motto = Field(str, default="I like Trains")
        u1 = User(name="John", age=200) #ok case
        with self.assertRaises(KeyError):
            u2 = User(name="John", age=200, undefined_field="Derp") #KeyError, undefined_field is not a field on User class
        self.assertEqual(u1.name, "John")
        self.assertEqual(u1.age, 200)
        self.assertEqual(u1.motto, "I like Trains")
        # setters
        u1.name = "Not John" #should be fine
        u1.age = 400 #also fine
        u1.motto = "I Don't like Trains"
        self.assertEqual(u1.name, "Not John")
        self.assertEqual(u1.age, 400)
        self.assertEqual(u1.motto, "I Don't like Trains")
        with self.assertRaises(AttributeError):
            u1.undefined_field = "Some String" #AttributeError, undefined_field is not a valid attribute
        with self.assertRaises(AttributeError):
            val = u1.undefined_field #AttributeError, undefined_field is not a valid attribute
        with self.assertRaises(AttributeError):
            del u1.undefined_field #field does not exist, so AttributeError
        with self.assertRaises(AttributeError):
            del u1.name #field not nullable
        with self.assertRaises(TypeError):
            u1.name = 1 #TypeError, name must be str
        del u1.motto #should be fine, since motto is defaultable
        self.assertEqual(u1.motto, "I like Trains") #assert that motto takes on default value when del is called
    def test_invalid_json_dict(self):
        """from_json_dict rejects non-dict input with ValueError."""
        class User(Model):
            name = Field(str)
        with self.assertRaises(ValueError):
            User.from_json_dict(["i", "am", "not", "a", "dict"])
class ModelDehydratorProxyCase(unittest.TestCase):
    """Tests for the raw (dehydrated) attribute proxy exposed as Model.raw."""
    def test_proxy(self):
        """Raw values round-trip through the datetime dehydrator and honour None."""
        class User(Model):
            created = DatetimeField("%c", nullable=True)
        # jump through hoops to get current timestamp, but decoded from an encoded timestamp
        # to ensure that encoding to that format ("%c") again should return same result.
        now = datetime.datetime.strptime(datetime.datetime.now().strftime("%c"), "%c")
        u = User.from_json_dict({
            "created": now.strftime("%c")
        })
        self.assertEqual(u.raw.created, now.strftime("%c"))
        # set to none
        u.raw.created = None
        # validate that field is empty on model
        self.assertIsNone(u.created)
        # set back to time again
        u.raw.created = now.strftime("%c")
        # validate that time is set again
        self.assertIsInstance(u.created, datetime.datetime)
        # and check that it matches the original time
        self.assertEqual(u.created, now)
    def test_proxy_missing_key(self):
        """Key- and attribute-style raw access reject unknown fields; deleting
        a defaulted field restores its default value."""
        default_nick = "tbug Mc.Swag"
        build_nick = "FooBar"
        class User(Model):
            name = Field(str)
            nick = Field(str, default=default_nick)
        u = User.from_json_dict({
            "name": "John",
            "nick": build_nick
        })
        with self.assertRaises(KeyError):
            u.raw["does_not_exist"]
        with self.assertRaises(AttributeError):
            u.raw.does_not_exist
        with self.assertRaises(KeyError):
            u.raw["does_not_exist"] = "Other Name"
        with self.assertRaises(AttributeError):
            u.raw.does_not_exist = "Other Name"
        with self.assertRaises(AttributeError):
            del u.raw.name
        with self.assertRaises(KeyError):
            del u.raw["name"]
        self.assertEqual(u.raw.nick, build_nick)
        del u.raw.nick #should default
        self.assertEqual(u.raw.nick, default_nick)
        #set it back to build nick and try to delete as key
        u.raw.nick = build_nick
        self.assertEqual(u.raw.nick, build_nick)
        del u.raw["nick"]
        self.assertEqual(u.raw.nick, default_nick)
        # BUG FIX: the original called assertEqual(len(u), len(u.raw), 2); the
        # third positional argument of assertEqual is the failure *message*, so
        # the expected count 2 was never actually compared.
        self.assertEqual(len(u), 2)
        self.assertEqual(len(u.raw), 2)
| null |
eta/tests/test_model.py
|
test_model.py
|
py
| 6,779 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "api.model.Model",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "api.model.Model",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "api.model.utils.from_fields",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "api.model.Model",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "api.model.Model",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "api.model.Model",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "api.model.Model",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "api.model.DatetimeField",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "api.model.Model",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "api.model.Field",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "api.model.Field",
"line_number": 179,
"usage_type": "call"
}
] |
30407902
|
#!/usr/bin/env python3.6
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from orm_setup import Base, Caso
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
from flask import session as login_session
import random
import string
from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
from flask import redirect
from flask import flash
from flask import jsonify
# flask app variable
app = Flask(__name__)
# Create session and connect with postgres Database
# NOTE(review): DB credentials are hard-coded in the URL; move them to
# configuration/environment. The "postgres://" scheme name was dropped by
# SQLAlchemy 1.4+ in favor of "postgresql://" -- confirm the installed
# SQLAlchemy version still accepts it.
engine = create_engine("postgres://vagrant:laCumbre1@/law")
Base.metadata.bind = engine
# One module-wide session shared by every request handler below.
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Obtain credentials from JSON file
CLIENT_ID = json.loads(open('client_secrets.json', 'r')
                       .read())['web']['client_id']
CLIENT_SECRET = json.loads(open('client_secrets.json', 'r')
                           .read())['web']['client_secret']
redirect_uris = json.loads(open('client_secrets.json', 'r')
                           .read())['web']['redirect_uris']
# Flask signs session cookies with this key (overridden again in __main__).
app.secret_key = CLIENT_SECRET
APPLICATION_NAME = "law-app-udacity"
@app.route('/login')
def showLogin():
    """Render the login page with a fresh anti-forgery state token.

    The token is stored in the session and must be echoed back by the
    client on /gconnect to guard against CSRF.
    """
    # local import: stdlib-only change, avoids touching the import block
    import secrets
    # True when a user is already signed in, otherwise None.
    login_status = True if 'email' in login_session else None
    # SECURITY FIX: the original used random.choice, whose Mersenne
    # Twister output is predictable; CSRF tokens need a CSPRNG.
    alphabet = string.ascii_uppercase + string.digits
    state = ''.join(secrets.choice(alphabet) for _ in range(32))
    login_session['state'] = state
    return render_template('/login.html', STATE=state,
                           CLIENT_ID=CLIENT_ID, login_status=login_status)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Exchange a Google one-time authorization code for an access token.

    Validates the anti-forgery state token, upgrades the code via the
    OAuth2 flow, verifies the resulting token against this app and user,
    stores identity data in the session, and returns a welcome snippet.
    Error paths return JSON responses with 401/500 status codes.
    """
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print("Token's client ID does not match app's.")
        response.headers['Content-Type'] = 'application/json'
        return response
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        # BUG FIX: the original adjacent string literals rendered
        # "Current user is alreadyconnected." (missing space).
        response = make_response(json.dumps('Current user is already '
                                 'connected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # NOTE(review): profile data from Google is interpolated into HTML
    # without escaping; consider markupsafe.escape() before rendering.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px; '
    output += ' -webkit-border-radius: 150px;-moz-border-radius:150px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    print("done!")
    return output
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the stored Google OAuth token and clear the session.

    Returns a 401 JSON response when nobody is logged in, redirects to
    /index after a successful revocation, and returns a 400 JSON response
    when Google refuses to revoke the token.
    """
    access_token = login_session.get('access_token')
    if access_token is None:
        print('Access Token is None')
        response = make_response(json.dumps('Current user not connected.'),
                                 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    print('In gdisconnect access token is %s', access_token)
    print('User name is: ')
    # BUG FIX: the original called .encode('utf-8').strip() on print()'s
    # return value (None), raising AttributeError on every logout.
    print(login_session['username'])
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s'\
        % login_session['access_token']
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    print('result is ')
    print(result)
    if result['status'] == '200':
        # Drop all identity data from the session.
        del login_session['access_token']
        del login_session['gplus_id']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        # (The original also built an unused 200 JSON response here;
        # the redirect below is what callers actually receive.)
        return redirect('/index')
    else:
        # BUG FIX: the original line-continuation embedded a run of
        # indentation spaces inside the message string.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
# Endpoints
@app.route('/')
@app.route('/index')
def index():
    """Landing page: render the share (in %) of cases per category.

    Percentages are rounded to one decimal; an empty table yields 0.0
    for every category instead of crashing.
    """
    login_status = True if 'email' in login_session else None
    total = session.query(Caso).count()

    def pct(categoria):
        # Share of cases in `categoria`; the original raised
        # ZeroDivisionError when the Caso table was empty.
        if total == 0:
            return 0.0
        count = session.query(Caso).filter_by(categoria=categoria).count()
        return round(count * 100 / total, 1)

    return render_template('index.html',
                           penal=pct("penal"),
                           laboral=pct("laboral"),
                           familia=pct("familia"),
                           der_civil=pct("derecho_civil"),
                           cont_admin=pct("cont_administrativo"),
                           login_session=login_session,
                           login_status=login_status)
# Functions for the categories.
@app.route('/penal')
def Penal():
    """Render the "penal" category page with its cases and their share
    (in %) of the total caseload.
    """
    login_status = True if 'email' in login_session else None
    casos = session.query(Caso).filter_by(categoria="penal")
    total = session.query(Caso).count()
    # Guard against an empty table (the original raised ZeroDivisionError).
    penal = round(casos.count() * 100 / total, 1) if total else 0.0
    return render_template('/categorias/penal.html', casos=casos, total=total,
                           penal=penal, login_session=login_session,
                           login_status=login_status)
@app.route('/cont-administrativo')
def ContAdministrativo():
    """Category page for "cont-administrativo": its cases plus their
    percentage of all cases.
    """
    login_status = True if 'email' in login_session else None
    casos = session.query(Caso).filter_by(categoria="cont_administrativo")
    total = session.query(Caso).count()
    cont_admin = round(casos.count() * 100 / total, 1)
    return render_template('/categorias/cont-administrativo.html',
                           casos=casos, total=total, cont_admin=cont_admin,
                           login_session=login_session,
                           login_status=login_status)
@app.route('/der-civil')
def DerechoCivil():
    """Category page for "Derecho Civil": its cases plus their
    percentage of all cases.
    """
    login_status = True if 'email' in login_session else None
    casos = session.query(Caso).filter_by(categoria="derecho_civil")
    total = session.query(Caso).count()
    der_civil = round(casos.count() * 100 / total, 1)
    return render_template('/categorias/der-civil.html', casos=casos,
                           total=total, der_civil=der_civil,
                           login_session=login_session,
                           login_status=login_status)
@app.route('/familia')
def Familia():
    """Category page for "Familia": its cases plus their percentage of
    all cases.
    """
    login_status = True if 'email' in login_session else None
    casos = session.query(Caso).filter_by(categoria="familia")
    total = session.query(Caso).count()
    familia = round(casos.count() * 100 / total, 1)
    return render_template('/categorias/familia.html', casos=casos,
                           total=total,
                           familia=familia, login_session=login_session,
                           login_status=login_status)
@app.route('/laboral')
def Laboral():
    """Category page for "Laboral": its cases plus their percentage of
    all cases.
    """
    login_status = True if 'email' in login_session else None
    casos = session.query(Caso).filter_by(categoria="laboral")
    total = session.query(Caso).count()
    laboral = round(casos.count() * 100 / total, 1)
    return render_template('/categorias/laboral.html', casos=casos,
                           total=total, laboral=laboral,
                           login_session=login_session,
                           login_status=login_status)
# JSON endpoints for the categories.
@app.route('/laboral/JSON')
def LaboralJSON():
    """Return every "laboral" case serialized as JSON."""
    casos = session.query(Caso).filter_by(categoria="laboral")
    serialized = [caso.serialize for caso in casos]
    return jsonify(Caso=serialized)
@app.route('/penal/JSON')
def penalJSON():
    """Return every "penal" case serialized as JSON."""
    casos = session.query(Caso).filter_by(categoria="penal")
    serialized = [caso.serialize for caso in casos]
    return jsonify(Caso=serialized)
@app.route('/cont-administrativo/JSON')
def ContAdministrativoJSON():
    """Return every "cont_administrativo" case serialized as JSON."""
    casos = session.query(Caso).filter_by(categoria="cont_administrativo")
    serialized = [caso.serialize for caso in casos]
    return jsonify(Caso=serialized)
@app.route('/der-civil/JSON')
def DerechoCivilJSON():
    """Return every "derecho_civil" case serialized as JSON."""
    casos = session.query(Caso).filter_by(categoria="derecho_civil")
    serialized = [caso.serialize for caso in casos]
    return jsonify(Caso=serialized)
@app.route('/familia/JSON')
def FamiliaJSON():
    """Return every "familia" case serialized as JSON."""
    casos = session.query(Caso).filter_by(categoria="familia")
    serialized = [caso.serialize for caso in casos]
    return jsonify(Caso=serialized)
# CRUD Functions
@app.route('/caso-<int:caso_id>')
def VerCaso(caso_id):
    """Show a single case; the template uses login_status to decide
    whether to expose the edit/delete buttons.
    """
    login_status = True if 'email' in login_session else None
    caso = session.query(Caso).filter_by(id=caso_id)
    return render_template('/caso/caso-ver.html', caso=caso,
                           login_session=login_session,
                           login_status=login_status)
@app.route('/caso-<int:caso_id>/JSON')
def VerCasoJSON(caso_id):
    """Return the requested case serialized as JSON."""
    caso = session.query(Caso).filter_by(id=caso_id)
    serialized = [c.serialize for c in caso]
    return jsonify(Caso=serialized)
@app.route('/nuevo-caso', methods=['GET', 'POST'])
def NuevoCaso():
    """Create a new case.

    GET renders the creation form; POST persists the case and redirects
    to the index. Anonymous users are redirected to /login.
    """
    login_status = True if 'email' in login_session else None
    if 'username' not in login_session:
        return redirect('/login')
    # Guard clause: anything but POST just shows the form.
    if request.method != 'POST':
        return render_template('/caso/caso-nuevo.html',
                               login_session=login_session,
                               login_status=login_status)
    new_case = Caso(nombre=request.form['input-nombre'],
                    categoria=request.form['input-categoria'],
                    creado=request.form['input-creado'],
                    juzgado=request.form['input-juzgado'],
                    actor=request.form['input-actor'],
                    demandado=request.form['input-demandado'],
                    precio=request.form['input-precio'],
                    descripcion=request.form['input-descripcion'],
                    status='Active')
    session.add(new_case)
    session.commit()
    flash("Case created successfully.")
    return redirect(url_for('index'))
@app.route('/caso-<int:caso_id>/edit', methods=['GET', 'POST'])
def EditarCaso(caso_id):
    """Edit an existing case.

    GET renders the edit form pre-filled with the case; POST copies the
    submitted fields onto the record, commits, and redirects to the
    case's detail page. Anonymous users are redirected to /login.
    """
    login_status = True if 'email' in login_session else None
    if 'username' not in login_session:
        return redirect('/login')
    caso_editado = session.query(Caso).filter_by(id=caso_id).one()
    if request.method == 'POST':
        # Every editable attribute maps to a form field "input-<attr>".
        for attr in ('nombre', 'creado', 'juzgado', 'categoria', 'actor',
                     'demandado', 'precio', 'descripcion', 'status'):
            setattr(caso_editado, attr, request.form['input-' + attr])
        session.add(caso_editado)
        session.commit()
        flash("Case updated successfully.")
        return redirect(url_for('VerCaso', caso_id=caso_id))
    return render_template('/caso/caso-editar.html', caso=caso_editado,
                           login_session=login_session,
                           login_status=login_status)
@app.route('/caso-<int:caso_id>/delete', methods=['GET', 'POST'])
def BorrarCaso(caso_id):
    """Delete a case.

    GET renders a confirmation page; POST performs the delete and
    redirects to the index. Anonymous users are redirected to /login.
    """
    login_status = True if 'email' in login_session else None
    if 'username' not in login_session:
        return redirect('/login')
    caso_borrar = session.query(Caso).filter_by(id=caso_id).one()
    # Guard clause: anything but POST just shows the confirmation page.
    if request.method != 'POST':
        return render_template('/caso/caso-borrar.html', caso=caso_borrar,
                               login_session=login_session,
                               login_status=login_status)
    session.delete(caso_borrar)
    session.commit()
    flash("Case deleted successfully.")
    return redirect(url_for('index'))
# execute the program
if __name__ == '__main__':
    """
    Function for running the app on any ip and port 8000.
    """
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # never run this configuration in production. This secret key also
    # overrides the CLIENT_SECRET-derived key set at import time.
    app.secret_key = 'SUPER_SECRET_KEY'
    app.debug = True
    app.run(host='0.0.0.0', port=8000)
| null |
run.py
|
run.py
|
py
| 17,651 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "orm_setup.Base.metadata",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "orm_setup.Base",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "oauth2client.client.flow_from_clientsecrets",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "oauth2client.client.FlowExchangeError",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "httplib2.Http",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "httplib2.Http",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 203,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 205,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 209,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 214,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 218,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 222,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 244,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 246,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 266,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 268,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 289,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 291,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 312,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 314,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 335,
"usage_type": "argument"
},
{
"api_name": "orm_setup.Caso",
"line_number": 337,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 354,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 364,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 374,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 384,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 394,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 410,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 422,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "orm_setup.Caso",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 442,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 443,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 445,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 470,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 473,
"usage_type": "argument"
},
{
"api_name": "flask.request.method",
"line_number": 475,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 478,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 479,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 479,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 482,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 483,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "orm_setup.Caso",
"line_number": 509,
"usage_type": "argument"
},
{
"api_name": "flask.request.method",
"line_number": 511,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 511,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 518,
"usage_type": "name"
}
] |
209374747
|
import pandas as pd
from sklearn.datasets import make_classification
def get_test_data(n_features=40, n_informative=10, n_redundant=10, n_samples=10000):
X, cont = make_classification(n_samples=n_samples, n_features=n_features,
n_informative=n_informative, n_redundant=n_redundant,
random_state=0, shuffle=False)
time_idx = pd.DatetimeIndex(periods=n_samples, freq=pd.tseries.offsets.BDay(),
end=pd.datetime.today())
X = pd.DataFrame(X, index=time_idx)
cont = pd.Series(cont, index=time_idx).to_frame('bin')
# Create name of columns
columns = ['I_' + str(i) for i in range(n_informative)]
columns += ['R_' + str(i) for i in range(n_redundant)]
columns += ['N_' + str(i) for i in range(n_features - len(columns))]
X.columns = columns
cont['w'] = 1. / cont.shape[0]
cont['t1'] = pd.Series(cont.index, index=cont.index)
return X, cont
| null |
datasets.py
|
datasets.py
|
py
| 982 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.tseries.offsets.BDay",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.tseries",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pandas.datetime.today",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 19,
"usage_type": "call"
}
] |
565630867
|
import matlab.engine
import os
import logging
from tqdm import tqdm
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision.utils import save_image
import torch
import utils
import scipy.io as io
import numpy as np
from sklearn.decomposition import PCA
import random
import logger
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
randconst = torch.rand(1).type(Tensor) * 2 - 1
def make_figure_dir(folder):
os.makedirs(folder + '/figures/scatter', exist_ok=True)
os.makedirs(folder + '/figures/histogram', exist_ok=True)
os.makedirs(folder + '/figures/deviceSamples', exist_ok=True)
# os.makedirs(folder + '/figures/scatter_and_histogram', exist_ok=True)
def PCA_model(data_path):
pca = PCA(n_components=2, svd_solver='randomized')
dataset = io.loadmat(data_path, struct_as_record=False, squeeze_me=True)
data = dataset['new']
pca.fit(data)
return pca
def PCA_analysis(generator, pca, eng, params, numImgs=100):
generator.eval()
imgs = sample_images(generator, numImgs, params)
generator.train()
Efficiency = torch.zeros(numImgs)
img = torch.squeeze(imgs[:, 0, :]).data.cpu().numpy()
img = matlab.double(img.tolist())
wavelength = matlab.double([params.w] * numImgs)
desired_angle = matlab.double([params.a] * numImgs)
abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
# img = img[np.where(Efficiency.reshape(-1) > 0), :]
# Efficiency = Efficiency[Efficiency > 0]
img_2 = pca.transform(img)
fig_path = params.output_dir + \
'/figures/scatter/Iter{}.png'.format(params.iter)
utils.plot_scatter(img_2, Efficiency, params.iter, fig_path)
fig_path = params.output_dir + \
'/figures/histogram/Iter{}.png'.format(params.iter)
utils.plot_histogram(Efficiency, params.iter, fig_path)
imgs = imgs[:8, :, :].unsqueeze(2).repeat(1, 1, 64, 1)
fig_path = params.output_dir + \
'/figures/deviceSamples/Iter{}.png'.format(params.iter)
save_image(imgs, fig_path, 2)
'''
grads = eng.GradientFromSolver_1D_parallel(img, wavelength, desired_angle)
grad_2 = pca.transform(grads)
if params.iter % 2 == 0:
utils.plot_envolution(params.img_2_prev, params.eff_prev, params.grad_2_prev,
img_2, Efficiency, params.iter, params.output_dir)
else:
utils.plot_arrow(img_2, Efficiency, grad_2, params.iter, params.output_dir)
params.img_2_prev = img_2
params.eff_prev = Efficiency
params.grad_2_prev = grad_2
'''
return img_2, Efficiency
def sample_images(generator, batch_size, params):
if params.noise_constant == 1:
noise = (torch.ones(batch_size, params.noise_dims).type(
Tensor) * randconst) * params.noise_amplitude
else:
if params.noise_distribution == 'uniform':
noise = (torch.rand(batch_size, params.noise_dims).type(
Tensor) * 2. - 1.) * params.noise_amplitude
else:
noise = (torch.randn(batch_size, params.noise_dims).type(
Tensor)) * params.noise_amplitude
lamda = torch.ones(batch_size, 1).type(Tensor) * params.w
theta = torch.ones(batch_size, 1).type(Tensor) * params.a
z = torch.cat((lamda, theta, noise), 1)
if params.cuda:
z.cuda()
images = generator(z, params.binary_amp)
images = torch.sign(images)
strucs = images.cpu().detach().numpy()
img = torch.squeeze(images[:, 0, :]).data.cpu().numpy()
img = matlab.double(img.tolist())
return img, strucs
def generate_test_images(generator, batch_size, params, wavelength, angle):
if params.noise_constant == 1:
noise = (torch.ones(batch_size, params.noise_dims).type(
Tensor) * randconst) * params.noise_amplitude
else:
if params.noise_distribution == 'uniform':
noise = (torch.rand(batch_size, params.noise_dims).type(
Tensor) * 2. - 1.) * params.noise_amplitude
else:
noise = (torch.randn(batch_size, params.noise_dims).type(
Tensor)) * params.noise_amplitude
lamda = torch.ones(batch_size, 1).type(Tensor) * wavelength
theta = torch.ones(batch_size, 1).type(Tensor) * angle
z = torch.cat((lamda, theta, noise), 1)
if params.cuda:
z.cuda()
images = generator(z, params.binary_amp)
images = torch.sign(images)
strucs = images.cpu().detach().numpy()
img = torch.squeeze(images[:, 0, :]).data.cpu().numpy()
img = matlab.double(img.tolist())
return img, strucs
def evaluate(generator, eng, numImgs, params):
generator.eval()
filename = 'ccGAN_imgs_Si_w' + \
str(params.w) + '_' + str(params.a) + 'deg.mat'
img, strucs = sample_images(generator, numImgs, params)
file_path = os.path.join(params.output_dir, 'outputs', filename)
logging.info('Generation is done. \n')
Efficiency = torch.zeros(numImgs)
wavelength = matlab.double([params.w] * numImgs)
desired_angle = matlab.double([params.a] * numImgs)
abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
max_eff_index = np.argmax(Efficiency)
max_eff = Efficiency[max_eff_index]
best_struc = strucs[max_eff_index, :, :].reshape(-1)
fig_path = params.output_dir + '/figures/Efficiency.png'
utils.plot_histogram(Efficiency, params.numIter, fig_path)
print('{} {} {} {} {} {} {:.2f}'.format('The best efficiency for',
'wavelength =', params.w, 'and angle =', params.a, 'is', max_eff))
io.savemat(file_path, mdict={
'strucs': strucs, 'effs': Efficiency, 'best_struc': best_struc,
'max_eff_index': max_eff_index, 'max_eff': max_eff})
def test(generator, eng, numImgs, params):
generator.eval()
filename = 'ccGAN_imgs_Si_w' + \
str(params.w) + '_' + str(params.a) + 'deg_test.mat'
img, strucs = sample_images(generator, numImgs, params)
file_path = os.path.join(params.output_dir, 'outputs', filename)
logging.info('Test starts. \n')
Efficiency = torch.zeros(numImgs)
wavelength = matlab.double([params.w] * numImgs)
desired_angle = matlab.double([params.a] * numImgs)
abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
max_eff_index = np.argmax(Efficiency)
max_eff = Efficiency[max_eff_index]
best_struc = strucs[max_eff_index, :, :].reshape(-1)
print('{} {} {} {} {} {} {:.2f}'.format('The best efficiency for',
'wavelength =', params.w, 'and angle =', params.a, 'is', max_eff))
io.savemat(file_path, mdict={
'strucs': strucs, 'effs': Efficiency, 'best_struc': best_struc,
'max_eff_index': max_eff_index, 'max_eff': max_eff})
def test_group(generator, eng, numImgs, params, test_num):
generator.eval()
logging.info('Test group starts. \n')
Efficiency = torch.zeros(numImgs)
if params.heatmap:
lamda_list = list(range(params.hwstart, params.hwend + params.hwstride, params.hwstride))
theta_list = list(range(params.hastart, params.haend + params.hastride, params.hastride))
H = len(lamda_list)
W = len(theta_list)
heat_scores = np.zeros((H, W))
with tqdm(total=H * W, ncols=70) as t:
for lamda, i in zip(lamda_list[::-1], range(H)):
for theta, j in zip(theta_list, range(W)):
img, _ = generate_test_images(generator, numImgs, params, lamda, theta)
wavelength = matlab.double([lamda] * numImgs)
desired_angle = matlab.double([theta] * numImgs)
abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
heat_scores[i, j] = np.max(Efficiency)
t.update()
fig_path = params.output_dir + '/figures/heatmap_batch{}.png'.format(params.solver_batch_size_start)
utils.plot_heatmap(lamda_list, theta_list, heat_scores, fig_path)
print("Plot heatmap successfully!")
else:
max_eff_index = []
max_eff = []
best_struc = []
with tqdm(total=test_num, ncols=70) as t:
for i in range(test_num):
lamda = random.uniform(600, 1200)
theta = random.uniform(40, 80)
img, strucs = generate_test_images(generator, numImgs, params, lamda, theta)
wavelength = matlab.double([lamda] * numImgs)
desired_angle = matlab.double([theta] * numImgs)
abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
max_now = np.argmax(Efficiency)
max_eff_index.append(max_now)
max_eff.append(Efficiency[max_now])
best_struc.append(strucs[max_now, :, :].reshape(-1))
t.update()
print('{} {:.2f} {} {:.2f} {} {:.2f} {} {:.2f} '.format('Lowest:', min(max_eff), 'Highest:', max(
max_eff), 'Average:', np.mean(np.array(max_eff)), 'Var:', np.var(np.array(max_eff))))
def train(models, optimizers, schedulers, eng, params):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
generator = models
optimizer_G = optimizers
scheduler_G = schedulers
generator.train()
pca = PCA_model("PCA.mat")
make_figure_dir(params.output_dir)
# lamda_list = [600, 700, 800, 900, 1000, 1100, 1200]
# theta_list = [40, 50, 60, 70, 80]
if params.restore_from is None:
Eff_mean_history = []
Binarization_history = []
pattern_variance = []
iter0 = 0
imgs_2 = []
Effs_2 = []
else:
Eff_mean_history = params.checkpoint['Eff_mean_history']
iter0 = params.checkpoint['iter']
Binarization_history = params.checkpoint['Binarization_history']
pattern_variance = params.checkpoint['pattern_variance']
imgs_2 = params.checkpoint['imgs_2']
Effs_2 = params.checkpoint['Effs_2']
if params.tensorboard:
loss_logger = logger.set_logger(params.output_dir)
with tqdm(total=params.numIter, leave=False, ncols=70) as t:
for i in range(params.numIter):
it = i + 1
normIter = it / params.numIter
params.iter = it + iter0
scheduler_G.step()
# binarization amplitude in the tanh function
if params.iter < 1000:
params.binary_amp = int(params.iter / 100) + 1
# use solver and phyiscal gradient to update the Generator
params.solver_batch_size = int(params.solver_batch_size_start + (params.solver_batch_size_end -
params.solver_batch_size_start) * (1 - (1 - normIter)**params.solver_batch_size_power))
if params.noise_constant == 1:
noise = (torch.ones(params.solver_batch_size, params.noise_dims).type(
Tensor) * randconst) * params.noise_amplitude
else:
if params.noise_distribution == 'uniform':
noise = ((torch.rand(params.solver_batch_size, params.noise_dims).type(
Tensor) * 2. - 1.) * params.noise_amplitude)
else:
noise = (torch.randn(params.solver_batch_size, params.noise_dims).type(
Tensor)) * params.noise_amplitude
"""
batch equivalent
"""
# lamdaconst = torch.rand(1).type(Tensor) * 600 + 600
# thetaconst = torch.rand(1).type(Tensor) * 40 + 40
# lamda = torch.ones(params.solver_batch_size,
# 1).type(Tensor) * lamdaconst
# theta = torch.ones(params.solver_batch_size,
# 1).type(Tensor) * thetaconst
"""
batch randomized
"""
lamda = torch.rand(params.solver_batch_size, 1).type(Tensor) * 600 + 600
theta = torch.rand(params.solver_batch_size, 1).type(Tensor) * 40 + 40
z = torch.cat((lamda, theta, noise), 1)
z = z.to(device)
generator.to(device)
gen_imgs = generator(z, params.binary_amp)
img = torch.squeeze(gen_imgs[:, 0, :]).data.cpu().numpy()
img = matlab.double(img.tolist())
wavelength = matlab.double(lamda.cpu().numpy().tolist())
desired_angle = matlab.double(theta.cpu().numpy().tolist())
Grads_and_Effs = eng.GradientFromSolver_1D_parallel(
img, wavelength, desired_angle)
Grads_and_Effs = Tensor(Grads_and_Effs)
grads = Grads_and_Effs[:, 1:]
Efficiency_real = Grads_and_Effs[:, 0]
Eff_max = torch.max(Efficiency_real.view(-1))
Eff_reshape = Efficiency_real.view(-1, 1).unsqueeze(2)
Gradients = Tensor(grads).unsqueeze(
1) * gen_imgs * (1. / params.sigma * torch.exp((Eff_reshape - Eff_max) / params.sigma))
# Train generator
optimizer_G.zero_grad()
binary_penalty = params.binary_penalty_start if params.iter < params.binary_step_iter else params.binary_penalty_end
if params.binary == 1:
g_loss_solver = - torch.mean(torch.mean(Gradients, dim=0).view(-1)) - torch.mean(
torch.abs(gen_imgs.view(-1)) * (2.0 - torch.abs(gen_imgs.view(-1)))) * binary_penalty
else:
g_loss_solver = - torch.mean(torch.mean(Gradients, dim=0).view(-1))
g_loss_solver.backward()
optimizer_G.step()
if params.tensorboard:
loss_logger.scalar_summary('loss', g_loss_solver.cpu().detach().numpy(), it)
if it == 1 or it % params.save_iter == 0:
# visualization
generator.eval()
outputs_imgs = sample_images(generator, 100, params)
Binarization = torch.mean(torch.abs(outputs_imgs.view(-1)))
Binarization_history.append(Binarization)
diversity = torch.mean(torch.std(outputs_imgs, dim=0))
pattern_variance.append(diversity.data)
numImgs = 1 if params.noise_constant == 1 else 100
img_2_tmp, Eff_2_tmp = PCA_analysis(
generator, pca, eng, params, numImgs)
imgs_2.append(img_2_tmp)
Effs_2.append(Eff_2_tmp)
Eff_mean_history.append(np.mean(Eff_2_tmp))
utils.plot_loss_history(
([], [], Eff_mean_history, pattern_variance, Binarization_history), params.output_dir)
generator.train()
# save model
model_dir = os.path.join(
params.output_dir, 'model', 'iter{}'.format(it + iter0))
os.makedirs(model_dir, exist_ok=True)
utils.save_checkpoint({'iter': it + iter0,
'gen_state_dict': generator.state_dict(),
'optim_G_state_dict': optimizer_G.state_dict(),
'scheduler_G_state_dict': scheduler_G.state_dict(),
'Eff_mean_history': Eff_mean_history,
'Binarization_history': Binarization_history,
'pattern_variance': pattern_variance,
'Effs_2': Effs_2,
'imgs_2': imgs_2
},
checkpoint=model_dir)
if it == params.numIter:
model_dir = os.path.join(params.output_dir, 'model')
utils.save_checkpoint({'iter': it + iter0,
'gen_state_dict': generator.state_dict(),
'optim_G_state_dict': optimizer_G.state_dict(),
'scheduler_G_state_dict': scheduler_G.state_dict(),
'Eff_mean_history': Eff_mean_history,
'Binarization_history': Binarization_history,
'pattern_variance': pattern_variance,
'Effs_2': Effs_2,
'imgs_2': imgs_2
},
checkpoint=model_dir)
io.savemat(params.output_dir + '/scatter.mat',
mdict={'imgs_2': np.asarray(imgs_2), 'Effs_2': np.asarray(Effs_2)})
return
t.update()
| null |
train_and_evaluate.py
|
train_and_evaluate.py
|
py
| 17,317 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.cuda.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.rand",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "utils.plot_scatter",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "utils.plot_histogram",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.sign",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.ones",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.sign",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "utils.plot_histogram",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "scipy.io.savemat",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "scipy.io.savemat",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "utils.plot_heatmap",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.var",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "logger.set_logger",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "matlab.engine.double",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "matlab.engine.double",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "matlab.engine",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "torch.max",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "torch.std",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "utils.plot_loss_history",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "utils.save_checkpoint",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "utils.save_checkpoint",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "scipy.io.savemat",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 410,
"usage_type": "call"
}
] |
178625929
|
"""
Team Sleep Deprived: Victoria Gao, Renee Mui, Anya Zorin
SoftDev
K13 -- Template for Success
2020-10-19
"""
from flask import Flask, render_template
import csv, random
app = Flask(__name__)
@app.route("/")
def hello():
return "No hablo queso!"
@app.route("/occupyflaskst")
def get_job():
with open("data/occupations.csv", "r", newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
header = next(csv_file) # get rid of the header
dictionary = {} # initialize the dictionary
for row in reader:
dictionary[row[0]] = [float(row[1]),row[2]] # populate the dictionary
del dictionary['Total'] #delete the last row
percentages = [percent[0] for percent in dictionary.values()]
random_job = random.choices(list(dictionary.keys()),weights = percentages, k=1)
return render_template('tablified.html',
title = "K13 -- Template for Success",
jobs = list(dictionary.keys()),
percents = percentages,
urls = [url[1] for url in dictionary.values()],
r_job = random_job[0],
length = len(list(dictionary.keys()))
)
if __name__ == "__main__":
app.debug = True
app.run()
| null |
13_tempwork/app.py
|
app.py
|
py
| 1,395 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.choices",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 27,
"usage_type": "call"
}
] |
20983452
|
# -*- coding: utf8 -*-
import os
import unittest
from appium import webdriver
from time import sleep
directory = '%s/' % os.getcwd()
class IosDeviceTests(unittest.TestCase):
"ios device测试用例"
def setUp(self):
"Setup for the test"
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '9.2'
desired_caps['deviceName'] = 'iPhone 6s'
desired_caps['udid'] = '71f6ab27b47d210d0268c7e179504b47b3a4d498'
desired_caps['app'] = os.path.abspath('./apps/TestApp.app')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
self.driver.implicitly_wait(10)
def tearDown(self):
"Tear down the test"
self.driver.quit()
def test_x(self):
pass
#---START OF SCRIPT
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(IosDeviceTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| null |
tests/appium_test_ios_device.py
|
appium_test_ios_device.py
|
py
| 999 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "appium.webdriver.Remote",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "unittest.TestLoader",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "unittest.TextTestRunner",
"line_number": 36,
"usage_type": "call"
}
] |
599078195
|
import numpy as np
import os
import matplotlib
matplotlib.use('agg')
import matplotlib.dates as mpd
from watchtower import commits_
import matplotlib.pyplot as plt
from tqdm import tqdm
import calendar
import pandas as pd
import traceback
def count_doc_commits(user, project, search_queries=None,
groupby='month', start='2017-01-01', stop=None):
"""
Parameters
----------
user : string
github username
project : string
project name
search_queries : list of strings
Strings to search within commits
groupby : ['month', 'weekday']
unit of time to group commits.
since : time string
Only use commits after this time
"""
# Load commit data and return the date of each commit
if search_queries is None:
search_queries = ['DOC', 'docs', 'docstring', 'documentation', 'docathon']
start = pd.to_datetime(start)
if stop is None:
stop = pd.datetime.today()
else:
stop = pd.to_datetime(stop)
if project == 'IPython':
import IPython; IPython.embed()
commits = commits_.load_commits(user, project)
if commits is None:
return None, None
commits['message'] = [commit['message'] for commit in commits['commit']]
if commits is None or not len(commits):
raise ValueError(
'No commits: load_commits returned None, '
'or None like : %r' % commits)
dates = pd.to_datetime([ii['author']['date']
for ii in commits['commit']])
commits.index = dates
# Define full date range
all_dates = pd.date_range(start, stop, freq='D')
all_dates = pd.DataFrame(np.zeros(all_dates.shape[0], dtype=int),
index=all_dates)
# Remove commits from the past we don't want
mask_since = (dates > start) * (dates < stop)
commits = commits[mask_since]
# Find commits that match our queries
mask_doc = np.zeros(commits.shape[0])
for query in search_queries:
# This is a really hacky way to do this but python keeps giving me errors
for ix, message in enumerate(commits['message'].values):
if message.find(query) != -1:
mask_doc[ix] += 1
mask_doc = np.array(mask_doc) > 0
commits['is_doc'] = mask_doc
# Tally the total number vs. doc-related commits
commits_doc = commits['is_doc'].resample('D').sum()
commits_all = commits['is_doc'].resample('D').count()
for date, val in commits_all.items():
all_dates.loc[date, 'All'] = val
for date, val in commits_doc.items():
all_dates.loc[date, 'Doc'] = val
# Clean up
all_dates = all_dates.drop(0, axis=1)
all_dates = all_dates.replace(np.nan, 0)
all_dates = all_dates.astype(int)
return all_dates
def plot_commits(all_dates):
    """Plot daily commit counts as a bar chart.

    Parameters
    ----------
    all_dates : pandas.DataFrame
        Date-indexed frame with one column per series (e.g. 'All', 'Doc').

    Returns
    -------
    fig, ax : matplotlib Figure and Axes of the rendered chart.
    """
    fig, ax = plt.subplots(figsize=(8, 4))
    for label in all_dates.columns:
        ax.bar(all_dates.index.to_pydatetime(), all_dates[label].values,
               label=label)
    # Mark today's date with a dashed vertical line.
    # BUGFIX: `pd.datetime` was deprecated and removed from pandas; use
    # pd.Timestamp.today() instead.
    today = pd.Timestamp.today()
    ax.axvline(today, ls='--', alpha=.5, lw=2, color='k')
    # BUGFIX: ax.grid("off") passes a truthy string, which does NOT hide the
    # grid; pass False explicitly.
    ax.grid(False)
    ax.spines['right'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_major_formatter(mpd.DateFormatter('%b\n%d'))
    # Y-axis formatting: at least 5 units high, with light horizontal guides.
    ax.set_ylabel("# commits")
    ax.set_ylim([0, np.max([5, int(ax.get_ylim()[-1])])])
    yticks = ax.get_yticks()
    for l in yticks:
        ax.axhline(l, linewidth=0.75, zorder=-10, color="0.5")
    ax.set_yticks(yticks)
    ax.legend(loc=1)
    # NOTE(review): `project` is a module-level global set by the driver loop
    # below — consider passing it in as a parameter.
    ax.set_title(project, fontweight="bold", fontsize=22)
    plt.tight_layout()
    plt.autoscale(tight=True)
    return fig, ax
# --- Run the script ---
# Rows of .downloaded_projects are (github_user, project_name) pairs.
informations = pd.read_csv(".downloaded_projects").values
try:
    os.makedirs("build/images")
except OSError:
    # Directory already exists; that's fine.
    pass
groupby = 'weekday'
start = '2017-02-02'
stop = '2017-03-10'
exceptions = []
all_dates = []
for user, project in tqdm(informations):
    try:
        this_all_dates = count_doc_commits(user, project,
                                           groupby=groupby, start=start, stop=stop)
        fig, ax = plot_commits(this_all_dates)
        if fig is None:
            exceptions.append(project)
            continue
        filename = os.path.join("build/images", project.lower() + ".png")
        fig.savefig(filename, bbox_inches='tight')
        # Collect data so we can save it
        this_all_dates['project'] = project
        all_dates.append(this_all_dates)
    except Exception as e:
        # Record the failing project but keep processing the rest.
        exceptions.append(project)
        traceback.print_exception(None, e, e.__traceback__)
all_dates = pd.concat(all_dates, axis=0)
all_dates.to_csv('.totals.csv')
print('Finished building images.\nExceptions: {}'.format(exceptions))
| null |
src/watchtower/plot_commits.py
|
plot_commits.py
|
py
| 4,921 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.datetime.today",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pandas.to_datetime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "IPython.embed",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "watchtower.commits_.load_commits",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "watchtower.commits_",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "pandas.datetime.today",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.dates.DateFormatter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.dates",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.autoscale",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exception",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 151,
"usage_type": "call"
}
] |
483279032
|
import discord
import asyncio
import pickle
import logging
import time
class area():
    """A location in the game world: a display name plus a flavour-text
    description shown when the player looks around."""

    def __init__(self):
        # Placeholder starting zone every new player spawns into.
        self.areaName = '???'
        self.areaDescription = ('in the abyss of nothingness you are all, and all is you.\n'
                                'And yet...\n'
                                'You can\'t seem to shake the feeling that you are not alone')
class player():
    """A connected user's game state: current location and chosen name."""
    def __init__(self):
        self.Location = area()  # starts in the default '???' area
        self.Username = ''  # set later via the $name command in on_message
# Single global Discord client; PlayerList maps message.author.id -> player.
client = discord.Client()
PlayerList = {}
@client.event
async def on_ready():
    # Startup banner: confirm login identity and show any loaded players.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    print(PlayerList)
@client.event
async def on_message(message):
    """Dispatch chat commands: !test, !sleep, !checkin, !d create profile,
    !savegame, !loadgame.

    NOTE(review): this uses the discord.py 0.x async API
    (client.send_message / wait_for_message / logs_from), which was removed
    in discord.py 1.0+.
    """
    global PlayerList
    if message.content.startswith('!test'):
        # Count how many of the last 100 channel messages are the author's.
        counter = 0
        tmp = await client.send_message(message.channel, 'Calculating messages...')
        async for log in client.logs_from(message.channel, limit=100):
            if log.author == message.author:
                counter += 1
        await client.edit_message(tmp, 'You have {} messages.'.format(counter))
    elif message.content.startswith('!sleep'):
        await asyncio.sleep(5)
        await client.send_message(message.channel, 'Done sleeping')
    elif message.content.lower().startswith('!checkin'):
        if (message.author.id in PlayerList):
            await client.send_message(message.channel, 'Hello %s Welcome back' % PlayerList[message.author.id].Username)
            # BUGFIX: the format string has the named field {name} but the
            # argument was passed positionally, raising KeyError at runtime;
            # pass it as a keyword.
            await client.send_message(message.channel, 'you are in the {name} would you like to look around'.format(
                name=PlayerList[message.author.id].Location.areaName))
            def check(msg):
                return msg.content.startswith('$look around')
            message = await client.wait_for_message(author=message.author, check=check)
            await client.send_message(message.channel, PlayerList[message.author.id].Location.areaDescription)
        else:
            await client.send_message(message.channel,
                                      'Hello %s Welcome to Discordia this is an interactive text based MMO \n which is currently under production'
                                      % message.author.name )
            await client.send_message(message.channel, 'If you would liek to create a profile please type \"!d create profile\" ')
    elif message.content.startswith('!d create profile'):
        await client.send_message(message.channel, 'thank you for joining this projec')
        await client.send_message(message.channel, 'To pick a name for your in game player type $name nameHere')
        PlayerList[message.author.id] = player()
        PlayerList[message.author.id].Location = area()
        def check(msg):
            return msg.content.startswith('$name')
        message = await client.wait_for_message(author=message.author, check=check)
        name = message.content[len('$name'):].strip()
        PlayerList[message.author.id].Username = name
        await client.send_message(message.channel, '{} is a good name'.format(name))
    elif message.content.startswith('!savegame'):
        # Persist the entire player table with pickle.
        afile = open('PlayerData', 'wb')
        pickle.dump(PlayerList, afile)
        afile.close()
    elif message.content.startswith('!loadgame'):
        # NOTE(review): pickle.load on a file that could be tampered with is
        # an arbitrary-code-execution risk.
        file2 = open('PlayerData', 'rb')
        PlayerList = pickle.load(file2)
        file2.close()
# NOTE(review): hard-coded bot token committed to source — revoke this token
# and load it from an environment variable or config file instead.
client.run('Mjg4OTY2NjI1MjAwMTc3MTUy.DjZkJA.aRmsmA2Yq7AjH5e5VIoqMiG_ftQ')
| null |
diBot.py
|
diBot.py
|
py
| 3,488 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.Client",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 91,
"usage_type": "call"
}
] |
262006952
|
# Lagrange interpolation
# Builds the interpolating polynomial in symbolic form, then plots it.
import numpy as np
import sympy as sym
import matplotlib.pyplot as plt
# INPUT: test data
# X values (4 interpolation nodes)
conj_x_1 = float(input(" Conjunto X punto 1: "))
conj_x_2 = float(input(" Conjunto X punto 2: "))
conj_x_3 = float(input(" Conjunto X punto 3: "))
conj_x_4 = float(input(" Conjunto X punto 4: "))
# Y values
print("_____________")
conj_y_1 = float(input(" Conjunto Y punto 1: "))
conj_y_2 = float(input(" Conjunto Y punto 2: "))
conj_y_3 = float(input(" Conjunto Y punto 3: "))
conj_y_4 = float(input(" Conjunto Y punto 4: "))
# Example inputs -- xi: 0, 0.33, 0.66, 1
#                   yi: 1, 1.391, 1.935, 2.718
xi = np.array([conj_x_1, conj_x_2, conj_x_3, conj_x_4])
yi = np.array([conj_y_1, conj_y_2, conj_y_3, conj_y_4])
# PROCEDURE
n = len(xi)
x = sym.Symbol('x')
# Sum of Lagrange basis terms, each scaled by its yi
polinomio = 0
for i in range(0, n, 1):
    # i-th Lagrange basis term: product over j != i of (x - xj)/(xi - xj)
    termino = 1
    for j in range(0, n, 1):
        if (j != i):
            termino = termino * (x - xi[j]) / (xi[i] - xi[j])
    polinomio = polinomio + termino * yi[i]
# Expand the polynomial into standard form
px = polinomio.expand()
# Callable version for numeric evaluation
pxn = sym.lambdify(x, polinomio)
# Sample points for the plot (within the node range)
a = np.min(xi)
b = np.max(xi)
muestras = 101
xi_p = np.linspace(a, b, muestras)
yi_p = pxn(xi_p)
# Output
print('Polinomio de Lagrange, expresiones')
print(polinomio)
print('\nPolinomio de Lagrange: ')
print(px)
# Plot the nodes and the interpolating polynomial
plt.title('Interpolación Lagrange')
plt.plot(xi, yi, 'o', label='Puntos', color='black')
plt.plot(xi_p, yi_p, label='Polinomio', color='hotpink')
plt.xlabel('Polinomio de Lagrange: {0}'.format(px))
plt.legend()
plt.show()
| null |
metodosMatematicos/expo/polynomial.py
|
polynomial.py
|
py
| 1,708 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sympy.Symbol",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sympy.lambdify",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
}
] |
626302394
|
import socketserver
import json
import configparser
import os
from conf import settings
# Numeric status codes exchanged with the client (see send_response / put).
STATUS_CODE = {
    1: "verify success",
    2: "verify failed",
    3: "file exist",
    4: "file exist,but not complete",
    5: "file not exist"
}
class ServerHandler(socketserver.BaseRequestHandler):
    """FTP-like per-connection handler.

    Receives JSON commands of the form {"action": "...", ...} over the
    socket and dispatches them by reflection to the method of the same name
    (auth / put / ls / cd / mkdir).
    """
    def handle(self):
        # Main receive loop: one JSON command per recv, for the connection's
        # lifetime.
        while 1:
            data = self.request.recv(1024).strip()
            data = json.loads(data.decode('utf-8'))
            '''
            {"action":"auth","username","hl","pwd":123} json格式数据
            '''
            if data.get("action"):
                if hasattr(self, data.get("action")):  # dispatch the command via reflection
                    func = getattr(self, data.get("action"))
                    func(**data)  # pass the dict's key/value pairs as kwargs
                else:
                    print("Invalid cmd")
            else:
                print("Invalid cmd")
    # Verify the username and password sent by the client.
    def auth(self, **data):
        print(data)
        username = data["username"]
        password = data["password"]
        verify_success = self.authenticate(username, password)  # True or False
        if verify_success:
            self.send_response(1)
        else:
            self.send_response(2)
    # Compare against the credentials stored in the accounts.cfg file.
    def authenticate(self, username, password):
        # NOTE(review): implicitly returns None (falsy) on every failure path.
        cfg = configparser.ConfigParser()
        cfg.read(settings.ACCOUNT_PATH)
        if username in cfg.sections():
            if cfg[username]["Password"] == password:
                print("用户名验证通过")
                # Remember the authenticated user and their home directory.
                self.user = username
                self.homePath = os.path.join(settings.BASE_DIR, "home", self.user)
                if not os.path.exists(self.homePath):
                    os.makedirs(self.homePath)  # create the user's storage directory
                return True
    # Send the verification status code back to the client.
    def send_response(self, state_code):
        response = {"status_code": state_code}
        self.request.sendall(json.dumps(response).encode("utf-8"))
    #
    def put(self, **data):
        # Receive an uploaded file; supports resuming partial uploads.
        print(data)
        file_name = data['file_name']
        file_size = data['file_size']
        target_path = data['target_path']
        abs_path = os.path.join(self.homePath, target_path, file_name)
        # Cases handled below: 1. file exists 2. file missing
        # 3. file complete 4. file incomplete
        received_size = 0
        if os.path.exists(abs_path):
            file_has_size = os.stat(abs_path).st_size
            if file_has_size < file_size:
                # File exists but is incomplete --> offer resumed transfer
                self.request.sendall('4'.encode('utf-8'))
                choice = self.request.recv(1024).decode('utf-8')
                if choice == 'Y':
                    # Client accepted: tell it the current size, append mode.
                    self.request.sendall(str(file_has_size).encode('utf-8'))
                    received_size += file_has_size
                    fd = open(abs_path, 'ab')
                else:
                    fd = open(abs_path, 'wb')
            else:
                # File is already complete; nothing to receive.
                self.request.sendall('3'.encode('utf-8'))
                return
        else:
            # File does not exist: have the client upload from scratch.
            self.request.sendall('5'.encode('utf-8'))
            fd = open(abs_path, 'wb')
        while received_size < file_size:  # keep receiving until complete
            try:
                data = self.request.recv(1024)
            except Exception as e:
                break
            fd.write(data)
            received_size += len(data)
        print('file upload success')
        fd.close()
    def ls(self, **data):
        # List the user's current directory contents.
        file_list = os.listdir(self.homePath)
        file_str = '\n'.join(file_list)
        if not len(file_str):
            file_str = '<empty dir>'
        self.request.sendall(file_str.encode('utf-8'))
    def cd(self, **data):
        # Change the working directory ('..' moves up one level).
        # NOTE(review): no containment check — '..' can escape the user's home.
        dirname = data['dirname']
        if dirname == '..':
            self.homePath = os.path.dirname(self.homePath)
        else:
            self.homePath = os.path.join(self.homePath, dirname)
        self.request.sendall(self.homePath.encode('utf-8'))
    def mkdir(self, **data):
        # Create a directory (nested when the name contains '/').
        dirname = data['dirname']
        dirpath = os.path.join(self.homePath, dirname)
        print(dirpath)
        if not os.path.exists(dirpath):
            if '/' in dirname:
                os.makedirs(dirpath)
            else:
                os.mkdir(dirpath)
            self.request.sendall('make dir success'.encode('utf-8'))
        else:
            self.request.sendall('dir already exist'.encode('utf-8'))
| null |
FTP_server/core/server.py
|
server.py
|
py
| 4,713 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "socketserver.BaseRequestHandler",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "conf.settings.ACCOUNT_PATH",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "conf.settings",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "conf.settings.BASE_DIR",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "conf.settings",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 125,
"usage_type": "call"
}
] |
315408774
|
# _*_ coding:utf-8 _*_
# def1
import requests
import re
import time
import os
import json
# import jihua_2008cai
import cookie_yang
import shenjihua
# import jihuaapk
# Request headers used for login (and, via the alias below, for bets).
headers={
    'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
}
headers_bets =headers  # alias: same headers object reused for bet requests
filename='cookes.txt'  # cookie-jar file on disk (original misspelling kept)
requests_cookie=''  # rebound to a requests.Session() in login()
YICITOU =1  # multiplier applied when printing the stake in placeOrder()
# ======================================================================
def login(flag):
    """Log in to the site as guest and save the session cookies to disk.

    Parameters
    ----------
    flag : int
        When 1, immediately call placeOrder() after a successful login.
    """
    urls ='http://main.by189.cn/do_login'
    params={
        'username': '!guest!',
        'password': '!guest!'
    }
    global requests_cookie
    try:
        requests_cookie =requests.Session()
        html = requests_cookie.post(urls,data=params,headers=headers)
        JSON_data_History= json.loads(html.text)
        if JSON_data_History['status']==1:
            cookie_yang.save_cookies_lwp(requests_cookie.cookies, filename)
            # placeOrder()
            if flag ==1:
                placeOrder()
            print(JSON_data_History['msg'])
        else:
            print(JSON_data_History['msg'])
    except Exception as e:
        print('login----',e)
        # NOTE(review): retrying placeOrder() here can recurse back into
        # login() if the session is still broken — no recursion limit guard.
        placeOrder()
# ======================================================================
# 获取当前下注的期
def now_oder_qihao():
    """Return the next draw number ("phase") for the bjpk10 lottery."""
    url ='http://by189.cn/Mobile/Ajax/mobileAllData?lottery_code=all'
    res = requests_cookie.get(url,headers=headers)
    # print(res.json()['lottery']['cqssc']['next_phase'])
    qihao_now = res.json()['lottery']['bjpk10']['next_phase']
    return qihao_now
# 获取当前下注的期
# ======================================================================
def getsSelfData():
    """Fetch the full lottery status JSON from the site.

    Returns the parsed JSON dict; callers read ['lottery']['bjpk10'] for
    the bjpk10 game state.
    """
    url ='http://by189.cn/Mobile/Ajax/mobileAllData?lottery_code=all'
    res = requests_cookie.get(url,headers=headers)
    # print(res.json()['lottery']['cqssc']['next_phase'])
    # qihao_now = res.json()['lottery']['cqssc']['next_phase']
    # print(res.json())
    aa = res.json()
    # NOTE(review): `bb` is computed but never used, and res.json() is
    # parsed twice — the second call below re-parses the same response.
    bb =aa["lottery"]["bjpk10"]["open_result"]
    # bb=['10', '04', '08', '01', '06', '02', '05', '09', '03', '07']
    # cc = []
    # for item in bb:
    #     cc.append(int(item))
    # aa["lottery"]["bjpk10"]["open_result"] = cc
    return res.json()
# ======================================================================
def writeFile(d):
    """Append one bet record to historyMoney.txt.

    Parameters
    ----------
    d : dict
        Lottery state dict; keys used: 'open_phase', 'historyLottery',
        'myBuyMoney' (stake-doubling exponent), 'open_result'.
    """
    # d ={'differtime': 254, 'next_phase': '20180413058', 'open_phase': '20180413057', 'open_result': ['8', '0', '7', '5', '7'], 'myBuyMoney': 0, 'historyLottery': ['0', '1', '3', '6', '9']}
    time_ ='当前时间'+ time.strftime('%Y-%m-%d %H:%M:%S')+'\n'
    nowQI ='第'+str(d['open_phase'])+'期'+'\n'
    buyHao = '下注记录:'+str(d['historyLottery'])+'\n'
    # Stake doubles with each consecutive loss: 2 ** myBuyMoney.
    money_ ='单注money_:'+str(pow(2, d['myBuyMoney']))+'\n'
    result ='上期结果:'+str(d['open_result'])+'\n'
    kong= '======================================================================'
    # BUGFIX: use a context manager so the handle is always closed, even if
    # a key lookup or the write itself raises.
    with open('historyMoney.txt','a',encoding='UTF-8') as f:
        f.write('\n'+kong+'\n'+time_+nowQI+buyHao+money_+result+str(d))
# ======================================================================
moreBeishu=0  # consecutive-loss (martingale) streak counter
def readFile():
    """Read the last bet record and decide the next doubling exponent.

    Returns 0 after a win or when there is no usable history; otherwise
    returns the previous record's exponent + 1.
    """
    global moreBeishu
    # get_open_phase = {"differtime":507,'myBuyMoney':1233,"next_phase":"20180413037","open_phase":"20180413036","open_result":["7","4","9","8","8"]}
    f = open('historyMoney.txt','r',encoding='UTF-8')
    lines =f.readlines()
    f.close()
    if len(lines)>0:
        get_open_phase = lines[-1]
        _getsSelfData = getsSelfData()
        # The record was written with str(dict); swap quotes so json can
        # parse it (fragile if any value itself contains a quote).
        aa =get_open_phase.replace('\'','\"')
        get_open_phase =json.loads(aa)
        # if True:
        # Only evaluate if the last bet's draw is the one that just opened.
        if str(get_open_phase['next_phase']) == _getsSelfData['lottery']['bjpk10']['open_result'] if False else str(get_open_phase['next_phase']) == _getsSelfData['lottery']['bjpk10']['open_phase']:
            # isWinning =set(get_open_phase['historyLottery']) &set(_getsSelfData['lottery']['bjpk10']['open_result'][0])
            kaijiang_ge = _getsSelfData['lottery']['bjpk10']['open_result'][0]
            buyhistory =get_open_phase['historyLottery']
            if kaijiang_ge in buyhistory:
                # Won: log the streak length and reset the counter.
                f = open('isWinning.txt','a',encoding='UTF-8')
                f.write(str(moreBeishu)+'倍中'+'\n')
                moreBeishu =0
                f.close()
                return 0
            else:
                if get_open_phase['myBuyMoney']=='':
                    return 0
                else:
                    # Lost: bump the exponent (martingale).
                    f = open('isWinning.txt','a',encoding='UTF-8')
                    if get_open_phase['myBuyMoney'] ==2 or get_open_phase['myBuyMoney'] =='2':
                        print("从头再来")
                        # get_open_phase['myBuyMoney']=-1
                        # NOTE(review): the reset above is commented out, so
                        # the exponent keeps growing past 2.
                    r = get_open_phase['myBuyMoney']+1
                    moreBeishu =r
                    f.write('当前:挂'+str(r-1)+'倍\n')
                    f.close()
                    return r
        else:
            return 0
    return 0
# ======================================================================
def isFile():
    """Ensure the two log files exist, creating empty ones on first run.

    Creates both ``historyMoney.txt`` and ``isWinning.txt`` when the
    history file is missing; does nothing otherwise.
    """
    if not os.path.exists(r'historyMoney.txt'):
        print(' ')
        # BUGFIX: the original reassigned `f` to the second handle and only
        # closed that one, leaking the first file descriptor.
        with open('historyMoney.txt','w',encoding='UTF-8'):
            pass
        with open('isWinning.txt','w',encoding='UTF-8'):
            pass
# ======================================================================
# Simulated order placement
ISBUY=True  # True while we are allowed to bet in the current window
def placeOrder():
    """Place one bet per betting window: fetch the tip data, build the bet
    payload, POST it, and log the result to historyMoney.txt."""
    isFile()
    global ISBUY
    if not ISBUY:
        m =time.localtime().tm_min
        # Minutes ending 2-4 / 7-9: re-arm betting for the next window.
        if m%10>=2 and m%10<=4 or m%10>=7 and m%10<=9:
            # re-enable after the previous window has passed
            ISBUY = True
    if time.localtime().tm_hour<9:
        print('休息中', time.strftime('%Y-%m-%d %H:%M:%S'))
        return
    time.sleep(1)
    if ISBUY and is_timebuy():
        urls_bets = 'http://main.by189.cn/bets'
        # POST parameters for the bet
        params_bets={
            'code':'bjpk10',
            'drawNumber':now_oder_qihao(),
        }
        # Fetch the tip data that decides what to buy
        # get_jihua_parms = jihua_2008cai.get_url()
        get_jihua_parms = shenjihua.get_links()
        # get_jihua_parms = jihuaapk.get_info()18- 01k-feng-04----06 feng18
        orders = get_jihua_parms['buyParms']
        historyLottery_will = get_jihua_parms['will_buyhao']
        myMoney = readFile()
        # Flatten the orders into the key form the API expects
        print('下注========',pow(2, myMoney)*YICITOU)
        for idnex, item in enumerate(orders):
            for kk in item:
                listData = 'orders'+'['+str(idnex)+']'+'['+kk+']'# build the flattened key
                item['money'] =1
                params_bets[listData]=item[kk]
        params_bets1 =params_bets # final order payload
        requests_cookie.cookies=get_cookie()
        html = requests_cookie.post(urls_bets,data=params_bets1,headers=headers_bets)
        print('print========myMoney',params_bets1)
        JSON_data_History= json.loads(html.text)
        if JSON_data_History['status']==1:
            # Success: annotate the latest state and append it to the log.
            getNowData = getsSelfData()
            getNowData['lottery']['bjpk10']['myBuyMoney']=myMoney
            getNowData['lottery']['bjpk10']['historyLottery']=historyLottery_will
            writeFile(getNowData['lottery']['bjpk10'])
            ISBUY = False
            print('下单成功:'+time.strftime('%Y-%m-%d %H:%M:%S'))
            print('下单money:'+JSON_data_History['money'])
            return
        else:
            print('失败了================'+JSON_data_History['info'])
            if JSON_data_History['info']=='单笔投注金额不能大于':
                login(1)
    else:
        if not is_timebuy():
            print('等着吧---不在时间内',time.strftime('%Y-%m-%d %H:%M:%S'))
        elif not ISBUY:
            print('等着吧---买过了',time.strftime('%Y-%m-%d %H:%M:%S'))
        else:
            pass
# ======================================================================
def is_timebuy(now=None):
    """Return True when the given time falls in a betting window.

    Windows are minutes whose last digit is 0, 1, 5 or 6, but only from
    09:00 onward.

    Parameters
    ----------
    now : time.struct_time, optional
        Time to test; defaults to ``time.localtime()``.  Added
        (backward-compatibly) so the schedule logic is testable.
    """
    if now is None:
        now = time.localtime()
    if now.tm_hour < 9:
        # Draws have not started for the day yet.
        return False
    # The original condition included `m % 10 >= 0`, which is always true;
    # the effective test is the minute's last digit being 0, 1, 5 or 6.
    return now.tm_min % 10 in (0, 1, 5, 6)
def hongbao():
    # NOTE(review): `urls` and `params` are not defined at module scope —
    # calling this raises NameError. Looks like an unfinished red-packet
    # feature; it needs its own URL and parameters before it can work.
    html = requests_cookie.post(urls,data=params,headers=headers)
# ======================================================================
def get_cookie():
    """Load the saved cookie jar from ``filename``.

    Returns
    -------
    The cookie jar from ``cookie_yang.load_cookies_from_lwp``, or None
    (after printing a warning) when no cookie file could be loaded.
    """
    # Load once instead of twice — the original called the loader again on
    # the success path, re-reading the file from disk.
    cookies = cookie_yang.load_cookies_from_lwp(filename)
    if cookies:
        return cookies
    print('无cookie')
    return
# ======================================================================
if __name__ =='__main__':
    # writeFile()
    # writeFile()
    # flag=2 -> log in without immediately placing an order.
    login(2)
    time.sleep(5)
    # placeOrder()
    # Poll forever: attempt an order roughly once a minute.
    while True:
        pass
        try:
            placeOrder()
            time.sleep(59)
        except Exception as e:
            print(e)
| null |
jihua_more13-20180416/bjpk10/login.py
|
login.py
|
py
| 8,913 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.Session",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cookie_yang.save_cookies_lwp",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "time.localtime",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "shenjihua.get_links",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "cookie_yang.load_cookies_from_lwp",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cookie_yang.load_cookies_from_lwp",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 242,
"usage_type": "call"
}
] |
488170646
|
import warnings
warnings.filterwarnings('always')
import numpy as np
import pandas as pd
import gc
import os
from pathlib import Path
from ast import literal_eval
from collections.abc import Mapping
from sklearn.model_selection import train_test_split, StratifiedKFold, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from xgboost import XGBClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
# Input data location and directory for per-fold CV result CSVs.
datadir = Path("/home/solarml/")
resultDir = Path("/home/solarml/WideSpace")
names = ['fcclean', 'fcfilnan']
## settings
seed_no = 42
np.random.seed(seed_no)
# folds = 5
outerFolds = 5
innerFolds = 5
# Inner-CV splitter used during the hyper-parameter search.
skf = StratifiedKFold(n_splits=innerFolds, shuffle=True, random_state=seed_no)
# One default-constructed estimator per algorithm under comparison.
svm = SVC()
linsvm = LinearSVC()
logr = LogisticRegression()
dtree = DecisionTreeClassifier()
mlp = MLPClassifier()
rfrst = RandomForestClassifier()
xgboost = XGBClassifier()
xtrees = ExtraTreesClassifier()
# Name -> estimator map; keys match the MLparams search-space dict below.
MLmodels = {
    'svm' : svm,
    'linsvm' : linsvm,
    'logr' : logr,
    'dtree' : dtree,
    'mlp' : mlp,
    'rfrst' : rfrst,
    'xgboost' : xgboost,
    'xtrees' : xtrees
}
MLparams = {
'linsvm' : {
'penalty':['l2', 'l1'],
'C': list(np.logspace(-4, 3, num=8, endpoint=True)),
'max_iter': [5000, 10000, 20000],
'random_state' : [seed_no],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
},
'svm' : {
'kernel':['rbf', 'poly', 'sigmoid'],
'C': list(np.logspace(-4, 3, num=8, endpoint=True)),
'gamma':['scale', 'auto', 0.001, 0.01, 0.1, 1, 10],
'degree':[2, 3, 4],
'coef0':[-10, -1, -0.1, -0.01, -0.001, 0.0, 0.001, 0.01, 0.1, 1, 10],
'max_iter': [5000, 10000, 20000],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
},
'logr' : {
'penalty':['l2', 'l1'],
'solver':['saga', 'liblinear'],
'random_state' : [seed_no],
'C': list(np.logspace(-4, 3, num=8, endpoint=True)),
'max_iter': [5000, 10000, 20000],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
},
'dtree' : {
'criterion':['gini', 'entropy'],
'min_samples_leaf': range(2, 100, 2),
'min_samples_split': range(5, 130, 5),
'max_depth' : [2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
},
'mlp' : {
'hidden_layer_sizes': [(50,), (40,), (30,), (20,), (10,), (8,), (6,),
(50, 2), (40, 2), (30, 2), (20, 2), (10, 2), (8, 2), (6, 2),
(50, 3), (40, 3), (30, 3), (20, 3), (10, 3), (8, 3), (6, 3),
(50, 4), (40, 4), (30, 4), (20, 4), (10, 4), (8, 4), (6, 4),
(50, 20), (50, 10), (50, 5), (60,), (60, 20), (60, 10), (60, 5), (60, 4), (60, 3), (60, 2),
(4,), (3,), (2,), (4, 4), (4, 2), (2, 1), (2, 2), (3, 1), (3, 2),
(60, 30), (50, 25), (40, 20), (30, 15), (20, 10), (10, 5)],
'learning_rate': ['constant', 'invscaling', 'adaptive'],
'learning_rate_init': [0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05],
'max_iter': [5000, 10000, 20000],
'solver': ['adam', 'lbfgs']
},
'rfrst' : {
'criterion':['gini', 'entropy'],
'n_estimators':[10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000],
'min_samples_leaf': range(2, 100, 2),
'min_samples_split': range(5, 130, 5),
'max_depth' : [2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
'max_features': ['auto', 'sqrt', 'log2'],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
},
'xgboost' : {
'max_depth' : range(2, 60, 2),
'n_estimators' :[10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000],
'scale_pos_weight': range(1, 400, 50),
'subsample': [0.5, 0.6, 0.7, 0.8, 0.9, 1],
'alpha' : np.logspace(-5, 1,num=7),
'gamma' : np.logspace(-5, 1,num=7),
'lambda' : range(1, 22, 1)
},
'xtrees' : {
'random_state' : [seed_no],
'criterion':['gini', 'entropy'],
'n_estimators':[10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000],
'min_samples_leaf': range(2, 100, 2),
'min_samples_split': range(5, 130, 5),
'max_depth' : [2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
'max_features': ['auto', 'sqrt', 'log2', None],
'class_weight':['balanced', {0: 1, 1: 1}, {0: 1, 1: 10}, {0: 1, 1: 20},
{0: 1, 1: 50}, {0: 1, 1: 100}, {0: 1, 1: 150},
{0: 1, 1: 300}, {0: 1, 1: 400}, {0: 1, 1: 500}]
}
}
# Metrics tracked during the randomized search.
mtrics = ['precision', 'recall', 'f1']
# Columns pulled out of RandomizedSearchCV.cv_results_:
# mean_train_<m>, mean_test_<m>, rank_test_<m> for each metric, plus params.
colnames = []
for m in mtrics:
    mntr = '_'.join(['mean_train', m])
    mnte = '_'.join(['mean_test', m])
    rnk = '_'.join(['rank_test', m])
    colnames.extend([mntr, mnte, rnk])
colnames.append('params')
def binaryClf_scorer(ytrue, ypred):
    """Compute binary-classification skill scores, rounded to 2 decimals.

    Returns a dict with precision/recall/f1 plus the forecast-verification
    metrics POD, FAR, TSS and HSS (positive class label = 1).
    """
    tn, fp, fn, tp = confusion_matrix(ytrue, ypred).ravel()
    precision, recall, f1 = precision_recall_fscore_support(
        ytrue, ypred, beta=1, pos_label=1, average='binary')[:3]
    n_total = float(tn + fp + fn + tp)
    # Expected number of correct forecasts by chance (for HSS).
    chance = float(((tp + fn)*(tp + fp) + (tn + fn)*(tn + fp))/n_total)
    raw = {
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'POD': float(tp/(tp+fn)),                       # probability of detection
        'FAR': float(fp/(tp+fp)),                       # false-alarm ratio
        'TSS': float((tp/(tp+fn)) - (fp/(tn+fp))),      # true skill statistic
        'HSS': float((tp + tn - chance) / (n_total - chance)),  # Heidke skill score
    }
    return {k: round(v, 2) for k, v in raw.items()}
# Full metric list reported on the outer test folds.
allMetrics = ['precision', 'recall', 'f1', 'POD', 'FAR', 'TSS', 'HSS']
# Output columns: mean train/validation metrics from the inner CV, plus
# outer-test-fold metrics from binaryClf_scorer, plus the chosen params.
colsOut = ['_'.join(['mean_train', m]) for m in mtrics] + ['_'.join(['mean_val', m]) for m in mtrics] + ['_'.join(['test', m]) for m in allMetrics]
colsOut.append('params')
class NestedTuner():
    """Class instances are classifiers optimized in the given parameter space"""
    # Registry of every tuner instantiated (shared across instances).
    tuned_clfs = []
    def __init__(self, name, model, prams, vectors, targets):
        self.name = name
        self.model = model # classifier
        self.prams = prams # hyper-parameter space
        self.vectors = vectors # data matrix
        self.targets = targets # target vector - labels
        self.inCVresults = self.tune()
        self.inCVbest = self.findBest()
        NestedTuner.tuned_clfs.append(self)
    def tune(self):
        """Random-search the parameter space with inner CV; return the
        cv_results_ table sorted by f1 rank (best first)."""
        grid_df = pd.DataFrame(columns=colnames)
        print('Tuning Classifier {}: \n'.format(self.name))
        clf = RandomizedSearchCV(self.model, self.prams, random_state=seed_no,
                                 scoring=mtrics, n_iter=1000, cv=skf, verbose=1,
                                 refit=False, return_train_score=True, n_jobs=-1)
        model = clf.fit(self.vectors, self.targets.ravel())
        for col in colnames:
            grid_df[col] = model.cv_results_[col]
        return grid_df.sort_values(by='rank_test_f1', ascending=True)
    def findBest(self):
        """Pick the best hyper-parameter row as a dict: prefer rows with
        precision and recall >= 0.6, then >= 0.5, else fall back to the
        top f1-ranked row."""
        best =self.inCVresults.loc[(self.inCVresults['mean_test_precision']>=0.6) & (self.inCVresults['mean_test_recall']>=0.6), :]
        good =self.inCVresults.loc[(self.inCVresults['mean_test_precision']>=0.5) & (self.inCVresults['mean_test_recall']>=0.5), :]
        # BUGFIX: the original called `best.reset_index()` without using the
        # returned frame (a no-op) and placed `del good, best; gc.collect()`
        # after the return statements, where it could never execute.
        if len(best)>0 :
            return best.iloc[0].to_dict()
        if len(good)>0 :
            return good.iloc[0].to_dict()
        return self.inCVresults.iloc[0].to_dict()
class ModelNestedCV():
'''Class instances are estimates of model performance'''
def __init__(self, name, model, prams, vectors, targets):
self.name = name
self.model = model
self.prams = prams
self.vectors = vectors
self.targets = targets
self.outerResults = self.outerCV()
self.performance = self.foldAverage()
def outerCV(self):
allscores = pd.DataFrame(columns=colsOut)
outer = StratifiedKFold(n_splits=outerFolds, shuffle=True, random_state=seed_no)
fld = 0
for train_ix, test_ix in outer.split(self.vectors, self.targets):
Xtrain, Xtest = self.vectors[train_ix], self.vectors[test_ix]
Ytrain, Ytest = self.targets[train_ix], self.targets[test_ix]
tuned = NestedTuner(self.name, self.model, self.prams, Xtrain, Ytrain)
inresults = resultDir / '_'.join([self.name, str(fld), 'inCV3.csv'])
tuned.inCVresults.to_csv(inresults, index=True, header=True)
innerModels = tuned.inCVbest
cols = allscores.columns.to_list()
allscores.loc[fld, 'mean_train_precision'] = innerModels['mean_train_precision']
allscores.loc[fld, 'mean_train_recall'] = innerModels['mean_train_recall']
allscores.loc[fld, 'mean_train_f1'] = innerModels['mean_train_f1']
allscores.loc[fld, 'mean_val_precision'] = innerModels['mean_test_precision']
allscores.loc[fld, 'mean_val_recall'] = innerModels['mean_test_recall']
allscores.loc[fld, 'mean_val_f1'] = innerModels['mean_test_f1']
hparams = innerModels['params']
allscores.loc[fld, 'params'] = str(hparams)
if isinstance(hparams, Mapping):
hpDict = hparams
clf = self.model
clf.set_params(**hpDict)
clf.fit(Xtrain, Ytrain.ravel())
rslts = binaryClf_scorer(Ytest.ravel(), clf.predict(Xtest))
for m in allMetrics:
tst = '_'.join(['test', m])
allscores.loc[fld, tst] = rslts[m]
elif np.isnan(hparams):
for m in allMetrics:
tst = '_'.join(['test', m])
allscores.loc[fld, tst] = np.nan
else :
hpDict = literal_eval(hparams)
clf = self.model
clf.set_params(**hpDict)
clf.fit(Xtrain, Ytrain.ravel())
rslts = binaryClf_scorer(Ytest.ravel(), clf.predict(Xtest))
for m in allMetrics:
tst = '_'.join(['test', m])
allscores.loc[fld, tst] = rslts[m]
fld += 1
return allscores
def foldAverage(self):
df = self.outerResults
foldsOut = df.index.values + 1
keys = ['_'.join(['fold', str(x)]) for x in foldsOut]
df['outer_fold'] = keys
df.set_index('outer_fold', drop=True, inplace=True)
for col in df.columns.values[:-1]:
df.loc['average', col] = df[col].mean(skipna=True)
df.loc['std', col] = df[col].std(skipna=True)
return df
clfs = ['svm', 'linsvm', 'logr', 'dtree', 'mlp', 'rfrst', 'xgboost', 'xtrees']
datacols = ['HELIO_LONGITUDE', 'logFint', 'logfl', 'duration', 'trise', 'cycle',
'WIDTH_2', 'LINEAR_SPEED_2']
for fname in names:
datafile = datadir / '_'.join([fname, 'corr.csv'])
savedir = resultDir / fname
if not os.path.exists(savedir):
os.makedirs(savedir)
dataset = pd.read_csv(datafile, sep=',', header=0)
print(dataset.describe())
X = dataset[datacols].values
Y = dataset[['SEP']].values
del dataset
gc.collect()
scaler = MinMaxScaler()
Xsc = scaler.fit_transform(X)
for c in clfs:
mdl = ModelNestedCV(c, MLmodels[c], MLparams[c], Xsc, Y)
df = mdl.performance
savefile = savedir / '_'.join([c, 'NCVimbl3.csv'])
df.to_csv(savefile, index=True, header=True)
del df, mdl
gc.collect()
| null |
NestedCV_flareCME_v01.py
|
NestedCV_flareCME_v01.py
|
py
| 13,106 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "xgboost.XGBClassifier",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.ExtraTreesClassifier",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_fscore_support",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.RandomizedSearchCV",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "collections.abc.Mapping",
"line_number": 244,
"usage_type": "argument"
},
{
"api_name": "numpy.isnan",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 304,
"usage_type": "call"
}
] |
284733767
|
from cgl.plugins.Qt import QtCore, QtGui, QtWidgets
import logging
import threading
import time
from cgl.core.config.config import ProjectConfig
class ProgressGif(QtWidgets.QWidget):
def __init__(self, title='CG Lumberjacking...', height=150, cfg=None):
QtWidgets.QWidget.__init__(self)
layout = QtWidgets.QVBoxLayout(self)
self.gif_height = QtCore.QSize(height, height)
if not cfg:
self.cfg = ProjectConfig()
else:
self.cfg = cfg
self.message = QtWidgets.QLabel(title)
self.message.setProperty('class', 'ultra_title')
self.message.setAlignment(QtCore.Qt.AlignCenter)
self.progress_bar = QtWidgets.QLabel()
self.progress_bar.setAlignment(QtCore.Qt.AlignCenter)
self.movie = QtGui.QMovie(self.cfg.image_path('chopping_wood.gif'))
self.movie.setScaledSize(self.gif_height)
self.progress_bar.setMovie(self.movie)
layout.addWidget(self.message)
layout.addWidget(self.progress_bar)
def hide(self):
self.message.hide()
self.progress_bar.hide()
def show(self):
self.movie.start()
self.message.show()
self.progress_bar.show()
logging.info(self.movie.scaledSize())
class ProgressDialog(QtWidgets.QDialog):
def __init__(self, message='Achieving Kickassity', gif_name='chopping_wood.gif', cfg=None):
QtWidgets.QDialog.__init__(self)
self.setWindowTitle("Hold My Beer")
if not cfg:
cfg = ProjectConfig()
self.message = QtWidgets.QLabel(message)
self.movie_screen = QtWidgets.QLabel()
mov_path = cfg.image_path(gif_name)
self.movie = QtGui.QMovie(cfg.image_path(gif_name))
logging.info(self.movie.isValid())
self.movie.start()
self.movie_screen.setMovie(self.movie)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.message)
layout.addWidget(self.movie_screen)
self.setLayout(layout)
@staticmethod
def update_gif():
for i in range(60):
QtGui.qApp.processEvents()
def process_method(progress_bar, target, args=(), text=None):
#progress_bar.show()
orig_text = progress_bar.message.text()
if text:
progress_bar.message.setText(text)
time.sleep(1)
# QtGui.qApp.processEvents()
p = threading.Thread(target=target, args=args)
# QtGui.qApp.processEvents()
p.start()
p.join()
##progress_bar.hide()
return p
class ProcessThread(threading.Thread):
def __init__(self, progress_bar, target, args, text=None):
progress_bar.show()
self.job_done = threading.Event()
if text:
progress_bar.message.setText(text)
threading.Thread.__init__(self, target=target, args=args)
progress_bar.hide()
if __name__ == '__main__':
app = QtWidgets.QApplication([])
form = ProgressDialog()
form.show()
app.exec_()
| null |
cgl/ui/widgets/progress_gif.py
|
progress_gif.py
|
py
| 2,977 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cgl.plugins.Qt.QtWidgets.QWidget",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QWidget.__init__",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QWidget",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QVBoxLayout",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtCore.QSize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtCore",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cgl.core.config.config.ProjectConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QLabel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtCore.Qt",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtCore",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QLabel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtCore.Qt",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtCore",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtGui.QMovie",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtGui",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QDialog",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QDialog.__init__",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QDialog",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "cgl.core.config.config.ProjectConfig",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QLabel",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QLabel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtGui.QMovie",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtGui",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QVBoxLayout",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "cgl.plugins.Qt.QtGui.qApp.processEvents",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtGui.qApp",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtGui",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "threading.Event",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets.QApplication",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cgl.plugins.Qt.QtWidgets",
"line_number": 96,
"usage_type": "name"
}
] |
24379017
|
import typing
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
import pandas as pd
import matplotlib.pyplot as plt
import cv2
action_num = 4
overlay_num = 1
ma_kernel_sizes = np.array([5, 15, 31, 61], np.int64)
ma_kernel_size_halfs = ma_kernel_sizes // 2
ma_kernels = [np.ones(size) / size for size in ma_kernel_sizes]
chance_ma_kernel_size = 15
chance_ma_kernel_size_halfs = chance_ma_kernel_size // 2
chance_ma_kernel = np.ones(chance_ma_kernel_size) / chance_ma_kernel_size
def load_from_csv(csv_filepath):
dtypes_csv = [('time', 'str'), ('open', 'f4'), ('high', 'f4'), ('low', 'f4'), ('close', 'f4'), ('volume', 'i4')]
df = pd.read_csv(csv_filepath, names=('time', 'open', 'high', 'low', 'close', 'volume'), parse_dates=[0], dtype=dtypes_csv)
return df
def csv_to_binary(csv_filepath, binary_filepath):
df = load_from_csv(csv_filepath)
df['time'] = df['time'].values.astype('u8') // 1000000000
df['open'] = (df['open'] * 1000).astype('i4')
df['high'] = (df['high'] * 1000).astype('i4')
df['low'] = (df['low'] * 1000).astype('i4')
df['close'] = (df['close'] * 1000).astype('i4')
df = df.drop('volume', axis=1)
records = df.to_records(index=False)
with open(binary_filepath, 'wb') as f:
f.write(records.tobytes())
def read_records(binary_filepath):
dtypes = [('time', 'u8'), ('open', 'i4'), ('high', 'i4'), ('low', 'i4'), ('close', 'i4')]
with open(binary_filepath, 'rb') as f:
b = np.frombuffer(f.read(), dtype=dtypes)
return b
def records_to_dataframe(records):
df = pd.DataFrame(records)
df = df.set_index(pd.to_datetime(df['time'], unit='s')).drop('time', axis=1)
return df
def records_to_time(records):
return records[['time']].view(('u8', 1))
def values_view_from_records(records):
return records[['open', 'high', 'low', 'close']].view(('i4', 4))
def get_separation_indices(records):
interval = 60 * 60
time = records[['time']].astype('u8')
dif_time = np.diff(time)
time_areas = np.nonzero((dif_time > interval).astype('i4'))[0]
time_areas += 1
return time_areas
def tickdata(filepath):
"""binary to pandas DataFrame using numpy.
参考: (´・ω・`;)ヒィィッ すいません - pythonでMT4のヒストリファイルを読み込む
http://fatbald.seesaa.net/article/447016624.html
"""
with open(filepath, 'rb') as f:
ver = np.frombuffer(f.read(148)[:4], 'i4')
if ver == 400:
dtype = [('time', 'u4'), ('open', 'f8'), ('low', 'f8'), ('high', 'f8'), ('close', 'f8'), ('volume', 'f8')]
df = pd.DataFrame(np.frombuffer(f.read(), dtype=dtype))
df = df['time open high low close volume'.split()]
elif ver == 401:
dtype = [('time', 'u8'), ('open', 'f8'), ('high', 'f8'), ('low', 'f8'), ('close', 'f8'), ('volume', 'i8'),
('s', 'i4'), ('r', 'i8')]
df = pd.DataFrame(np.frombuffer(f.read(), dtype=dtype).astype(dtype[:-2]))
df = df.set_index(pd.to_datetime(df['time'], unit='s')).drop('time', axis=1)
return df
def running_max_min_view(a, window_size, step_size):
nrows = (a.shape[0] - window_size) // step_size + 1
ncols = int(np.prod(a.shape[1:]) * window_size)
return as_strided(a, shape=(nrows, ncols), strides=(step_size * a.strides[0], a.itemsize))
def running_max_min(a, window_size, step_size):
return running_max_min_view(a, window_size, step_size).ptp(1)
class TradeEnvironment:
def __init__(self, binary_filepath, window_size=30, height_width=(200, 240)):
# グラフ描画のサイズなど初期化
self.w = height_width[1] # グラフ画像幅(px)
self.w_max = self.w - 1 # グラフ画像幅(px)-1
self.h = height_width[0] # グラフ画像高さ(px)
self.h_max = self.h - 1 # グラフ画像高さ(px)-1
self.img = np.zeros((1, self.h, self.w), np.float32) # グラフ描画先データ、これが状態となる
# グラフ描画時の窓サイズ計算
self.window_size = window_size
# 全データ読み込み
self.records = read_records(binary_filepath)
# 一定期間データが存在しないエリアを探し出し、その間を1エピソードとする
self.episodes = get_separation_indices(self.records)
# エピソードとして使える区間があるか調べる
if self.episodes.shape[0] < 2:
raise Exception('No area exists for episode in histrical data.')
valid_episode_exists = False
for i in range(self.episodes.shape[0] - 1):
if self.window_size * 2 <= self.episodes[i + 1] - self.episodes[i]:
valid_episode_exists = True
break
if not valid_episode_exists:
raise Exception('No episode area exists lager than window size.')
# その他変数初期化
self.num_acions = 4 # 選択可能アクション数
self.spread = 5 # スプレッド
self.loss_cut = 100 # これ以上損したらロスカットされる
# 注文関係
self.position_type = 0 # ポジションタイプ、0: なし、1: 買い、-1: 売り
self.position_action = 0 # ポジション決めた際のアクション
self.position_q_action = 0 # ポジション決めた際のQアクション
self.position_index_in_episode = -1 # ポジション持った時のエピソード内でのインデックス
self.position_start_value = 0 # ポジション持った時の pip
# グラフ表示用変数初期化
self.fig = None
self.ax_img = None
self.axs = None
# 1エピソードの対象となるエリアをランダムに選択
self.cur_episode = -1 # 現在のエピソードのインデックス
self.episode_time = None # 1エピソード全体分の time 値
self.episode_values = None # 1エピソード全体分の open, high, low, close 値
self.index_in_episode = 0 # episode_values 内での現在値に対応するインデックス
def draw_img(self) -> None:
end = self.index_in_episode + 1 # エピソード内での現在の最新値インデックス+1
img = self.img # 描画先バッファ
w = self.w # 画像幅(px)
h = self.h # 画像高さ(px)
chart_x = 5 # チャート部のX左端(px)
chart_y = 0 # チャート部のY上端(px)
chart_w = w - 10 # チャート部の幅(px)
chart_h = h # チャート部の高さ(px)
chart_w_for_scale = chart_w - 1 # チャート部X座標スケーリング用のチャート幅(px)
chart_h_for_scale = chart_h - 1 # チャート部Y座標スケーリング用のチャート高さ(px)
chart_right = chart_x + chart_w # チャート右端のX左端(px)+1
chart_bottom = chart_y + chart_h # チャート下端のY左端(px)+1
h_max = self.h_max
img[:] = 0
position_type = self.position_type
position_start_value = self.position_start_value
positional_reward = self.calc_positional_reward() if position_start_value else 0
if positional_reward < 0:
ind_x1 = 0
ind_x2 = 5
elif 0 < positional_reward:
ind_x1 = w - 5
ind_x2 = w
window_size = self.window_size
time = self.episode_time[end - window_size:end]
values = self.episode_values[end - window_size:end]
ma = []
# 可能なら移動平均を計算
for ki in range(len(ma_kernel_sizes)):
size_needed = window_size + ma_kernel_size_halfs[ki] * 2
if size_needed <= end:
start = end - size_needed
ma.append(np.convolve(self.episode_values[start:end, 1], ma_kernels[ki], mode='valid'))
ma.append(np.convolve(self.episode_values[start:end, 2], ma_kernels[ki], mode='valid'))
ma.append(np.convolve(self.episode_values[start:end, 3], ma_kernels[ki], mode='valid'))
# 表示範囲となる最大最小を探す
time_max = time.max()
time_min = time_max - window_size * 60
values_min = values.min()
values_max = values.max()
for y in ma:
values_min = min(values_min, y.min())
values_max = max(values_max, y.max())
if position_type:
values_min = min(values_min, position_start_value)
values_max = max(values_max, position_start_value)
# values_min -= values_min % 100
# values_max += 100 - values_max % 100
time_scale = chart_w_for_scale / (time_max - time_min)
value_scale = -chart_h_for_scale / (values_max - values_min)
value_translate = chart_h_for_scale - values_min * value_scale
cur = int(np.rint(values[-1, 3] * value_scale + value_translate).item())
if position_type:
pos = int(np.rint(position_start_value * value_scale + value_translate).item())
else:
pos = 0
trg = img[0]
chart_trg = trg[chart_y:chart_bottom, chart_x:chart_right]
# インジケーター描画
# 目盛り描画
for y in np.rint(
np.arange(values_min - values_min % 50, values_max + 51 - (values_max % 50), 50) * value_scale +
value_translate).astype(np.int32):
if 0 <= y and y < chart_trg.shape[0]:
chart_trg[y, :] = 0.1
# ポジション持っていたら、ポジった値から現在値まで塗りつぶす
if position_type and positional_reward:
ind_y1 = max(min(pos, cur), 0)
ind_y2 = min(max(pos, cur), h_max) + 1
trg[ind_y1:ind_y2, ind_x1:ind_x2] = 1
# 現在値として水平線を描画
if 0 <= cur and cur < h:
cur_y1 = max(cur - 1, 0)
cur_y2 = min(cur + 1, h_max) + 1
trg[cur_y1:cur_y2, :] = 1.0
# チャートを描画開始
pts = np.empty((values.shape[0], 1, 2), dtype=np.int32)
pts[:, 0, 0] = np.rint((time - time_min) * time_scale)
# 可能なら移動平均線を描画
for y in ma:
pts[:, 0, 1] = np.rint(y * value_scale + value_translate)
cv2.polylines(chart_trg, [pts], False, 0.3)
# open, high, low, close を描画
for value_type in range(4):
pts[:, 0, 1] = np.rint(values[:, value_type] * value_scale + value_translate)
cv2.polylines(chart_trg, [pts], False, 0.7)
def reset(self, random_episode_or_index=True) -> np.ndarray:
"""エピソードをリセットしエピソードの先頭初期状態に戻る.
Returns:
状態.
"""
self.settle()
min_episode_len = self.window_size * 2
eps = self.episodes
while True:
if isinstance(random_episode_or_index, bool):
if random_episode_or_index:
self.cur_episode = random.randint(0, len(eps) - 2)
else:
self.cur_episode += 1
if len(eps) - 1 <= self.cur_episode:
self.cur_episode = 0
elif isinstance(random_episode_or_index, int):
if random_episode_or_index < 0:
random_episode_or_index = 0
elif len(eps) <= random_episode_or_index:
random_episode_or_index = len(eps) - 1
self.cur_episode = random_episode_or_index
i1 = eps[self.cur_episode]
i2 = eps[self.cur_episode + 1]
if min_episode_len <= i2 - i1:
rcds = self.records[i1:i2]
self.episode_time = records_to_time(rcds)
self.episode_values = values_view_from_records(rcds)
self.index_in_episode = self.window_size
self.draw_img()
return self.img
def get_value(self) -> float:
"""現在の値を取得する.
Return:
現在値.
"""
return self.episode_values[self.index_in_episode, 3].item()
def order(self, position_type: int, action: int, q_action: int) -> None:
"""注文する.
Args:
position_type: -1 売り、1 買い.
action: ポジション決めた際のアクション.
q_action: ポジション決めた際のQアクション.
Returns:
報酬.
"""
if self.position_type != 0:
raise Exception('Can not order when you already have position.')
self.position_type = position_type
self.position_action = action
self.position_q_action = q_action
self.position_index_in_episode = self.index_in_episode
self.position_start_value = self.get_value() + position_type * self.spread
def calc_positional_reward(self) -> float:
"""現在のポジションと現在値から損益を計算する.
"""
return (self.get_value() - self.position_start_value) * self.position_type if self.position_type != 0 else 0
def calc_reward(self, settle: bool = True) -> typing.Tuple[float, int, int, int]:
"""現在の報酬値を取得.
Return:
(報酬, ポジションを決めた際のアクション, ポジションを決めた際のQアクション, ポジションを決めた際のエピソード内でのインデックス) のタプル.
"""
return (self.calc_positional_reward()
if settle else 0), self.position_action, self.position_q_action, self.position_index_in_episode
def settle(self) -> typing.Tuple[float, int, int, int]:
"""決済する.
Returns:
(報酬, ポジションを決めた際のアクション, ポジションを決めた際のQアクション, ポジションを決めた際のエピソード内でのインデックス) のタプル.
"""
reward = self.calc_reward()
if self.position_type == 0:
return reward
self.position_type = 0
self.position_action = 0
self.position_q_action = 0
self.position_index_in_episode = -1
self.position_start_value = 0
return reward
def is_action_ignored(self, action: int) -> bool:
"""step メソッドにアクションを指定しても無視されるかどうか調べる."""
return action == 1 and 0 < self.position_type or action == 2 and self.position_type < 0 or action == 3 and self.position_type == 0
def step(self, action: int,
q_actino: int) -> typing.Tuple[np.ndarray, typing.Tuple[float, int, int, int], bool, object]:
"""指定のアクションを行い次の状態を得る.
Args:
action: アクション、0 何もしない、1 買う、2 売る、3 決済.
action: Qアクション、ポジションと共に情報記録するために使用.
Returns:
(状態, 報酬, エピソード終了かどうか, その他情報) のタプル.
"""
terminal = self.episode_values.shape[0] - 1 <= self.index_in_episode
buy = action == 1 and self.position_type != 1
sell = action == 2 and self.position_type != -1
exit = (action == 3 or terminal) and self.position_type != 0
losscut = self.position_type != 0 and self.calc_positional_reward() < -self.loss_cut
reward = self.settle() if buy or sell or exit or losscut else self.calc_reward(False)
if buy:
# 買い
self.order(1, action, q_actino)
elif sell:
# 売り
self.order(-1, action, q_actino)
# 次の分足が窓に入る様に進める
if not terminal:
self.index_in_episode += 1
self.draw_img()
return self.img, reward, terminal, None
# def render(self):
# """現在の状態をグラフ表示する.
# """
# if self.fig is None:
# self.fig = plt.figure()
# self.ax_img = self.fig.add_subplot(2, 3, 1)
# self.ax = self.fig.add_subplot(2, 3, 2 + i)
# df = records_to_dataframe(self.episode_values.framewise_records[f])
# self.axs[f].cla()
# df.plot(ax=self.axs[f])
# self.ax_img.cla()
# self.ax_img.imshow(self.img.sum(axis=0))
# plt.pause(0.001)
| null |
cpf/python/training/auto_trade/trade_environment.py
|
trade_environment.py
|
py
| 14,583 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.lib.stride_tricks.as_strided",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.convolve",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "numpy.rint",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "cv2.polylines",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.rint",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "cv2.polylines",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 360,
"usage_type": "attribute"
}
] |
621293821
|
# importing dependencies
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import cv2
from keras.models import model_from_json
from keras.preprocessing import image
import base64
model = model_from_json(open("fer.json", "r").read())
model.load_weights('fer.h5')
face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
import numpy as np
# convertiing the byte 64 data to image
def data_uri_to_cv2_img(uri):
encoded_data = uri.split(',')[1]
nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
# preprocess the image
# identifies the emotion of that particular image with the help of fer.h5
def capturePredict(data):
data='data:image/jpeg;base64,'+data
img = data_uri_to_cv2_img(data)
cv2.imwrite('color_img.jpg', img)
img = cv2.imread("color_img.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces_detected = face_haar_cascade.detectMultiScale(img, 1.32, 5)
preditemotion="NO VALUE"
for (x, y, w, h) in faces_detected:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
roi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from image
roi_gray = cv2.resize(roi_gray, (48, 48))
img_pixels = image.img_to_array(roi_gray)
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255
predictions = model.predict(img_pixels)
# find max indexed array3
max_index = np.argmax(predictions[0])
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
predicted_emotion = emotions[max_index]
print(predicted_emotion)
preditemotion=predicted_emotion
cv2.putText(img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# returns predicted emotion
return preditemotion
| null |
cam.py
|
cam.py
|
py
| 1,962 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "keras.models.model_from_json",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.data",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.frombuffer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "numpy.expand_dims",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 50,
"usage_type": "attribute"
}
] |
161769567
|
# -*- coding:utf-8 -*-
'''
@author: misldy
'''
import unittest
from appium import webdriver
# Appium desired-capability values describing the target device and app under test.
platformName = "Android"
deviceName = "K120180523001"
platformVersion = "7.1.2"
appPackage = "com.sunmi.ota"
appActivity = "com.sunmi.ota.ui.activity.UpgradeActivity"
class LoginAndroidDriver(unittest.TestCase):
    """Fixture that opens an Appium session before each test and closes it afterwards."""

    def setUp(self):
        # Assemble the desired capabilities for the device/app under test.
        desired_caps = {
            "platformName": platformName,
            "deviceName": deviceName,
            "platformVersion": platformVersion,
            "appPackage": appPackage,
            "appActivity": appActivity,
        }
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", desired_caps)

    def tearDown(self):
        # Release the Appium session so the device is freed for the next test.
        self.driver.quit()
| null |
src/lAppium/test/case3.py
|
case3.py
|
py
| 747 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "appium.webdriver.Remote",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 23,
"usage_type": "name"
}
] |
220538042
|
"""
byceps.blueprints.admin.seating.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, request
from ....services.party import service as party_service
from ....services.seating import (
area_service as seating_area_service,
seat_group_service,
seat_service,
)
from ....services.ticketing import (
category_service as ticketing_category_service,
)
from ....util.authorization import register_permission_enum
from ....util.framework.blueprint import create_blueprint
from ....util.framework.templating import templated
from ....util.views import permission_required
from .authorization import SeatingPermission
# Flask blueprint for the seating admin area; the permission enum is
# registered once at import time so the views below can require it.
blueprint = create_blueprint('seating_admin', __name__)
register_permission_enum(SeatingPermission)
@blueprint.route('/<party_id>')
@permission_required(SeatingPermission.view)
@templated
def index_for_party(party_id):
    """List seating areas for that party."""
    party = _get_party_or_404(party_id)

    # Gather the summary counts shown on the party's seating overview page.
    return {
        'party': party,
        'seat_count': seat_service.count_seats_for_party(party.id),
        'area_count': seating_area_service.count_areas_for_party(party.id),
        'category_count': ticketing_category_service.count_categories_for_party(
            party.id
        ),
        'group_count': seat_group_service.count_seat_groups_for_party(party.id),
    }
@blueprint.route('/parties/<party_id>/areas', defaults={'page': 1})
@blueprint.route('/parties/<party_id>/areas/pages/<int:page>')
@permission_required(SeatingPermission.view)
@templated
def area_index(party_id, page):
    """List seating areas for that party."""
    party = _get_party_or_404(party_id)

    # Page size is caller-adjustable via query string; defaults to 15 rows.
    per_page = request.args.get('per_page', type=int, default=15)

    areas = seating_area_service.get_areas_for_party_paginated(
        party.id, page, per_page
    )
    seat_totals = seat_service.get_seat_total_per_area(party.id)

    return {
        'party': party,
        'areas_with_occupied_seat_counts': areas,
        'seat_total_per_area': seat_totals,
    }
@blueprint.route('/parties/<party_id>/seat_groups')
@permission_required(SeatingPermission.view)
@templated
def seat_group_index(party_id):
    """List seat groups for that party."""
    party = _get_party_or_404(party_id)

    return {
        'party': party,
        'groups': seat_group_service.get_all_seat_groups_for_party(party.id),
    }
def _get_party_or_404(party_id):
    """Return the party with that ID, or abort the request with HTTP 404."""
    party = party_service.find_party(party_id)

    if party is not None:
        return party

    abort(404)
| null |
byceps/blueprints/admin/seating/views.py
|
views.py
|
py
| 2,838 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "util.framework.blueprint.create_blueprint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "util.authorization.register_permission_enum",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "authorization.SeatingPermission",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "services.seating.seat_service.count_seats_for_party",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "services.seating.seat_service",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "services.seating.area_service.count_areas_for_party",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "services.seating.area_service",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "services.ticketing.category_service.count_categories_for_party",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "services.ticketing.category_service",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "services.seating.seat_group_service.count_seat_groups_for_party",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "services.seating.seat_group_service",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "util.views.permission_required",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "authorization.SeatingPermission.view",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "authorization.SeatingPermission",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "util.framework.templating.templated",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "services.seating.area_service.get_areas_for_party_paginated",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "services.seating.area_service",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "services.seating.seat_service.get_seat_total_per_area",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "services.seating.seat_service",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "util.views.permission_required",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "authorization.SeatingPermission.view",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "authorization.SeatingPermission",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "util.framework.templating.templated",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "services.seating.seat_group_service.get_all_seat_groups_for_party",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "services.seating.seat_group_service",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "util.views.permission_required",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "authorization.SeatingPermission.view",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "authorization.SeatingPermission",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "util.framework.templating.templated",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "services.party.service.find_party",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "services.party.service",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 100,
"usage_type": "call"
}
] |
626362340
|
import numpy as np
import pylab
from multiprocessing import Pool, cpu_count
def RungeKutta(func, init, a, b, h=0.1):
    """Integrate dx/dt = func(x, t) from a to b with the classic 4th-order RK scheme.

    Returns (T, X): the time grid (numpy array) and the list of states,
    where X[i] is the state at time T[i] (recorded before each step).
    """
    times = np.arange(a, b, h)
    states = []
    state = init
    for t in times:
        states.append(state)
        # Four slope samples, each already scaled by the step size h.
        s1 = h * func(state, t)
        s2 = h * func(state + s1 / 2, t + h / 2)
        s3 = h * func(state + s2 / 2, t + h / 2)
        s4 = h * func(state + s3, t + h)
        state += (1 / 6) * (s1 + 2 * s2 + 2 * s3 + s4)
    return times, states
def RungeKutta2(f1, f2, x_init, y_init, a, b, h=0.1):
    """Integrate the coupled system x' = f1(x, y, t), y' = f2(x, y, t) with RK4.

    The angle x is wrapped into (-pi, pi], and (x, y) samples are recorded
    stroboscopically: only when the drive phase 2t/3 is within h of a multiple
    of 2*pi, and only after the transient (t > 100).

    Returns (T, X, Y): the full time grid and the recorded sample lists.
    """
    xs = []
    ys = []
    ts = np.arange(a, b, h)
    x = x_init
    y = y_init
    for t in ts:
        # Keep the angle inside one period.
        if x > np.pi:
            x -= 2 * np.pi
        if x < -np.pi:
            x += 2 * np.pi
        # Poincare-section sample: once per drive period, after transients.
        if (2 * t / 3) % (2 * np.pi) <= h and t > 100:
            xs.append(x)
            ys.append(y)
        kx1 = h * f1(x, y, t)
        ky1 = h * f2(x, y, t)
        kx2 = h * f1(x + kx1 / 2, y + ky1 / 2, t + h / 2)
        ky2 = h * f2(x + kx1 / 2, y + ky1 / 2, t + h / 2)
        kx3 = h * f1(x + kx2 / 2, y + ky2 / 2, t + h / 2)
        ky3 = h * f2(x + kx2 / 2, y + ky2 / 2, t + h / 2)
        kx4 = h * f1(x + kx3, y + ky3, t + h)
        ky4 = h * f2(x + kx3, y + ky3, t + h)
        x += (1 / 6) * (kx1 + 2 * kx2 + 2 * kx3 + kx4)
        y += (1 / 6) * (ky1 + 2 * ky2 + 2 * ky3 + ky4)
    return ts, xs, ys
def helper(fD):
    """Simulate the driven, damped pendulum at drive amplitude fD.

    Returns a list of (fD, theta) pairs, one per stroboscopic sample,
    ready to be plotted as one column of the bifurcation diagram.
    """
    gravity = 9.8
    length = 9.8          # g/l = 1, so the natural frequency is 1
    damping = 0.5
    drive_freq = 2 / 3
    t_max = 200

    def d_theta(theta, omega, t):
        return omega

    def d_omega(theta, omega, t):
        return -(gravity / length) * np.sin(theta) - damping * omega + fD * np.sin(drive_freq * t)

    _, thetas, _ = RungeKutta2(d_theta, d_omega, 0.2, 0, 0, t_max, 0.001)
    return [(fD, theta) for theta in thetas]
def main():
    """Sweep drive amplitudes in parallel and plot the bifurcation diagram.

    Each worker process simulates one drive amplitude (see helper); the
    resulting (fD, theta) points are flattened into one scatter plot.
    """
    drive_amplitudes = np.linspace(1.35, 1.6, 50)
    # Use the pool as a context manager so worker processes are always
    # terminated; the original created Pool(4) and never closed/joined it.
    with Pool(4) as pool:
        per_amplitude = pool.map(helper, drive_amplitudes)
    # Flatten the per-amplitude point lists into one sequence of (fD, theta).
    pts = [pt for column in per_amplitude for pt in column]
    pylab.plot(*zip(*pts), 'k.')
    pylab.show()
# Entry-point guard; required so multiprocessing workers can safely re-import
# this module without re-running the sweep.
if __name__ == "__main__":
    main()
| null |
Fall 2018/Comp/hw.5/p2.py
|
p2.py
|
py
| 1,796 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.arange",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pylab.plot",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pylab.show",
"line_number": 65,
"usage_type": "call"
}
] |
528180607
|
'''
For example, if the first wire's path is R8,U5,L5,D3, then starting from the central port (o), it goes right 8, up 5, left 5, and finally down 3:
[R8,U5,L5,D3]
...........
...........
...........
....+----+.
....|....|.
....|....|.
....|....|.
.........|.
.o-------+.
...........
[U7,R6,D4,L4]
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
These wires cross at two locations (marked X), but the lower-left one is closer to the central port: its distance is 3 + 3 = 6.
'''
from typing import List, Tuple, Dict
# Per-direction x/y step deltas for wire-path instructions (R/L/U/D).
dX = {'R': 1, 'L': -1, 'U': 0, 'D': 0}
dY = {'R': 0, 'L': 0, 'U': 1, 'D': -1}
def crossedWires(A: List[str], B: List[str]) -> int:
    """Return the fewest combined steps to any point where the two wires cross."""
    steps_a = transform2Points(A)
    steps_b = transform2Points(B)
    # Crossings are the grid points visited by both wires.
    crossings = steps_a.keys() & steps_b.keys()
    # Signal delay at a crossing is the sum of each wire's step count there.
    return min(steps_a[p] + steps_b[p] for p in crossings)
def transform2Points(A: List[str]):
    """Trace the wire path and map each visited point to the step count of its
    first visit.

    :param A: instructions like 'R8' (direction letter + distance).
    :return: dict mapping (x, y) -> steps taken to reach that point first.
    """
    # Direction -> (dx, dy).  Kept local so the function is self-contained
    # rather than depending on the module-level dX/dY tables.
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    visited = {}
    x = y = 0
    steps = 0
    for instr in A:
        direction = instr[0]
        distance = int(instr[1:])
        assert direction in ['R', 'L', 'U', 'D']
        dx, dy = deltas[direction]
        for _ in range(distance):
            x += dx
            y += dy
            steps += 1
            # setdefault keeps only the first (shortest) arrival at a point.
            visited.setdefault((x, y), steps)
    return visited
def matchingPoints(A: List[Tuple[int]], B: List[Tuple[int]]) -> List[Tuple[int]]:
    """Return the points of A that also occur in B, preserving A's order.

    The original used list.count() per element, making this O(len(A)*len(B));
    a one-time set of B gives O(len(A) + len(B)) with identical results.
    """
    b_points = set(B)
    return [p for p in A if p in b_points]
def mannhattenDistance(A: Tuple[int]) -> int:
    """Return the Manhattan (taxicab) distance of point A from the origin."""
    x, y = A[0], A[1]
    return abs(x) + abs(y)
# Worked examples from the puzzle statement.  crossedWires computes the
# part-2 metric (fewest *combined steps* to a crossing), so the expected
# values are 30 / 610 / 410.  The part-1 Manhattan-distance answers
# (6 / 159 / 135) that were left here would make these self-checks fail.
one = {
    'A': ['R8', 'U5', 'L5', 'D3'],
    'B': ['U7', 'R6', 'D4', 'L4'],
    'MD': 30
}
assert crossedWires(one['A'], one['B']) == one['MD']
two = {
    'A': ['R75', 'D30', 'R83', 'U83', 'L12', 'D49', 'R71', 'U7', 'L72'],
    'B': ['U62', 'R66', 'U55', 'R34', 'D71', 'R55', 'D58', 'R83'],
    'MD': 610
}
assert crossedWires(two['A'], two['B']) == two['MD']
three = {
    'A': ['R98', 'U47', 'R26', 'D63', 'R33', 'U87', 'L62', 'D20', 'R33', 'U53', 'R51'],
    'B': ['U98', 'R91', 'D20', 'R16', 'D67', 'R40', 'U7', 'R15', 'U6', 'R7'],
    'MD': 410
}
assert crossedWires(three['A'], three['B']) == three['MD']
# Read the two wire paths (one comma-separated line each) and report the
# fewest combined steps to a crossing.  A context manager closes the input
# file deterministically; the original leaked the handle.
with open('d03-in.txt') as puzzle_input:
    A, B = puzzle_input.read().split('\n')
A, B = [x.split(',') for x in [A, B]]
res = crossedWires(A, B)
print(res)
| null |
d03p2.py
|
d03p2.py
|
py
| 2,366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 72,
"usage_type": "name"
}
] |
412282511
|
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from Web_iselenium.test.base_page import BasePage
class AddMemberPage(BasePage):
    """Page object for the 'add contact' screen of the address book.

    Element lookup and explicit waiting are inherited from BasePage
    (find / finds / wait_for_click).
    """

    def add_member(self, username, account, phonenum):
        """Fill in the new-contact form and click save.

        :param username: display name of the contact
        :param account: account id of the contact
        :param phonenum: mobile number of the contact
        :return: True once the save button has been clicked
        """
        self.find(By.ID, "username").send_keys(username)
        self.find(By.ID, "memberAdd_acctid").send_keys(account)
        self.find(By.ID, "memberAdd_phone").send_keys(phonenum)
        # Several elements share this class; find() resolves to the first
        # match, which is the save button.
        self.find(By.CSS_SELECTOR, ".js_btn_save").click()
        return True

    def get_member(self):
        """Return the display names ('title' attribute) of all listed contacts.

        Waits until the contact table's header checkbox is clickable, which
        proves the address-book page has finished loading before scraping.
        (A second, redundant inline WebDriverWait for the same condition --
        a leftover from refactoring the wait into BasePage -- was removed.)
        """
        locator = (By.CSS_SELECTOR, ".member_colRight_memberTable_th_Checkbox")
        self.wait_for_click(10, locator)
        # The second table column holds the contact names; finds() returns
        # every matching cell.
        name_cells = self.finds(
            By.CSS_SELECTOR, ".member_colRight_memberTable_td:nth-child(2)"
        )
        # Collect each cell's 'title' attribute; callers check membership of
        # the newly added contact in this list.
        return [cell.get_attribute("title") for cell in name_cells]
| null |
test/add_member.py
|
add_member.py
|
py
| 5,164 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Web_iselenium.test.base_page.BasePage",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 65,
"usage_type": "name"
}
] |
383564759
|
import torch.optim
from torch.utils.data import DataLoader
from dataset.flickrDataset import Flickr8kDataset
from utils import *
import torch.nn.functional as F
from tqdm import tqdm
from nlgeval import NLGEval
import pickle
import pandas as pd
import numpy as np
import time
# -------- Parameters --------
caption_file = 'Flickr8k.arabic.full.tsv'
images_features_file = 'flickr8k_bottomUp_features.tsv'
embeddings_file = 'full_grams_cbow_300_twitter.mdl'
data_name = 'Arabic_flickr8k_3_cap_per_img'
# NOTE(review): checkpoint_file is None here, but torch.load(checkpoint_file)
# below needs a real path -- set it before running this script.
checkpoint_file = None #"checkpoint_Arabic_flickr8k_3_cap_per_img.pth.tar" # model checkpoint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # sets device for model and PyTorch tensors
# cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
# Load the fitted tokenizer and derive word <-> index maps from it.
with open('dataset/tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
word_map = tokenizer.word_index
index2word = {v :k for k ,v in word_map.items()}
vocab_size = len(word_map.keys())
# Read the precomputed bottom-up image features into a numpy array.
features = pd.read_csv(images_features_file, sep='\t')
features = features.to_numpy()
print("done downloading")
# Load the trained decoder from the checkpoint and switch it to eval mode.
# torch.nn.Module.dump_patches = True #line added
checkpoint = torch.load(checkpoint_file, map_location=device)
decoder = checkpoint['decoder']
decoder = decoder.to(device)
decoder.eval()
nlgeval = NLGEval() # loads the evaluator
batch_size = 1
workers = 1 # for data-loading; right now, only 1 works with h5py
def evaluate(beam_size):
    """
    Evaluate the caption decoder on the TEST split using beam search.

    :param beam_size: beam size at which to generate captions for evaluation
    :return: Official MSCOCO evaluator scores - bleu4, cider, rouge, meteor
    """
    # DataLoader over the TEST split; batch_size is 1, so each iteration
    # yields one image plus all of its reference captions.
    Test_loader = DataLoader(Flickr8kDataset(imgs=features, split='TEST'),
                             batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    # Lists to store references (true captions), and hypothesis (prediction) for each image
    # If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
    # references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
    references = list()
    hypotheses = list()
    indexes = list()
    # For each image
    for i, (imgs, caps, caplens, allcaps, index) in enumerate(
            tqdm(Test_loader, desc="EVALUATING AT BEAM SIZE " + str(beam_size))):
        k = beam_size
        # Move to GPU device, if available
        imgs = imgs.to(device)  # (1, 3, 256, 256)
        # Mean-pooled image feature, replicated once per beam.
        imgs_mean = imgs.mean(1)
        imgs_mean = imgs_mean.expand(k ,2048)
        # compute mean here instead of normalize before
        # Tensor to store top k previous words at each step; now they're just <start>
        k_prev_words = torch.LongTensor([[word_map['<START>']]] * k).to(device)  # (k, 1)
        # Tensor to store top k sequences; now they're just <start>
        seqs = k_prev_words  # (k, 1)
        # Tensor to store top k sequences' scores; now they're just 0
        top_k_scores = torch.zeros(k, 1).to(device)  # (k, 1)
        # Lists to store completed sequences and scores
        complete_seqs = list()
        complete_seqs_scores = list()
        # Start decoding
        step = 1
        h1, c1 = decoder.init_hidden_state(k)  # (batch_size, decoder_dim)
        h2, c2 = decoder.init_hidden_state(k)
        # Two LSTMs (top-down attention + language model), hence two states.
        # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>
        while True:
            embeddings = decoder.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
            # Top-down attention LSTM consumes [language-LSTM state, mean image, word embedding].
            h1 ,c1 = decoder.top_down_attention(
                torch.cat([h2 ,imgs_mean ,embeddings], dim=1),
                (h1 ,c1))  # (batch_size_t, decoder_dim)
            # Additive attention over the region features.
            at1 = decoder.att1(imgs)
            at2 = decoder.att2(h1)
            at3 = decoder.att3(decoder.tanh(at1 + at2.unsqueeze(1))).squeeze(2)  # (batch_size, 36)
            alpha= decoder.att4(at3)
            attention_weighted_encoding = (imgs * alpha.unsqueeze(2)).sum(dim=1)
            # Language-model LSTM consumes [attended image feature, attention-LSTM state].
            h2 ,c2 = decoder.language_model(
                torch.cat([attention_weighted_encoding ,h1], dim=1) ,(h2 ,c2))
            scores = F.log_softmax(decoder.word(h2), dim=1)  # (s, vocab_size)
            # Accumulate log-probs along each beam.
            scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
            # For the first step, all k points will have the same scores (since same k previous words, h, c)
            if step == 1:
                top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
            else:
                # Unroll and find top scores, and their unrolled indices
                top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
            # Convert unrolled indices to actual indices of scores
            prev_word_inds = top_k_words // vocab_size  # (s)
            next_word_inds = top_k_words % vocab_size  # (s)
            # Round-trip through CPU to force LongTensor index dtype.
            prev_word_inds = torch.LongTensor(prev_word_inds.to("cpu")).to(device)
            next_word_inds = torch.LongTensor(next_word_inds.to("cpu")).to(device)
            # Add new words to sequences
            seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
            # Which sequences are incomplete (didn't reach <end>)?
            incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                               next_word != word_map['<END>']]
            complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
            # Set aside complete sequences
            if len(complete_inds) > 0:
                complete_seqs.extend(seqs[complete_inds].tolist())
                complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam length accordingly
            # Proceed with incomplete sequences
            if k == 0:
                break
            seqs = seqs[incomplete_inds]
            h1 = h1[prev_word_inds[incomplete_inds]]
            c1 = c1[prev_word_inds[incomplete_inds]]
            h2 = h2[prev_word_inds[incomplete_inds]]
            c2 = c2[prev_word_inds[incomplete_inds]]
            imgs_mean = imgs_mean[prev_word_inds[incomplete_inds]]
            top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
            k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
            # Break if things have been going on too long
            if step > 50:
                break
            step += 1
        # Keep the best-scoring completed sequence for this image.
        if len(complete_seqs_scores) > 0:
            i = complete_seqs_scores.index(max(complete_seqs_scores))
            seq = complete_seqs[i]
        # NOTE(review): if no sequence completed within 50 steps, 'seq' below
        # is stale from the previous image (or undefined on the first one) --
        # confirm this case cannot occur, or guard it.
        # References
        img_caps = allcaps[0].tolist()
        img_captions = list(
            map(lambda c: [index2word[w] for w in c if w not in {word_map['<START>'], word_map['<END>'], word_map['<PAD>']}],
                img_caps))  # remove <start> and pads
        img_caps = [' '.join(c) for c in img_captions]
        # print(img_caps)
        references.append(img_caps)
        # Hypotheses
        hypothesis = \
            ([index2word[w] for w in seq if w not in {word_map['<START>'], word_map['<END>'], word_map['<PAD>']}])
        hypothesis = ' '.join(hypothesis)
        # print(hypothesis)
        hypotheses.append(hypothesis)
        assert len(references) == len(hypotheses)
        # store images indexes
        for ind in index:
            indexes.append(ind)
    # Create results.csv pairing each image id with its hypothesis and references.
    df = pd.read_csv("Flickr8k_text/test.csv", index_col=[0])
    test_numpy = df.to_numpy()
    id_list = list()
    for index in indexes:
        id_list.append(test_numpy[index, 0])
    results = [id_list] + [hypotheses] + [references]
    df = pd.DataFrame(np.array(results).T, columns=["id", "hypotheses", "reference"])
    df.to_csv("results.csv")
    # Calculate scores
    metrics_dict = nlgeval.compute_metrics(references, hypotheses)
    return metrics_dict
if __name__ == '__main__':
    # Time a single evaluation run at a fixed beam size.
    start = time.time()
    beam_size = 5
    metrics_dict = evaluate(beam_size)
    end = time.time()
    print("metrics_dict", metrics_dict)
    print("time: ", end - start)
| null |
up_down_model/eval.py
|
eval.py
|
py
| 8,231 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.optim.device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.optim.cuda.is_available",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.optim.cuda",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.optim.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "nlgeval.NLGEval",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "dataset.flickrDataset.Flickr8kDataset",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.optim.LongTensor",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.optim.zeros",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.optim.cat",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.optim.cat",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.optim.LongTensor",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "torch.optim.LongTensor",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.optim.cat",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "nlgeval.compute_metrics",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 205,
"usage_type": "call"
}
] |
421625845
|
"""MyAwesomeCart URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from shop import views
# URL routes for the shop app plus the Django admin.
# NOTE(review): the route names below mix styles ("About us" vs "welcome");
# reverse()/{% url %} lookups must use these exact strings, so renaming them
# requires updating templates too.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.index),
    path('welcome/',views.welcome,name="welcome"),
    path('about/',views.about,name="About us"),
    path('contact/',views.contact ,name="Contact"),
    path('tracker/',views.tracker ,name="Tracker"),
    path('search/',views.search ,name="Search"),
    path('productview/',views.productview ,name="Productview"),
    path('checkout/',views.checkout ,name="Checkout"),
    path('cart/',views.addtocart,name="cart"),
    path('viewall/',views.viewall,name="viewall"),
    path('delete/',views.delete,name="delete"),
    path('bay/',views.bay,name="bay"),
    path('save/',views.save,name="save"),
    path('gul/',views.gul,name="gul")
]
# Serve user-uploaded media through Django itself during development only.
if settings.DEBUG:
    urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| null |
MyAwesomeCart/MyAwesomeCart/urls.py
|
urls.py
|
py
| 1,657 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "shop.views.index",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "shop.views.welcome",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "shop.views.about",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "shop.views.contact",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "shop.views.tracker",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "shop.views.search",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "shop.views.productview",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "shop.views.checkout",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "shop.views.addtocart",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "shop.views.viewall",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "shop.views.delete",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "shop.views.bay",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "shop.views.save",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "shop.views.gul",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "shop.views",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 43,
"usage_type": "attribute"
}
] |
423778160
|
"""
process for cancellation
if the button down is in the queue or l/r is in the queue, do not send again.
wait for a clearing signal, then send again.
opposite of mnD is mnU
opposite of mnL or mnR is mnC
if mnD is in SENT, do not send again until sending mnU
if mnL is in SENT, do not send again until sending mnC
if mnR is in SENT, do not send again until sending mnC
"""
import queue
import inputs
from threading import Thread, Lock
import events
import socket, select
import sys
import time
mutex = Lock()
q = queue.Queue()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(20)
sent = set()
def shouldBeSent(s):
if s[2] in ["R", "L"]:
if s in sent:
return False
else:
sent.add(s)
s_inverse = s[0:2] + "C"
sent.discard(s_inverse)
return True
elif s[2] in ["u", "d"]:
if s in sent:
return False
else:
sent.add(s)
s_inverse = s[0:2] + "C"
sent.discard(s_inverse)
return True
elif s[2] in ["U", "D"]:
return True
elif s[2] in ["C"]:
if s in sent:
return False
else:
sent.add(s)
s_inverse1 = s[0:2] + "u"
s_inverse2 = s[0:2] + "d"
s_inverse3 = s[0:2] + "R"
s_inverse4 = s[0:2] + "L"
sent.discard(s_inverse1)
sent.discard(s_inverse2)
sent.discard(s_inverse3)
sent.discard(s_inverse4)
return True
else:
print("ERROR! Unexpected key to press! : " + s )
return False;
def send_data(device_id, device):
for elem in device:
for event in elem:
key = (device_id, event.code, event.state)
if key in events.EVENTS.keys():
strToSend = events.EVENTS[key]
if not shouldBeSent(strToSend):
continue
while 1:
if not mutex.locked():
mutex.acquire()
break;
else:
time.sleep(0.005)
try:
q.put(strToSend.encode('utf-8'))
except Exception as e:
print(str(e))
print("unable to send" + events.EVENTS[key])
finally:
mutex.release()
time.sleep(0.01)
def process_queue():
while 1:
if not q.empty():
toSend = q.get()
try:
print(toSend)
sent = s.send(toSend)
print("{}, {}".format(sent,len(toSend)))
except Exception as e:
print(str(e))
print("unable to send" + toSend.decode('utf-8'))
if __name__ == "__main__":
if(len(sys.argv) < 3) :
print('Usage : python controllers.py hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
# connect to remote host
try :
s.connect((host, port))
except :
print('Unable to connect')
sys.exit()
print('Connected to remote host. Start sending messages')
t = Thread( target = process_queue, args = () )
t.start()
for device_id, device in enumerate(inputs.devices.gamepads):
t = Thread(target = send_data, args = ( device_id, device ) )
t.start()
| null |
controllers.py
|
controllers.py
|
py
| 3,465 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "threading.Lock",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "events.EVENTS.keys",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "events.EVENTS",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "events.EVENTS",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "events.EVENTS",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "inputs.devices",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 128,
"usage_type": "call"
}
] |
459547368
|
import unittest
import apsw
from testutils import getZserioApi
class InstantiateTypeAsSqlTableFieldTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "templates.zs").instantiate_type_as_sql_table_field
def testReadWrite(self):
connection = apsw.Connection(
self.SQLITE3_MEM_DB,
apsw.SQLITE_OPEN_URI | apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE
)
test32Table = self.api.Test32Table(connection, "test32Table")
test32Table.create_table()
rows = [(13, self.api.Test32(42))]
test32Table.write(rows)
readIterator = test32Table.read()
readRows = []
for row in readIterator:
readRows.append(row)
self.assertEqual(rows, readRows)
SQLITE3_MEM_DB = ":memory:"
| null |
test/language/templates/python/InstantiateTypeAsSqlTableFieldTest.py
|
InstantiateTypeAsSqlTableFieldTest.py
|
py
| 849 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "testutils.getZserioApi",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apsw.Connection",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "apsw.SQLITE_OPEN_URI",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "apsw.SQLITE_OPEN_READWRITE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "apsw.SQLITE_OPEN_CREATE",
"line_number": 14,
"usage_type": "attribute"
}
] |
50046369
|
import sys
import json
import joblib
import datetime
import subprocess
import pandas as pd
from flask_pymongo import PyMongo, ObjectId
from flask_bootstrap import Bootstrap
from flask import Flask, render_template, url_for, request, redirect, jsonify
pd.set_option('max_columns', 1000)
pd.set_option('max_info_columns', 1000)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 30000)
pd.set_option('max_colwidth', 4000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
app = Flask(__name__)
app.secret_key = 'mysecret'
bootstrap = Bootstrap(app)
db_papers = PyMongo(app, uri="mongodb://localhost:27017/papers")
@app.route('/summary', methods=['POST', 'GET'])
def summary():
if request.method == 'POST':
form = request.form
summary_id = joblib.load('static/summary_id') + 1
data_sources = data_object_create(form, summary_id)
summary = summary_json(form, data_sources, summary_id)
print('Summary: {}'.format(summary))
print('Data Sources: {}'.format(data_sources))
ret_summary = db_papers.\
db['summary'].\
insert_one(summary)
print('Summary document object: \n\t{}'.format(ret_summary.inserted_id))
data_lst = [source for source in data_sources]
if not ((len(data_lst) == 1) and (data_lst[0]['data_source'] == 'None')):
ret_data = db_papers.\
db['data'].\
insert_many([source for source in data_sources])
print('Data document objects: \n\t{}'.format(ret_data.inserted_ids))
joblib.dump(summary_id, 'static/summary_id')
return redirect(url_for('summary'))
return render_template('summary.html')
@app.route('/datasets', methods=['POST', 'GET'])
def datasets():
if request.method == 'POST':
form = request.form
data_sources = data_object_create(form)
print('Data Sources: {}'.format(data_sources))
ret_data = db_papers.\
db['data'].\
insert_many(data_sources)
print('Data document objects: \n\t{}'.format(ret_data.inserted_ids))
return redirect(url_for('datasets'))
return render_template('datasets.html')
@app.route('/database_viewer', methods=['POST', 'GET'])
def database_viewer():
return render_template('data_viewer.html', page_name='Table Viewer')
@app.route('/document_view', methods=['POST'])
def document_view():
form = request.form
document = document_json(form)
if form['collection'] == 'Data':
return render_template('data_document_view.html', doc=document, page_name='Document View')
else:
return render_template('summary_document_view.html', doc=document, page_name='Document View')
@app.route('/quit')
def quit():
sys.exit(4)
return ''
@app.route('/query_db', methods=['POST', 'GET'])
def query_db():
js = json.loads(request.data.decode('utf-8'))
collection = js['collection'].lower()
docs = []
docs += list(db_papers.db[collection].find({}))
payload = []
for doc in docs:
document_dict = {}
for k, v in doc.items():
if k == '_id':
document_dict[str(k)] = str(v)
else:
document_dict[str(k)] = v
payload.append(document_dict)
return jsonify(items=payload)
@app.route('/update_db', methods=['POST'])
def update_db():
js = json.loads(request.data.decode('utf-8'))
collection = js['collection'].lower()
id = js['_id']
covars_changed = js['covars_changed']
summary_id = int(float(js['summary_id']))
js.pop('covars_changed', None)
js['summary_id'] = summary_id
if not covars_changed:
return jsonify(js)
js_to_insert = js.copy()
js_to_insert.pop('collection', None)
js_to_insert.pop('_id', None)
query = {'_id': ObjectId(id)}
insert = {'$set': js_to_insert}
ret_data = db_papers. \
db[collection]. \
update_one(query, insert)
if (collection == 'data'):
query_summary = {'summary_id': summary_id}
new_lst = []
for source in list(db_papers.db['summary'].find(query_summary))[0]['data']:
if source['data_source'] == js['data_source']:
for covar in covars_changed:
source[covar] = js[covar]
new_lst.append(source)
insert_summary = {'$set': {'data': new_lst}}
db_papers.db['summary'].update_one(query_summary, insert_summary)
elif (collection == 'summary') and ('data' in covars_changed):
for source in js['data']:
query_data = {'summary_id': summary_id, 'data_source': source['data_source']}
source['summary_id'] = summary_id
insert_data = {'$set': source}
db_papers.db['data'].update_one(query_data, insert_data)
print('Data document objects: \n\t{}'.format(ret_data))
return jsonify(js)
def summary_json(form, data_sources, summary_id):
data = {
"citation": form['Citation'],
"date_added": str(datetime.datetime.today()),
"objective": form['Objective'],
"t_model": form['Theoretical Model'],
"e_model": form['Empirical Approach'],
"data": data_sources,
"conclusions": form['Conclusions'],
"summary_id": summary_id
}
return data
def document_json(form):
data = {}
for key in form:
if key == 'data':
data_lst = []
for data_string in form['data'][1:-1].split('},{'):
if data_string[0] != '{':
data_string = '{' + data_string
if data_string[-1] != '}':
data_string = data_string + '}'
data_lst.append(json.loads(data_string))
data[key] = data_lst
else:
data[key] = form[key]
return data
def data_object_create(form, summary_id):
num_data_sources = int((len(list(form.keys())) - 5) / 3)
return [{"data_source": form['Data_source' + str(i)], "outcomes": form['Outcomes' + str(i)], "covariates": form['Covariates' + str(i)], "summary_id": summary_id} for i in range(num_data_sources)]
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=True)
| null |
command.py
|
command.py
|
py
| 6,251 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.set_option",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask_bootstrap.Bootstrap",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "joblib.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "joblib.dump",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.data.decode",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.request.data.decode",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.ObjectId",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 180,
"usage_type": "call"
}
] |
281244648
|
#! /usr/bin/env python
# coding: utf-8
import math
import cv2
import logging
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import glob
from sklearn.cluster import MeanShift
logger = logging.getLogger(__name__)
def clustering(dens_map, band_width, thresh=0):
"""
clustering density map
"""
# search high value cordinates
while True:
# point[0]: y point[1]: x
point = np.where(dens_map > thresh)
# X[:, 0]: x X[:,1]: y
X = np.vstack((point[1], point[0])).T
if X.shape[0] > 0:
break
else:
if thresh > 0:
thresh -= 0.05
else:
return np.zeros((0, 2))
# MeanShift clustering
logger.debug("START: clustering")
ms = MeanShift(bandwidth=band_width, seeds=X)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters = len(labels_unique)
centroid_arr = np.zeros((n_clusters, 2))
for k in range(n_clusters):
centroid_arr[k] = cluster_centers[k]
logger.debug("DONE: clustering")
return centroid_arr.astype(np.int32)
def batch_clustering(args):
"""
clustering mutiple files
"""
file_lst = glob.glob(args.dens_map_path)
for i, file_path in enumerate(file_lst):
logger.debug("current data: {0} / {1}".format(i + 1, len(file_lst)))
est_dens_map = np.load(file_path)
centroid_arr = clustering(est_dens_map, args.band_width, args.thresh)
file_num = file_path.split("/")[-1][:-4]
np.savetxt("{0}/{1}.csv".format(args.out_clustering_dirc, file_num),
centroid_arr, fmt="%i", delimiter=",")
def plot_prediction_box(img, centroid_arr,hour, minute, out_pred_box_dirc,box_size=12):
"""
draw square box of predict point
"""
# get cordinates of vertex(lert top and right bottom)
def get_rect_vertex(x, y, box_size):
vertex = np.zeros((2, 2), dtype=np.uint16)
shift = int(box_size/2)
# left top corner
vertex[0][0] = x - shift
vertex[0][1] = y - shift
# right bottom corner
vertex[1][0] = x + shift
vertex[1][1] = y + shift
return vertex
logger.debug("Number of cluster: {0}".format(centroid_arr.shape[0]))
for i in range(centroid_arr.shape[0]):
x = int(centroid_arr[i][0])
y = int(centroid_arr[i][1])
img = cv2.circle(img, (x, y), 2, (0, 0, 255), -1, cv2.LINE_AA)
vertex = get_rect_vertex(x, y, box_size)
img = cv2.rectangle(img, (vertex[0][0], vertex[0][1]), (vertex[1][0], vertex[1][1]), (0, 0, 255), 3)
cv2.imwrite("{0}/{1}_{2}.png".format(out_pred_box_dirc, hour, minute), img)
logger.debug("Done({0}:{1}): plot estimation box\n".format(hour, minute))
def make_clustering_parse():
parser = argparse.ArgumentParser(
prog="clustering.py",
usage="clustering pred point",
description="description",
epilog="end",
add_help=True
)
# Data Argment
parser.add_argument("--dens_map_path", type=str,
default="/data/sakka/estimation/20170421/9/dens/*.npy")
parser.add_argument("--out_clustering_dirc", type=str,
default="/data/sakka/estimation/20170421/9/cord")
parser.add_argument("--out_pred_box_dirc", type=str,
default="/data/sakka/image/estBox")
# Parameter Argument
parser.add_argument("--band_width", type=int,
default=25, help="band width of clustering")
parser.add_argument("--thresh", type=float,
default=0.4, help="threshold to be subjected to clustering")
args = parser.parse_args()
return args
if __name__ == "__main__":
logs_path = "/home/sakka/cnn_by_density_map/logs/clustering.log"
logging.basicConfig(filename=logs_path,
level=logging.DEBUG,
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
args = make_clustering_parse()
logger.debug("Running with args: {0}".format(args))
batch_clustering(args)
| null |
src/model/clustering.py
|
clustering.py
|
py
| 4,248 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.MeanShift",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.uint16",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 128,
"usage_type": "attribute"
}
] |
557927723
|
"""
Copyright (c) 2020 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
import logging
import time
from pywinauto.application import Application
from pywinauto.keyboard import send_keys
from core.orbit_e2e import E2ETestCase, wait_for_condition
def wait_for_main_window(application: Application):
wait_for_condition(lambda: application.top_window().class_name() == "OrbitMainWindow", 30)
class ConnectToStadiaInstance(E2ETestCase):
"""
Connect to the first available stadia instance
"""
def _execute(self):
window = self.suite.top_window()
logging.info('Start connecting to gamelet.')
connect_radio = self.find_control('RadioButton', 'ConnectToStadia')
connect_radio.click_input()
# Wait for the first data item in the instance list to exist
# We're not using find_control here because magic lookup enables us to easily wait for the existence of a row
window.InstanceList.click_input()
window.InstanceList.DataItem0.wait('exists', timeout=100)
instance_list = self.find_control('Table', 'InstanceList')
logging.info('Found %s rows in the instance list', instance_list.item_count())
self.expect_true(instance_list.item_count() >= 1, 'Found at least one instance')
window.InstanceList.DataItem0.double_click_input()
logging.info('Connecting to Instance, waiting for the process list...')
# In the new UI, use small waits until the process list is active, and then some more for the
# semi-transparent "loading" Overlay of the tables to disappear
wait_for_condition(lambda: self.find_control('Custom', 'ProcessesFrame').is_enabled() is True, 25)
wait_for_condition(lambda: self.find_control('Table', 'ProcessList').is_active(), 10)
# This is a bit annoying, but since the overlay is invisible when loading is done, we need to check for
# absence of the overlay... not sure if there is a better way
wait_for_condition(lambda: self.find_control('Group', 'ProcessListOverlay', raise_on_failure=False) is None)
logging.info('Process list ready')
class FilterAndSelectFirstProcess(E2ETestCase):
"""
Select the first process in the process list and verify there is at least one entry in the list
"""
def _execute(self, process_filter):
# Finding FilterProcesses/ProcessList occationally throws from within pywinauto. This is not
# understood. The while loop with the try/except block is a workaround for that.
while (True):
try:
filter_edit = self.find_control('Edit', 'FilterProcesses')
break
except KeyError:
logging.info('Find FilterProcesses failed. Try again.')
while (True):
try:
process_list = self.find_control('Table', 'ProcessList')
break
except KeyError:
logging.info('Find ProcessList failed. Try again.')
logging.info('Waiting for process list to be populated')
wait_for_condition(lambda: process_list.item_count() > 0, 30)
logging.info('Setting filter text for process list')
if process_filter:
filter_edit.set_focus()
filter_edit.set_edit_text('')
send_keys(process_filter)
# Wait for the process to show up - it may still be starting
wait_for_condition(lambda: process_list.item_count() > 0, 30)
logging.info('Process selected, continuing to main window...')
process_list.children(control_type='DataItem')[0].double_click_input()
wait_for_main_window(self.suite.application)
window = self.suite.top_window(True)
self.expect_eq(window.class_name(), "OrbitMainWindow", 'Main window is visible')
| null |
contrib/automation_tests/test_cases/connection_window.py
|
connection_window.py
|
py
| 3,921 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pywinauto.application.Application",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.E2ETestCase",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.E2ETestCase",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pywinauto.keyboard.send_keys",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "core.orbit_e2e.wait_for_condition",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 83,
"usage_type": "call"
}
] |
517657988
|
from flask import Flask, jsonify, request, make_response, Response, flash
from flask.ext.httpauth import HTTPBasicAuth
from flask_sqlalchemy import SQLAlchemy
from flask import render_template, redirect, url_for
import random, time
from socket import gethostname
from flask.ext.wtf import Form
from wtforms import StringField, TextField, TextAreaField, SubmitField, IntegerField
from wtforms import validators
from functools import wraps
import re, json
from flask.ext.mail import Message, Mail
mail = Mail()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///me5.db'
app.config['SECRET_KEY'] = 'thisishowyouremindme'
app.config["MAIL_SERVER"] = "smtp.gmail.com"
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True
app.config["MAIL_USERNAME"] = '[email protected]'
app.config["MAIL_PASSWORD"] = 'nagar822121'
db = SQLAlchemy(app)
auth = HTTPBasicAuth()
mail.init_app(app)
current_time_in_millis = lambda: int(round(time.time() * 1000))
resume_pdf_link = 'https://drive.google.com/file/d/1Um3xUiaiZT5tgVk3VSEyhWNlhkVJMODu/view?usp=sharing'
def check_auth(username, password):
return username == 'rish' and password == 'kidinjp2'
def authenticate():
return Response('Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
class Music(db.Model):
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
m_name = db.Column(db.String)
m_link = db.Column(db.String)
m_text = db.Column(db.Text)
m_date = db.Column(db.String)
m_weight = db.Column(db.Integer)
def __init__(self, m_name, m_link, m_text, m_date, m_weight):
self.m_name = m_name
self.m_link = m_link
self.m_text = m_text
self.m_date = m_date
self.m_weight = m_weight
class MusicForm(Form):
mf_name = StringField('mf_name', validators=[validators.required()])
mf_link = StringField('mf_link', validators=[validators.required()])
mf_text = TextAreaField('mf_text', validators=[validators.required(),validators.optional()])
mf_weight = IntegerField('mf_weight', validators=[validators.required()])
class ContactForm(Form):
c_name = StringField('c_name', validators=[validators.required()])
c_email = StringField('c_email', validators=[validators.required()])
c_msg = TextAreaField('c_msg', validators=[validators.required()])
@app.route("/")
def root():
return redirect(url_for('home'))
@app.route('/home')
def home():
color = 'blue'
title = "Shailesh Singh"
titleback = "SS"
subtitle = "Coder | Traveler | Athlete | Developer"
#subcontent = "Hi there! Polyglot full-stack developer? That's the aim. Steadily reaching there. I'm pursuing my undergrad degree in CS at DA-IICT, and am in my Junior year. I love keeping myself super busy, making things people will use, running, and playing football. Oh and FIFA too :D"
#subcontent = "Me? 5+ apps on Google Plays, developer, creative thinker, problem solver. Undergrad in CS at DA-IICT- Junior year. I love keeping myself super busy, making things people will use, running, and playing football. FIFA 14, labradors, traveling, meeting new people :D"
subcontent = '<a href = "/aboutme" class="aref">Here\'s what I\'ve done in the past 2 years.</a>'
return render_template('home.html',color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
@app.route('/portfolio')
def portfolio():
projectsFile = app.open_resource('static/projects.json')
projects = json.loads(projectsFile.read())['projects']
color = 'blue'
title = "Portfolio"
titleback = "CV"
subtitle = "A log of my perpetually increasing list of projects."
subcontent = "I could have made a fancy resume here, listing my work-exs, education history, but that's boring and we've got LinkedIn for that. This is a log of projects I've worked on indepenently, with organizations, and in my university."
return render_template('portfolio.html', projects = projects, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent, resume_pdf_link=resume_pdf_link)
@app.route('/code')
def code():
color = 'green'
title = "Code"
titleback = "C"
subtitle = "I love making things. And code allows me to do so in the laziest way possible. Laptop, bed, and some coffee."
subcontent = "Coding has become a major part of my life. Majorly because code just makes life so much easier. Whether it's a mobile app, an arduino based room locker, or a simple shell script to boot your laptop faster. Oh, and partly because this is the only way I see myself making money to fund my bucketlist."
return render_template('code.html', color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
@app.route('/weblog', defaults={'weblogno':None})
@app.route('/weblog/<weblogno>')
def weblog_ind(weblogno):
return redirect("http://bhardwajrish.blogspot.in/");
weblogs = None
if weblogno == None:
#weblogs = Weblog.query.all()
weblogsFile = app.open_resource('static/weblogs.json')
weblogs = json.loads(weblogsFile.read())['weblogs']
elif weblogno == 'random-list':
weblogsFile = app.open_resource('static/weblogs.json')
weblogs = json.loads(weblogsFile.read())['weblogs']
random.shuffle(weblogs, random.random)
elif weblogno == 'favorites':
weblogs = []
weblogsFile = app.open_resource('static/weblogs.json')
weblogs_temp = json.loads(weblogsFile.read())['weblogs']
for w in weblogs_temp :
if w['w_weight'] is 1 :
weblogs.append(w)
if weblogs is not None:
# DISPLAY WEBLOG PAGE WITH SELECTED FILTERS
color = 'dark'
title = "WebLog"
titleback = "W"
subtitle = "A log of random musings, notes and things I find interesting"
subcontent = "Most of my notes are short paragraphs (and not super long blogs that no one reads) on ideas and thoughts that cross my mind, fun observations about people and my surroundings, songs, travel, and sport."
return render_template('weblog.html', weblogs = weblogs, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
else:
# DISPLAY INDIVIDUAL WEBLOG
color = 'green'
title = "WebLog"
titleback = "W"
subtitle = "A log of random musings, notes and things I find interesting"
subcontent = "Most of my notes are short paragraphs (and not super long blogs that no one reads) on ideas and thoughts that cross my mind, fun observations about people and my surroundings, songs, travel, and sport."
#weblog = Weblog.query.filter_by(id = weblogno).first()
weblogsFile = app.open_resource('static/weblogs.json')
weblogs = json.loads(weblogsFile.read())['weblogs']
for w in weblogs:
if w['id'] is int(weblogno):
return render_template('weblog_ind.html', weblog = w, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
return redirect(url_for('page_not_found'))
@app.route('/add/<addwhat>', methods = ['POST', 'GET'])
@requires_auth
def addContent(addwhat):
if addwhat == 'song' or addwhat == 'music':
form = MusicForm()
if request.method == 'POST':
if form.validate_on_submit():
music = Music(form.mf_name.data,form.mf_link.data,form.mf_text.data, current_time_in_millis(), form.mf_weight.data)
db.session.add(music)
db.session.commit()
return redirect(url_for('music', link = None))
else :
return 'invalid details entered'
else:
return render_template("music_create.html", form = form)
@app.route('/music', defaults={'link':None}, methods = ['GET', 'POST'])
@app.route('/music/<link>', methods = ['GET', 'POST'])
def music(link):
    """Music listing page.

    :param link: None renders the full list, 'random-list' a shuffled list,
        'favorites' only songs with m_weight == 1. Any other value leaves
        ``songs`` as None and the view returns None (unchanged historic
        behavior — Flask will raise for such URLs).
    """
    songs = None
    # FIX: identity comparison with None instead of `== None`.
    if link is None:
        songs = Music.query.all()
    elif link == 'random-list':
        songs = Music.query.all()
        # FIX: shuffle's second argument (`random=`) was deprecated in
        # Python 3.9 and removed in 3.11; passing random.random was the
        # default behavior anyway, so dropping it preserves semantics.
        random.shuffle(songs)
    elif link == 'favorites':
        songs = Music.query.filter_by(m_weight = 1).all()
    if songs is not None:
        color = 'red'
        title = "Music"
        titleback = "M"
        subtitle = "A Music Log"
        subcontent = "Without songs, you simply cannot spend half your day on a laptop writing code. So here's a throwback to the songs I love. - Some I am currently listening to, some I had a phase of, and some that'll remain in my playlist even when Im 70."
        return render_template('music.html', songs = songs, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
@app.route('/contact', methods = ['POST', 'GET'])
def contact():
    """Contact page: render the form; on a valid POST, e-mail the message."""
    form = ContactForm()
    color = 'orange'
    title = "Contact"
    titleback = "C"
    subtitle = "Let's get in touch"
    subcontent = "I love meeting new people and working on amazing things. If you'd like to work on a project with me, or get to know more about the work I do, do drop me a message. "
    if request.method == 'POST':
        # FIX: idiomatic truth test instead of comparing `== False`.
        if not form.validate():
            flash('All fields are required.')
            return render_template('contact.html', form = form, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
        else:
            msg = Message("Great Website Man!", sender='[email protected]', recipients=['[email protected]'])
            msg.body = """ From: %s <%s> %s """ % (form.c_name.data, form.c_email.data, form.c_msg.data)
            mail.send(msg)
            # Present a fresh, empty form alongside the success banner.
            form = ContactForm()
            return render_template('contact.html', success=True, form = form, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
    return render_template('contact.html', form = form, color = color, title = title, titleback = titleback, subtitle = subtitle, subcontent = subcontent)
@app.route('/aboutme')
def aboutme():
    """Serve the about-me page, passing the resume download link through."""
    context = {'resume_pdf_link': resume_pdf_link}
    return render_template('aboutme.html', **context)
@app.route('/places')
def places():
    """Serve the static travel/places page."""
    template_name = 'places.html'
    return render_template(template_name)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the site's standard header fields."""
    context = {
        'color': 'yellow',
        'title': "Hold On!",
        'titleback': "404",
        'subtitle': "This page does not exist.",
    }
    return render_template('404.html', **context), 404
if __name__ == '__main__':
    # Create any missing database tables before serving requests.
    db.create_all()
    #app.run(host="192.168.150.1",port=8080, debug=True)
    # Local development server only; debug=True must not be used in production.
    app.run(host="127.0.0.1", port=8080, debug=True)
| null |
run.py
|
run.py
|
py
| 10,321 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.ext.mail.Mail",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.ext.httpauth.HTTPBasicAuth",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.authorization",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.ext.wtf.Form",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "wtforms.validators.optional",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "wtforms.IntegerField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "flask.ext.wtf.Form",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "flask.ext.mail.Message",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 231,
"usage_type": "call"
}
] |
306158165
|
import os
import glob

# ---------------------------------------------------------------------------
# Training configuration for a BraTS-style 3D segmentation pipeline.
# ---------------------------------------------------------------------------
config = dict()
config["image_shape"] = (128, 128, 128)  # This determines what shape the images will be cropped/resampled to.
config["patch_shape"] = None  # switch to None to train on the whole image
config["labels"] = (1, 2, 4)  # the label numbers on the input image
config["n_base_filters"] = 16
config["n_labels"] = len(config["labels"])
config["all_modalities"] = ["t1", "t1ce", "flair", "t2"]
config["training_modalities"] = config["all_modalities"]  # change this if you want to only use some of the modalities
config["nb_channels"] = len(config["training_modalities"])
# Network input is (channels, *spatial): spatial dims come from the patch
# shape when patch training is enabled, otherwise from the full image shape.
if "patch_shape" in config and config["patch_shape"] is not None:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["patch_shape"]))
else:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["image_shape"]))
config["truth_channel"] = config["nb_channels"]
config["deconvolution"] = True  # if False, will use upsampling instead of deconvolution
config["batch_size"] = 1
config["validation_batch_size"] = 2
config["n_epochs"] = 500  # cutoff the training after this many epochs
config["patience"] = 10  # learning rate will be reduced after this many epochs if the validation loss is not improving
config["early_stop"] = 50  # training will be stopped after this many epochs without the validation loss improving
config["initial_learning_rate"] = 5e-4
config["learning_rate_drop"] = 0.5  # factor by which the learning rate will be reduced
config["validation_split"] = 0.8  # portion of the data that will be used for training
config["flip"] = False  # augments the data by randomly flipping an axis during training
config["permute"] = True  # data shape must be a cube. Augments the data by permuting in various directions
config["distort"] = None  # switch to None if you want no distortion
config["augment"] = config["flip"] or config["distort"]
config["validation_patch_overlap"] = 0  # if > 0, during training, validation patches will be overlapping
config["training_patch_start_offset"] = (16, 16, 16)  # randomly offset the first patch index by up to this offset
config["skip_blank"] = True  # if True, then patches without any target will be skipped
config["data_file"] = os.path.abspath("brats_data.h5")
config["model_file"] = os.path.abspath("isensee_2017_model.h5")
config["training_file"] = os.path.abspath("isensee_training_ids.pkl")
config["validation_file"] = os.path.abspath("isensee_validation_ids.pkl")
config["overwrite"] = False  # If True, will overwrite previous files. If False, will use previously written files.

from nilearn.image import reorder_img, new_img_like
#from .nilearn_custom_utils.nilearn_utils import crop_img_to

# ---------------------------------------------------------------------------
# Scratch/experiment code: inspect what voxel spacing a resample to
# config["image_shape"] would produce for one hard-coded local image, then
# stop. NOTE(review): the exit() below terminates the module before anything
# defined after it can run — presumably leftover debugging; confirm before
# relying on the code below it.
# ---------------------------------------------------------------------------
data_shape = tuple([0, 3] + list(config['image_shape']))
#print(data_shape)
import nibabel as nib
img = nib.load("/home/jbmai_sai/Documents/image.nii")
import numpy as np
image = nib.load("/home/jbmai_sai/Documents/image.nii")
#import numpy as np
# Get data from nibabel image object (returns numpy memmap object)
#img_data = img.get_data()
interpolation ="linear"
image = reorder_img(image, resample=interpolation)
#print(image.shape)
# Per-axis zoom factor needed to reach the target shape, and the voxel
# spacing that zoom implies.
zoom_level = np.divide(config["image_shape"], image.shape)
print(image.header.get_zooms())
new_spacing = np.divide(image.header.get_zooms(), zoom_level)
print(new_spacing)
#new_data = resample_to_spacing(image.get_data(), image.header.get_zooms(), new_spacing,
#                               interpolation=interpolation)
#new_affine = np.copy(image.affine)
#np.fill_diagonal(new_affine, new_spacing.tolist() + [1])
#new_affine[:3, 3] += calculate_origin_offset(new_spacing, image.header.get_zooms())
exit()
def fetch_training_data_files(return_subject_ids=False):
    """Collect, per subject directory, a tuple of modality and truth file paths.

    Scans the Pre-operative TCGA-LGG dataset layout two levels deep and, for
    each subject, builds a tuple of ``<modality>.nii.gz`` paths (training
    modalities from ``config`` plus the "truth" segmentation).

    :param return_subject_ids: when True, also return the list of subject
        directory basenames in matching order.
    :return: list of path tuples, or (list of path tuples, subject ids).
    """
    pattern = os.path.join(os.path.dirname('/home/jbmai_sai/Downloads/'),
                           "Pre-operative_TCGA_LGG_NIfTI_and_Segmentations", "*", "*")
    training_data_files = []
    subject_ids = []
    for subject_dir in glob.glob(pattern):
        print(subject_dir)
        subject_id = os.path.basename(subject_dir)
        print(subject_id)
        subject_ids.append(subject_id)
        modalities = config["training_modalities"] + ["truth"]
        training_data_files.append(
            tuple(os.path.join(subject_dir, modality + ".nii.gz")
                  for modality in modalities))
    if return_subject_ids:
        return training_data_files, subject_ids
    return training_data_files
#import nibabel as nib
#img = nib.load("/home/jbmai_sai/Documents/image.nii")
#import numpy as np
# Get data from nibabel image object (returns numpy memmap object)
#img_data = img.get_data()
#print(type(img_data))
# Convert to numpy ndarray (dtype: uint16)
#img_data_arr = np.asarray(img_data)
#print(img_data_arr.shape)
#from brats.train_isensee2017 import fetch_training_data_files
# NOTE(review): unreachable in practice — the module-level exit() earlier in
# this file terminates the script before this point; remove that exit() to
# actually run this listing of training files.
training_files, subject_ids = fetch_training_data_files(return_subject_ids=True)
print(training_files[0:5])
| null |
trails.py
|
trails.py
|
py
| 4,875 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "nilearn.image.reorder_img",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
}
] |
607201274
|
# coding: utf-8
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
import math
import gc
import heapq
from multiprocessing import Process
import multiprocessing
import datetime
from math import isnan
def to_list(x):
    """Coerce an iterable (e.g. a pandas row) into a plain Python list."""
    return [item for item in x]
def to_array(x):
    """Coerce an iterable (e.g. a pandas row) into a numpy array."""
    return np.array([item for item in x])
def cluster(vectorList):
    '''
    Cluster the news vectors a user viewed within the window into interest
    centers via agglomerative clustering on cosine distance.

    :param vectorList: list of 200-d news vectors the user viewed within n
        days. A plain float here (pandas NaN) means the data is missing.
    :return: list of interest-center vectors; empty list when input missing.
    '''
    # A missing value arrives from pandas as float NaN — hence the type check.
    if type(vectorList) != float:
        df = pd.DataFrame(vectorList)
        df = df.loc[df[0] != -100]  # drop news whose vector could not be obtained (-100 sentinel)
        if len(df) < 4:
            # Too few usable vectors to cluster: treat each vector as its own
            # center. NOTE(review): this fallback returns the *unfiltered*
            # vectorList, so -100 sentinel vectors may leak through — confirm
            # whether that is intended.
            vectorList = list(vectorList)
            for i in range(len(vectorList)):
                vectorList[i] = list(vectorList[i])
            centerList = vectorList
        else:
            # Heuristic cluster count: sqrt(2 * number of vectors).
            model = AgglomerativeClustering(n_clusters=int(math.sqrt(2*len(vectorList))),
                                            linkage='average', affinity='cosine',
                                            )
            df['class'] = model.fit_predict(df)
            # The mean vector of each cluster becomes one interest center.
            df = df.groupby(['class']).mean().reset_index()
            df['center'] = df.iloc[:, 1:201].apply(to_list, axis=1)  # DataFrame row -> list vector
            centerList = list(df['center'])  # return the cluster centers as one list
        print('cluster done')
        return centerList
    else:
        centerList = []
        print('cluster done')
        return centerList
def get_center_correlation(row, vector):
    """Compute similarity between one impression's news vector and each of
    the user's interest centers, attaching the results to the row.

    :param row: pandas Series with at least 'url' and 'center' ('center' is
        a *stringified* list of center vectors, or a float NaN when absent).
    :param vector: DataFrame with columns 'url' and 'vector' (numpy arrays).
    :return: the row with list-valued 'cosine', 'euclidean' and 'manhattan'
        columns added when computable, and NaNs replaced by the -100 sentinel.
    """
    target_vector = vector.loc[vector.url == row['url'], ['vector']].iloc[0, 0]
    target_vector = np.array(target_vector)
    center_list = row['center']
    # Skip rows whose news vector is the -100 sentinel or whose centers are
    # missing (float NaN).
    if (target_vector[0] != -100) & (type(center_list) != float):
        cosine = []
        euclidean = []
        # pearson = []
        manhattan = []
        # 'center' was serialized as a string; eval() turns it back into a
        # list. NOTE(review): eval on file-loaded text is unsafe if the CSVs
        # can ever contain untrusted content — consider ast.literal_eval.
        if eval(center_list) != -100:
            for i in eval(center_list):
                i = np.array(i)
                cosine.append(i.dot(target_vector)/(math.sqrt((i**2).sum()) * math.sqrt((target_vector**2).sum())))
                euclidean.append(np.sqrt(np.sum(np.square(target_vector - i))))
                # pearson.append(np.corrcoef(np.vstack([i, target_vector]))[0][1])
                manhattan.append(np.sum(np.abs(i-target_vector)))
            row['cosine'] = cosine
            row['euclidean'] = euclidean
            # row['pearson'] = pearson
            row['manhattan'] = manhattan
    print('correlation get')
    row = row.fillna(-100)
    return row
def main_corr():
    """Pipeline step: join user interest centers onto the train/valid/test
    sets and compute per-row center similarities, writing *_corr.csv files.
    """
    starttime = datetime.datetime.now()
    # Raw impression data for each split.
    train = pd.read_csv('../data/train_raw.csv', index_col=False)
    valid = pd.read_csv('../data/valid_raw.csv', index_col=False)
    test = pd.read_csv('../data/test_raw.csv', index_col=False)
    # News vectors come split across three files; concatenate and dedupe by url.
    vector = pd.read_csv('../data/news_vector.csv', index_col=False)
    vector2 = pd.read_csv('../data/news_vector2.csv', index_col=False)
    vector3 = pd.read_csv('../data/news_vector3.csv', index_col=False)
    vector = pd.concat([vector, vector2, vector3])
    vector = vector.drop_duplicates(['url']).reset_index(drop=True)
    del (vector['link_id'])
    del vector2, vector3
    gc.collect()
    # Collapse the 200 component columns into a single numpy-array column.
    vector['vector'] = vector.iloc[:, 1:201].apply(to_array, axis=1)
    vector = vector[['url', 'vector']]
    gc.collect()
    # read center
    train_center = pd.read_csv('../data/train_center.csv', index_col=False)
    valid_center = pd.read_csv('../data/valid_center.csv', index_col=False)
    test_center = pd.read_csv('../data/test_center.csv', index_col=False)
    train_center = train_center[['device_id', 'refresh_day', 'center']]
    valid_center = valid_center[['device_id', 'refresh_day', 'center']]
    test_center = test_center[['device_id', 'refresh_day', 'center']]
    # merge centers onto impressions by user and day
    train = pd.merge(train, train_center, on=['device_id', 'refresh_day'], how='left')
    valid = pd.merge(valid, valid_center, on=['device_id', 'refresh_day'], how='left')
    test = pd.merge(test, test_center, on=['device_id', 'refresh_day'], how='left')
    del train_center, valid_center, test_center
    gc.collect()
    print('merge done')
    # Row-wise similarity computation; valid/test first so their outputs are
    # saved even if the (much larger) train pass fails.
    valid = valid.apply(get_center_correlation, axis=1, vector=vector)
    test = test.apply(get_center_correlation, axis=1, vector=vector)
    del valid['center'], test['center']
    gc.collect()
    valid.to_csv('../data/valid_corr.csv', index=False)
    test.to_csv('../data/test_corr.csv', index=False)
    train = train.apply(get_center_correlation, axis=1, vector=vector)
    del train['center']
    gc.collect()
    train.to_csv('../data/train_corr.csv', index=False)
def top_k_corr(row):
    """Append the top-5 values of each similarity list as new named columns
    (e.g. 'cosine_with_top1' .. 'cosine_with_top5') on the row.

    Rows whose 'cosine' is the -100 sentinel are returned unchanged.
    """
    if row['cosine'] != -100:
        for i in ['cosine', 'euclidean', 'manhattan']:
            # The column holds a stringified list; eval restores it and
            # nlargest picks (up to) the 5 largest values.
            temp = heapq.nlargest(5, eval(row[str(i)]))
            temp = pd.Series(temp)
            temp_index = []
            for j in range(len(temp)):
                temp_index.append(str(i)+'_with_top'+str(j+1))
            temp.index = temp_index
            row = pd.concat([row, temp])
        print('corr get')
        return row
    else:
        return row
def corr_calculate(data):
    """Derive aggregate similarity features: per-metric averages over all
    interest centers plus per-metric top-5 columns (via top_k_corr).

    :param data: DataFrame with stringified-list similarity columns.
    :return: the DataFrame with the new feature columns added.
    """
    data = data.fillna(-100)
    # Average similarity to all centers; -100 sentinel marks missing data.
    data['avg_cosine_center'] = data['cosine'].apply(lambda x: sum(eval(x))/len(eval(x)) if x != -100 else -100)
    data['avg_euc_center'] = data['euclidean'].apply(lambda x: sum(eval(x))/len(eval(x)) if x != -100 else -100)
    data['avg_man_center'] = data['manhattan'].apply(lambda x: sum(eval(x))/len(eval(x)) if x != -100 else -100)
    data = data.apply(top_k_corr, axis=1)
    # data['avg_pearson_center'] = data['pearson'].apply(lambda x: sum(eval(x))/len(eval(x)) if x != -1 else -1)
    return data
def main_calculate():
    """Pipeline step: load the backed-up *_corr.csv files, compute aggregate
    similarity features, and overwrite ../data/*_corr.csv with the result.

    NOTE(review): the column selections below include 'pearson', but
    main_corr's pearson computation is commented out, so that column
    presumably does not exist in fresh outputs — verify against the backup
    files before running this.
    """
    train_corr = pd.read_csv('../data/backup/train_corr.csv', index_col=False)
    valid_corr = pd.read_csv('../data/backup/valid_corr.csv', index_col=False)
    test_corr = pd.read_csv('../data/backup/test_corr.csv', index_col=False)
    train_corr = corr_calculate(train_corr[['device_id', 'refresh_day', 'cosine', 'euclidean', 'manhattan', 'pearson']])
    valid_corr = corr_calculate(valid_corr[['device_id', 'refresh_day', 'cosine', 'euclidean', 'manhattan', 'pearson']])
    test_corr = corr_calculate(test_corr[['device_id', 'refresh_day', 'cosine', 'euclidean', 'manhattan', 'pearson']])
    train_corr.to_csv('../data/train_corr.csv', index=False)
    valid_corr.to_csv('../data/valid_corr.csv', index=False)
    test_corr.to_csv('../data/test_corr.csv', index=False)
    return
if __name__ == '__main__':
    # get corr between news vector and user interest center on server
    main_corr()
    # main_calculate()
| null |
feature/get_corr.py
|
get_corr.py
|
py
| 6,644 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.AgglomerativeClustering",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "heapq.nlargest",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 156,
"usage_type": "call"
}
] |
508822593
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Soft Rasterizer (SoftRas)
#
# Copyright (c) 2017 Hiroharu Kato
# Copyright (c) 2018 Nikos Kolotouros
# Copyright (c) 2019 Shichen Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import kaolin as kal
from .DifferentiableRenderer import DifferentiableRenderer
from .Lighting import compute_ambient_light
from .Lighting import compute_directional_light
class SoftRenderer(DifferentiableRenderer):
r"""A class implementing the \emph{Soft Renderer}
from the following ICCV 2019 paper:
Soft Rasterizer: A differentiable renderer for image-based 3D reasoning
Shichen Liu, Tianye Li, Weikai Chen, and Hao Li
Link: https://arxiv.org/abs/1904.01786
"""
def __init__(
        self,
        image_size: int = 256,
        anti_aliasing: bool = True,
        bg_color: torch.Tensor = torch.zeros(3),
        fill_back: bool = True,
        camera_mode: str = 'projection',
        K=None, rmat=None, tvec=None,
        perspective_distort: bool = True,
        sigma_val: float = 1e-5,
        dist_func: str = 'euclidean',
        dist_eps: float = 1e-4,
        gamma_val: float = 1e-4,
        aggr_func_rgb: str = 'softmax',
        aggr_func_alpha: str = 'prod',
        texture_type: str = 'surface',
        viewing_angle: float = 30.,
        viewing_scale: float = 1.0,
        eye: torch.Tensor = None,
        camera_direction: torch.Tensor = torch.FloatTensor([0, 0, 1]),
        near: float = 0.1, far: float = 100,
        light_mode: str = 'surface',
        light_intensity_ambient: float = 0.5,
        light_intensity_directional: float = 0.5,
        light_color_ambient: torch.Tensor = torch.ones(3),
        light_color_directional: torch.Tensor = torch.ones(3),
        light_direction: torch.Tensor = torch.FloatTensor([0, 1, 0]),
        device: str = 'cpu'):
    r"""Initalize the SoftRenderer object.
    NOTE: SoftRenderer works only in GPU mode!
    Args:
        image_size (int): Size of the (square) image to be rendered.
        anti_aliasing (bool): Whether or not to perform anti-aliasing
            (default: True)
        bg_color (torch.Tensor): Background color of rendered image
            (size: math:`3`, default: :math:`\left[0, 0, 0\right]`)
        fill_back (bool): Whether or not to fill color to the back
            side of each triangle as well (sometimes helps, when
            the triangles in the mesh are not properly oriented.)
            (default: True)
        camera_mode (str): Choose from among `projection`, `look`, and
            `look_at`. In the `projection` mode, the camera is at the
            origin, and its optical axis is aligned with the positive
            Z-axis. In the `look_at` mode, the object (not the camera)
            is placed at the origin. The camera "looks at" the object
            from a predefined "eye" location, which is computed from
            the `viewing_angle` (another input to this function). In
            the `look` mode, only the direction in which the camera
            needs to look is specified. It does not necessarily look
            towards the origin, as it allows the specification of a
            custom "upwards" direction (default: 'projection').
        K (torch.Tensor): Camera intrinsics matrix. Note that, unlike
            standard notation, K here is a 4 x 4 matrix (with the last
            row and last column drawn from the 4 x 4 identity matrix)
            (default: None)
        rmat (torch.Tensor): Rotation matrix (again, 4 x 4, as opposed
            to the usual 3 x 3 convention).
        tvec (torch.Tensor): Translation vector (3 x 1). Note that the
            (negative of the) tranlation is applied before rotation,
            to be consistent with the projective geometry convention
            of transforming a 3D point X by doing
            torch.matmul(R.transpose(), X - t) (default: None)
        perspective_distort (bool): Whether or not to perform perspective
            distortion (to simulate field-of-view based distortion effects)
            (default: True).
        viewing_angle (float): Angle at which the object is to be viewed
            (assumed to be in degrees!) (default: 30.)
        eye (torch.Tensor): Optional camera position (used in the `look`
            and `look_at` modes). When None, computed from viewing_angle.
        camera_direction (float): Direction in which the camera is facing
            (used only in the `look` and `look_at` modes) (default:
            :math:`[0, 0, 1]`)
        near (float): Near clipping plane (for depth values) (default: 0.1)
        far (float): Far clipping plane (for depth values) (default: 100)
        light_intensity_ambient (float): Intensity of ambient light (in the
            range :math:`\left[ 0, 1 \right]`) (default: 0.5).
        light_intensity_directional (float): Intensity of directional light
            (in the range :math:`\left[ 0, 1 \right]`) (default: 0.5).
        light_color_ambient (torch.Tensor): Color of ambient light
            (default: :math:`\left[ 0, 0, 0 \right]`)
        light_color_directional (torch.Tensor): Color of directional light
            (default: :math:`\left[ 0, 0, 0 \right]`)
        light_direction (torch.Tensor): Light direction, for directional
            light (default: :math:`\left[ 0, 1, 0 \right]`)
        device (torch.Tensor): Device on which all tensors are stored.
        NOTE: Although the default device is set to 'cpu', at the moment,
        rendering will work only if the device is CUDA enabled.
        Eg. 'cuda:0'.
    """
    super(SoftRenderer, self).__init__()
    # Size of the image to be generated.
    self.image_size = image_size
    # Whether or not to enable anti-aliasing
    # If enabled, we render an image that is twice as large as the required
    # size, and then downsample it.
    self.anti_aliasing = anti_aliasing
    # Background color of the rendered image.
    self.bg_color = bg_color
    # Whether or not to fill in color to the back faces of each triangle.
    # Usually helps, especially when some of the triangles in the mesh
    # have improper orientation specifications.
    self.fill_back = fill_back
    # Device on which tensors of the class reside. At present, this function
    # only works when the device is CUDA enabled, such as a GPU.
    self.device = device
    # camera_mode specifies how the scene is to be set up.
    self.camera_mode = camera_mode
    # If the mode is 'projection', use the input camera intrinsics and
    # extrinsics.
    if self.camera_mode == 'projection':
        self.K = K
        self.rmat = rmat
        self.tvec = tvec
    # If the mode is 'look' or 'look_at', use the viewing angle to determine
    # perspective distortion and camera position and orientation.
    elif self.camera_mode in ['look', 'look_at']:
        # Whether or not to perform perspective distortion.
        self.perspective_distort = perspective_distort
        self.viewing_angle = viewing_angle
        # FIX: honour a user-supplied `eye`; previously the argument was
        # silently ignored and always overwritten with the default computed
        # from the viewing angle. Default behavior is unchanged.
        if eye is None:
            # TODO: use kal.deg2rad instead
            eye = torch.FloatTensor([0, 0, -(1. / torch.tan(kal.math.pi
                * self.viewing_angle / 180) + 1)])
        self.eye = eye.to(self.device)
        # FIX: honour the `camera_direction` argument; previously it was
        # hard-coded to [0, 0, 1] regardless of the value passed in. The
        # default argument is [0, 0, 1], so default behavior is unchanged.
        self.camera_direction = camera_direction.to(self.device)
    # Near and far clipping planes.
    self.near = near
    self.far = far
    # Ambient and directional lighting parameters.
    self.light_intensity_ambient = light_intensity_ambient
    self.light_intensity_directional = light_intensity_directional
    self.light_color_ambient = light_color_ambient.to(device)
    self.light_color_directional = light_color_directional.to(device)
    self.light_direction = light_direction.to(device)
    # Epsilon used by the rasterizer for numerical stability.
    # NOTE(review): sigma_val, dist_func, dist_eps, gamma_val,
    # aggr_func_rgb, aggr_func_alpha, texture_type, viewing_scale and
    # light_mode are accepted but never stored or used here — confirm
    # whether downstream code expects them.
    self.rasterizer_eps = 1e-3
def forward(self, vertices, faces, textures=None, mode=None,
            K=None, rmat=None, tvec=None):
    r"""Render the mesh by delegating to :meth:`render`.

    Args:
        vertices (torch.Tensor): Mesh vertices (B x V x 3).
        faces (torch.Tensor): Mesh faces (B x F x 3).
        textures (torch.Tensor): Optional mesh texture.
        mode (str): Renderer mode ('rgb', 'silhouette', 'depth', or None);
            interpreted by `render`.
        K, rmat, tvec (torch.Tensor): Optional camera intrinsics,
            rotation, and translation.

    Returns:
        Whatever :meth:`render` returns for the given mode.
    """
    # FIX: removed the dead code that previously followed this return —
    # it was unreachable (it sat after an unconditional return), compared
    # strings with `is`, and referenced undefined names (dist_coeffs,
    # orig_size). Mode dispatch is handled by `render` itself.
    return self.render(vertices, faces, textures, mode, K, rmat, tvec)
def render(self, vertices, faces, textures=None, mode=None, K=None,
rmat=None, tvec=None):
r"""Renders the RGB, depth, and alpha channels.
Args:
vertices (torch.Tensor): Vertices of the mesh (shape: :math:`B
\times V \times 3`), where :math:`B` is the batchsize,
and :math:`V` is the number of vertices in the mesh.
faces (torch.Tensor): Faces of the mesh (shape: :math:`B \times
F \times 3`), where :math:`B` is the batchsize, and :math:`F`
is the number of faces in the mesh.
textures (torch.Tensor): Mesh texture (shape: :math:`B \times F
\times 4 \times 4 \times 4 \times 3`)
mode (str): Renderer mode (choices: 'rgb', 'silhouette',
'depth', None) (default: None). If the mode is None, the rgb,
depth, and alpha channels are all rendered. In the rgb mode,
only the rgb image channels are rendered. In the silhouette
mode, only a silhouette image is rendered. In the depth mode,
only a depth image is rendered.
K (torch.Tensor): Camera intrinsics (default: None) (shape:
:math:`B \times 4 \times 4` or :math:`4 \times 4`)
rmat (torch.Tensor): Rotation matrix (default: None) (shape:
:math:`B \times 4 \times 4` or :math:`4 \times 4`)
tvec (torch.Tensor): Translation vector (default: None)
(shape: :math:`B \times 3` or :math:`3`)
Returns:
(torch.Tensor): rendered RGB image channels
(torch.Tensor): rendered depth channel
(torch.Tensor): rendered alpha channel
Each of the channels is of shape
`self.image_size` x `self.image_size`.
"""
# Fill the back faces of each triangle, if needed
if self.fill_back:
faces = torch.cat((faces, faces[:, :, list(reversed(range(
faces.shape[-1])))]), dim=1)
textures = torch.cat(
(textures, textures.permute(0, 1, 4, 3, 2, 5)), dim=1)
# Lighting (not needed when we are rendering only depth/silhouette
# images)
if mode not in ['depth', 'silhouette']:
textures = self.lighting(vertices, faces, textures)
# Transform vertices to the camera frame
vertices = transform_to_camera_frame(vertices)
# Project the vertices from the camera coordinate frame to the image.
vertices = project_to_image(vertices)
# Rasterization
out = self.rasterize(vertices, faces, textures)
return out['rgb'], out['depth'], out['alpha']
def lighting(self, vertices, faces, textures):
r"""Applies ambient and directional lighting to the mesh. """
faces_lighting = vertices_to_faces(vertices, faces)
# textures = lighting(
# faces_lighting,
# textures,
# self.light_intensity_ambient,
# self.light_intensity_directional,
# self.light_color_ambient,
# self.light_color_directional,
# self.light_direction)
ambient_lighting = kal.graphics.compute_ambient_lighting(
faces_lighting, textures, self.light_intensity_ambient,
self.light_color_ambient)
directional_lighting = kal.graphics.compute_directional_lighting(
faces_lighting, textures, self.light_intensity_directional,
self.light_color_directional)
return ambient_lighting * textures + directional_lighting * textures
def shading(self):
r"""Does nothing. """
pass
def transform_to_camera_frame(self, vertices):
r"""Transforms the mesh vertices to the camera frame, based on the
camera mode to be used.
Args:
vertices (torch.Tensor): Mesh vertices (shape: :math:`B \times
V \times 3`), where `B` is the batchsize, and `V` is the
number of mesh vertices.
Returns:
vertices (torch.Tensor): Transformed vertices into the camera
coordinate frame (shape: :math:`B \times V \times 3`).
"""
if self.camera_mode == 'look_at':
vertices = self.look_at(vertices, self.eye)
# # Perspective distortion
# if self.perspective_distort:
# vertices = perspective_distort(vertices, angle=self.viewing_angle)
elif self.camera_mode == 'look':
vertices = self.look(vertices, self.eye, self.camera_direction)
# # Perspective distortion
# if self.perspective_distort:
# vertices = perspective_distort(vertices, angle=self.viewing_angle)
elif self.camera_mode == 'projection':
if K is None:
K = self.K
if rmat is None:
rmat = self.rmat
if tvec is None:
tvec = self.tvec
# vertices = perspective_projection(vertices, K, rmat, tvec)
def project_to_image(self, vertices):
r"""Projects the mesh vertices from the camera coordinate frame down
to the image.
Args:
vertices (torch.Tensor): Mesh vertices (shape: :math:`B \times
V \times 3`), where `B` is the batchsize, and `V` is the
number of mesh vertices.
Returns:
vertices (torch.Tensor): Projected image coordinates (u, v) for
each vertex, with an appended depth channel. (shape:
:math:`B \times V \times 3`), where :math:`B` is the
batchsize and :math:`V` is the number of vertices.
"""
# TODO: Replace all of these by perspective_projection. Use different
# rmat, tvec combinations, based on the mode, but use a consistent
# projection function across all modes. Helps avoid redundancy.
if self.camera_mode == 'look_at':
vertices = self.perspective_distort(vertices,
angle=self.viewing_angle)
elif self.camera_mode == 'look':
vertices = self.perspective_distort(vertices,
angle=self.viewing_angle)
elif self.camera_mode == 'projection':
vertices = perspective_projection(vertices, K, rmat, tvec)
def rasterize(self, vertices, faces, textures):
r"""Performs rasterization, i.e., conversion of triangles to pixels.
Args:
vertices (torch.Tensor): Vertices of the mesh (shape: :math:`B
\times V \times 3`), where :math:`B` is the batchsize,
and :math:`V` is the number of vertices in the mesh.
faces (torch.Tensor): Faces of the mesh (shape: :math:`B \times
F \times 3`), where :math:`B` is the batchsize, and :math:`F`
is the number of faces in the mesh.
textures (torch.Tensor): Mesh texture (shape: :math:`B \times F
\times 4 \times 4 \times 4 \times 3`)
"""
faces = self.vertices_to_faces(vertices, faces)
# If mode is unspecified, render rgb, depth, and alpha channels
if mode is None:
out = kal.graphics.nmr.rasterize_rgbad(faces, textures,
self.image_size, self.anti_aliasing, self.near, self.far,
self.rasterizer_eps, self.bg_color)
return out['rgb'], out['depth'], out['alpha']
# Render RGB channels only
elif mode == 'rgb':
images = kal.graphics.nmr.rasterize(faces, textures,
self.image_size, self.anti_aliasing, self.near, self.far,
self.rasterizer_eps, self.background_color)
return images
# Render depth image
elif mode == 'depth':
images = kal.graphics.nmr.rasterize_silhouettes(faces,
self.image_size, self.anti_aliasing)
# Render only a silhouette, without RGB colors
elif mode == 'silhouette':
depth = kal.graphics.nmr.rasterize_depth(faces,
self.image_size, self.anti_aliasing)
return depth
else:
raise ValueError('Mode {0} not implemented.'.format(mode))
def look_at(vertices, eye, at=torch.FloatTensor([0, 0, 0]),
up=torch.FloatTensor([0, 1, 0])):
r"""Camera "looks at" an object whose center is at the tensor represented
by "at". And "up" is the upwards direction.
"""
import torch.nn.functional as F
device = vertices.device
eye = eye.to(device)
at = at.to(device)
up = up.to(device)
batchsize = vertices.shape[0]
if eye.dim() == 1:
eye = eye[None, :].repeat(batchsize, 1)
if at.dim() == 1:
at = at[None, :].repeat(batchsize, 1)
if up.dim() == 1:
up = up[None, :].repeat(batchsize, 1)
# Create new axes
# eps is chosen as 1e-5 because that's what the authors use
# in their (Chainer) implementation
z_axis = F.normalize(at - eye, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# Create rotation matrices
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :],
z_axis[:, None, :]), dim=1)
# Apply
# [B, V, 3] -> [B, V, 3] -> [B, V, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, R.transpose(1, 2))
return vertices
def look(self, vertices, eye, direction=torch.FloatTensor([0, 1, 0]),
up=None):
r"""Apply the "look" transformation to the vertices.
"""
import torch.nn.functional as F
device = vertices.device
direction = direction.to(device)
if up is None:
up = torch.FloatTensor([0, 1, 0]).to(device)
if eye.dim() == 1:
eye = eye[None, :]
if direction.dim() == 1:
direction = direction[None, :]
if up.dim() == 1:
up = up[None, :]
# Create new axes
z_axis = F.normalize(direction, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# Create rotation matrix (B x 3 x 3)
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :],
z_axis[:, None, :]), dim=1)
# Apply
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, R.transpose(1, 2))
return vertices
def perspective_distort(self, vertices, angle=30.):
r"""Compute perspective distortion from a given viewing angle.
"""
device = vertices.device
angle = torch.FloatTensor([angle * 180 / kal.math.pi]).to(device)
width = torch.tan(angle)
width = width[:, None]
z = vertices[:, :, 2]
x = vertices[:, :, 0] / (z * width)
y = vertices[:, :, 1] / (z * width)
vertices = torch.stack((x, y, z), dim=2)
return vertices
def vertices_to_faces(self, vertices, faces):
r"""
vertices (torch.Tensor): shape: math:`B \times V \times 3`
faces (torch.Tensor): shape: math:`B \times F \times 3`
"""
B = vertices.shape[0]
V = vertices.shape[1]
# print(vertices.dim(), faces.dim())
# print(vertices.shape[0], faces.shape[0])
# print(vertices.shape[2], faces.shape[2])
device = vertices.device
faces = faces + (torch.arange(B).to(device) * V)[:, None, None]
vertices = vertices.reshape(B * V, 3)
return vertices[faces]
| null |
kaolin/graphics/SoftRenderer.py
|
SoftRenderer.py
|
py
| 23,330 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "DifferentiableRenderer.DifferentiableRenderer",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.tan",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "kaolin.math",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics.compute_ambient_lighting",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "kaolin.graphics.compute_directional_lighting",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "kaolin.graphics.nmr.rasterize_rgbad",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "kaolin.graphics.nmr.rasterize",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "kaolin.graphics.nmr.rasterize_silhouettes",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "kaolin.graphics.nmr.rasterize_depth",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "kaolin.graphics",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "torch.cross",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "torch.cross",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "torch.cross",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "torch.cross",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "kaolin.math",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "torch.tan",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 522,
"usage_type": "call"
}
] |
125602905
|
import pyglet
from pyglet.window import key
# Constants: animation frame delay (seconds) and movement speed (px/s).
pc_chew_speed = 0.1
pc_pixel_step = 100
# Current heading in degrees (0=right, 90=up, 180=left, 270=down).
pc_move_direction = 0
window = pyglet.window.Window(width=672, height=744)
# General graphics: the sprite sheet is sliced into a 10 x 16 tile grid.
graphic_grid = pyglet.image.load('./resources/spritemap-384.png')
graphic_sequence = pyglet.image.ImageGrid(graphic_grid, 10, 16)
# PacMan chewing frames for each facing direction (tile indices into the
# sheet; index 32 is presumably the closed-mouth frame — confirm).
pc_l = [graphic_sequence[32], graphic_sequence[98], graphic_sequence[96]]
pc_r = [graphic_sequence[32], graphic_sequence[102], graphic_sequence[100]]
pc_d = [graphic_sequence[32], graphic_sequence[103], graphic_sequence[101]]
pc_u = [graphic_sequence[32], graphic_sequence[99], graphic_sequence[97]]
pc_anim_l = pyglet.image.Animation.from_image_sequence(pc_l, pc_chew_speed, True)
pc_anim_r = pyglet.image.Animation.from_image_sequence(pc_r, pc_chew_speed, True)
pc_anim_d = pyglet.image.Animation.from_image_sequence(pc_d, pc_chew_speed, True)
pc_anim_u = pyglet.image.Animation.from_image_sequence(pc_u, pc_chew_speed, True)
# PacMan death animation (tiles 37-47), looped.
pc_died = [graphic_sequence[i] for i in range(37, 48)]
pc_anim_died = pyglet.image.Animation.from_image_sequence(pc_died, pc_chew_speed, True)
sp_pc_died = pyglet.sprite.Sprite(pc_anim_died)
# One batch with two ordered groups: maze behind, PacMan in front.
batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
sp_pc = pyglet.sprite.Sprite(pc_anim_r)
sp_pc.batch = batch
sp_pc.group = foreground
# Maze wall sprites; coordinate lists appear unused so far in this file.
sp_walls = []
valid_x_coords = []
valid_y_coords = []
def init_sprite(img, x, y, batch, group):
    """Create a sprite from `img` at (x, y), attached to the given batch
    and ordered group."""
    sprite = pyglet.sprite.Sprite(img)
    sprite.x, sprite.y = x, y
    sprite.batch = batch
    sprite.group = group
    return sprite
def load_world_def():
    """Read the maze layout from resources/phase_1.txt and create one wall
    sprite per box-drawing character, laid out on a 24px grid.

    The layout file describes the maze with Unicode box-drawing
    characters; each maps to a tile index in the sprite sheet.
    """
    # Box-drawing character -> sprite-sheet tile index.
    tile_for_char = {
        '\u250F': 84,  # top-left corner
        '\u2501': 80,  # horizontal wall
        '\u2513': 85,  # top-right corner
        '\u2517': 82,  # bottom-left corner
        '\u251B': 83,  # bottom-right corner
        '\u2503': 81,  # vertical wall
    }
    walls_definition = []
    with open('resources/phase_1.txt', 'r') as f:
        for l in f:
            walls_definition.append(list(l))
    # Walk rows top-down; the window origin is bottom-left in pyglet.
    y = window.height - 24
    for row in walls_definition:
        x = 0
        for c in row:
            tile = tile_for_char.get(c)
            if tile is not None:
                sp_walls.append(init_sprite(
                    graphic_sequence[tile], x, y, batch, background))
            x += 24
        y -= 24
@window.event
def on_draw():
    # Redraw the frame: clear, then the batched maze + PacMan sprites,
    # then the death-animation sprite on top (drawn unconditionally).
    window.clear()
    batch.draw()
    sp_pc_died.draw()
def update(dt):
    """Advance PacMan's position along the current heading.

    `pc_move_direction` is a heading in degrees (0=right, 90=up,
    180=left, 270=down); `dt` is the elapsed time in seconds.
    """
    step = dt * pc_pixel_step
    deltas = {0: (step, 0), 90: (0, step), 180: (-step, 0), 270: (0, -step)}
    dx, dy = deltas.get(pc_move_direction, (0, 0))
    sp_pc.x += dx
    sp_pc.y += dy
@window.event
def on_key_press(symbol, modifiers):
    """Set PacMan's heading (degrees) and facing animation from the
    arrow keys."""
    global pc_move_direction
    controls = {
        key.RIGHT: (0, pc_anim_r),
        key.LEFT: (180, pc_anim_l),
        key.DOWN: (270, pc_anim_d),
        key.UP: (90, pc_anim_u),
    }
    if symbol in controls:
        pc_move_direction, sp_pc.image = controls[symbol]
def main():
    """Load the maze and run the pyglet event loop at ~100 updates/s."""
    load_world_def()
    pyglet.clock.schedule_interval(update, 0.01)
    pyglet.app.run()


if __name__ == "__main__":
    # Execute only when run as a script.
    main()
| null |
__init__.py
|
__init__.py
|
py
| 3,675 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyglet.window.Window",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyglet.window",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.ImageGrid",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.Animation.from_image_sequence",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.Animation.from_image_sequence",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.Animation.from_image_sequence",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.Animation.from_image_sequence",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pyglet.image.Animation.from_image_sequence",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyglet.image",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pyglet.sprite.Sprite",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pyglet.sprite",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pyglet.graphics.Batch",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyglet.graphics",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pyglet.graphics.OrderedGroup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyglet.graphics",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pyglet.graphics.OrderedGroup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pyglet.graphics",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pyglet.sprite.Sprite",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pyglet.sprite",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pyglet.sprite.Sprite",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pyglet.sprite",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key.RIGHT",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key.LEFT",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key.DOWN",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key.UP",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "pyglet.window.key",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "pyglet.clock.schedule_interval",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pyglet.clock",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "pyglet.app.run",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pyglet.app",
"line_number": 124,
"usage_type": "attribute"
}
] |
618977884
|
import time
from typing import List, Dict, Tuple
from django.contrib.auth.models import User
from django.core.handlers.wsgi import WSGIRequest
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.http.response import JsonResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from .context import pychess
from .context import Stockfish
from user.user import get_user_info
from pychess.Utils.Board import Board
from pychess.Utils.Cord import Cord
from pychess.Utils.lutils.lmovegen import genAllMoves, newMove
from pychess.Utils.lutils.lmove import parseAny
from pychess.Utils.logic import legalMoveCount, getStatus
from pychess.Utils.Move import Move, listToMoves, parseSAN, toSAN
from pychess.Utils.const import (
KING_CASTLE,
QUEEN_CASTLE,
BLACK,
WHITE,
cordDic,
WHITEWON,
BLACKWON,
)
from game.game import GameState
from game.models import Games
from move.models import Moves
from move.move import MoveState
from user.user import get_user_info
# Tuple indices returned by _move_to_board_location.
FROM_COORD = 0
TO_COORD = 1
# Squares are numbered 0-63, row-major, 8 per row.
BOARD_WIDTH = 8
# Path to the Stockfish binary, relative to the working directory.
STOCKFISH_ENGINE_LOC = "../../../mac_stockfish/stockfish-10-mac/Mac/stockfish-10-64"
# STOCKFISH_ENGINE_LOC = "./mac_stockfish/stockfish-10-mac/Mac/stockfish-10-64"
ONLINE_OPPONENT = "Online Opponent"
AI = "Computer"
# User id reserved for the computer player.
AI_ID = 0
WHITE_STR = "White"
BLACK_STR = "Black"
# NOTE(review): module-level board in the initial position, shared across
# requests — confirm it is safe to keep this global mutable state.
global_board = Board(setup=True)
@csrf_exempt
def create_chess_game(request: WSGIRequest) -> JsonResponse:
    """
    Takes the appropriate information from `request` and creates a new
    chess game
    Relevant attributes:
        user_id_1: str
        user_id_2: str
        white_user_id: str
        black_user_id: str
        id: int
    """
    state = GameState(request)
    new_game = Games.objects.create(**state.items())
    # Seed the game with its initial (pre-first-move) board state.
    first_move = MoveState()
    first_move.set_initial(new_game)
    add_move_state_to_database(first_move)
    return JsonResponse({"status": "success", "game_id": new_game.id})
def get_all_moves(request: WSGIRequest) -> JsonResponse:
    """Return every legal destination square for the piece at `index`."""
    board = _get_board(request)
    origin = int(request.GET.get("index"))
    candidates = _get_potential_board_moves(origin, board)
    legal = [Move(m) for m in genAllMoves(board.board)]
    destinations = _get_legal_destinations(candidates, legal)
    return JsonResponse({"moves": destinations})
def _get_board(request: WSGIRequest) -> Board:
    """ This will eventually be fleshed out into accessing the Database """
    game_id = request.GET.get("game_id")
    latest = _get_most_recent_move(game_id)
    latest.refresh_from_db()
    # Rebuild the position from the FEN stored after the last move.
    return Board(setup=latest.post_move_fen)
def _get_potential_board_moves(from_coord: int, board: Board) -> List[Move]:
    """ Given a piece at location 'from_coord', list every possible move it could
    make, legal and illegal alike
    """
    # Try every destination square, then drop moves that would leave the
    # mover in check.
    candidates = (_get_move(from_coord, dest, board) for dest in range(64))
    return [move for move in candidates
            if not board.board.willLeaveInCheck(move.move)]
def _get_legal_destinations(
    potential_moves: List[Move], all_legal_moves: List[Move]
) -> List[int]:
    """Destination squares (0-63) of the potential moves that are legal."""
    return [
        _move_to_board_location(move)[TO_COORD]
        for move in potential_moves
        if move in all_legal_moves
    ]
def _move_to_board_location(move: Move) -> Tuple[int, int]:
    """ Takes a move and returns a single tuple of ints, 0-63, representing a FROM and
    TO position.
    """
    # Squares are numbered row-major: index = y * 8 + x.
    from_coord = move.cord0.y * BOARD_WIDTH + move.cord0.x
    to_coord = move.cord1.y * BOARD_WIDTH + move.cord1.x
    return (from_coord, to_coord)
def make_move(request: WSGIRequest) -> JsonResponse:
    """Apply the requesting player's move, persist it, and report which
    pieces moved plus the winner (empty string while ongoing)."""
    game_id = request.GET.get("game_id")
    latest = _get_most_recent_move(game_id)
    user = get_user_info(request)
    color = WHITE if latest.white_user_id == user.id else BLACK
    board = _get_board(request)
    from_coord, to_coord = _get_coords_from_wsgi_request(request)
    moved_pieces = [{"from_coord": from_coord, "to_coord": to_coord}]
    move = _get_move(from_coord, to_coord, board)
    # A castle also relocates the rook.
    moved_pieces += _check_for_castle(move, color)
    board = board.move(move)
    record_move(board, move, game_id)
    return JsonResponse(
        {"moves": moved_pieces, "winner": check_if_game_over(board, game_id)}
    )
def _get_move(from_coord, to_coord, board: Board) -> Move:
    """Build a fully-qualified Move between two square indices on `board`."""
    raw = Move(newMove(from_coord, to_coord))
    return Move(raw.cord0, raw.cord1, board)
def _check_for_castle(move: Move, color: int) -> List[Dict[int, int]]:
    """Extra rook movement implied by a castle, for the given side."""
    if color == BLACK:
        return _black_check_castle(move)
    return _white_check_castle(move)
def _black_check_castle(move: Move) -> List[Dict[str, int]]:
    """Rook relocation for a black castle move (empty if not a castle)."""
    rook_squares = {
        QUEEN_CASTLE: ("a8", "d8"),
        KING_CASTLE: ("h8", "f8"),
    }
    if move.flag in rook_squares:
        src, dst = rook_squares[move.flag]
        return [{"from_coord": cordDic[src], "to_coord": cordDic[dst]}]
    return []
def _white_check_castle(move: Move) -> List[Dict[str, int]]:
    """Rook relocation for a white castle move (empty if not a castle).

    Returns:
        A list with one {"from_coord": int, "to_coord": int} entry when
        `move` is a castle, otherwise an empty list.
    """
    # BUGFIX(annotation only): the dict keys are strings, so the return
    # type is List[Dict[str, int]], matching _black_check_castle.
    if move.flag == QUEEN_CASTLE:
        return [{"from_coord": cordDic["a1"], "to_coord": cordDic["d1"]}]
    if move.flag == KING_CASTLE:
        return [{"from_coord": cordDic["h1"], "to_coord": cordDic["f1"]}]
    return []
def check_if_game_over(board: Board, game_id: int) -> str:
    """Return 'White'/'Black' when the game has been won, else ''.

    When a winner is found, the game is also marked finished in the
    database (side effect).
    """
    status, _ = getStatus(board)
    winner = {WHITEWON: WHITE_STR, BLACKWON: BLACK_STR}.get(status, "")
    if winner:
        game = Games.objects.get(id=game_id)
        game.ongoing = False
        game.save()
    return winner
def get_opponent_move(request: WSGIRequest) -> JsonResponse:
    """Dispatch to the AI engine, or block until the human opponent moves."""
    if not _opponent_is_ai(request):
        return wait_for_human_opponents_move(request)
    return get_ai_move(request)
def _opponent_is_ai(request: WSGIRequest) -> bool:
    """True when either seat in the game is held by the computer."""
    game = Games.objects.get(id=request.GET.get("game_id"))
    return AI_ID in (game.user_id_1, game.user_id_2)
def get_ai_move(request: WSGIRequest):
    """Have Stockfish choose and play a move for the AI side.

    Persists the move and returns the pieces that moved plus the winner
    (empty string while the game is ongoing).
    """
    game_id = request.GET.get("game_id")
    difficulty = get_difficulty_from_game(game_id)
    most_recent_move = _get_most_recent_move(game_id)
    board = _get_board(request)
    stockfish_color = BLACK if most_recent_move.black_user_id == AI_ID else WHITE
    engine = initialize_stockfish_engine(difficulty)
    _set_chess_engine_board_fen_position(engine, board)
    best_move_as_san = engine.get_best_move()
    # NOTE: the original re-fetched the most recent move here, but the
    # result was never used; the redundant query has been removed.
    stockfish_move = _convert_SAN_str_to_move(best_move_as_san, board)
    board = board.move(stockfish_move)
    record_move(board, stockfish_move, game_id)
    from_coord, to_coord = _move_to_board_location(stockfish_move)
    pieces_moved = [{"from_coord": from_coord, "to_coord": to_coord}]
    # A castle also relocates the rook.
    pieces_moved += _check_for_castle(stockfish_move, stockfish_color)
    return JsonResponse(
        {"moves": pieces_moved, "winner": check_if_game_over(board, game_id)}
    )
def wait_for_human_opponents_move(request: WSGIRequest) -> JsonResponse:
    """Block until it is the requesting player's turn, then report the
    opponent's last move.

    NOTE(review): this busy-polls the database every 0.5s with no timeout,
    holding the request open indefinitely — consider a deadline or a push
    mechanism.
    """
    game_id = request.GET.get("game_id")
    color_were_waiting_for = request.GET.get("player_color")
    most_recent_move = _get_most_recent_move(game_id)
    # `turn` is compared against the client-supplied color string —
    # presumably "White"/"Black"; confirm against the frontend.
    while most_recent_move.turn != color_were_waiting_for:
        most_recent_move = _get_most_recent_move(game_id)
        time.sleep(0.5)
    return figure_out_previously_moved_pieces(request)
def figure_out_previously_moved_pieces(request: WSGIRequest) -> JsonResponse:
    """Reconstruct the opponent's last move from the stored FEN/SAN and
    return the squares that changed plus the winner, if any."""
    game_id = request.GET.get("game_id")
    opponent_color = WHITE if request.GET.get("player_color") == WHITE_STR else BLACK
    most_recent_move = _get_most_recent_move(game_id)
    # Replay the stored move on the pre-move position.
    old_board = Board(setup=most_recent_move.pre_move_fen)
    old_move = _convert_SAN_str_to_move(most_recent_move.move_algebraic, old_board)
    current_board = old_board.move(old_move)
    from_coord, to_coord = _move_to_board_location(old_move)
    pieces_moved = [{"from_coord": from_coord, "to_coord": to_coord}]
    pieces_moved += _check_for_castle(old_move, opponent_color)
    # BUGFIX: the original referenced an undefined name `board` here;
    # the post-move position is `current_board`.
    return JsonResponse(
        {"moves": pieces_moved,
         "winner": check_if_game_over(current_board, game_id)}
    )
def _get_coords_from_wsgi_request(request: WSGIRequest) -> Tuple[int, int]:
    """Extract the (from, to) square indices from the query string."""
    params = request.GET
    return (int(params.get("from_coord")), int(params.get("to_coord")))
def initialize_stockfish_engine(difficulty: int) -> Stockfish:
    """Spawn a Stockfish engine process tuned to `difficulty`."""
    stockfish = Stockfish(STOCKFISH_ENGINE_LOC)
    stockfish.change_difficulty(difficulty)
    return stockfish
def get_difficulty_from_game(game_id: int) -> int:
    """Look up the configured engine difficulty for a game."""
    return Games.objects.get(id=game_id).difficulty
def _set_chess_engine_board_fen_position(engine: Stockfish, board: Board) -> None:
    """Sync the engine's internal position with `board` (as FEN)."""
    fen = board.asFen()
    engine.set_fen_position(fen)
def _convert_SAN_str_to_move(san: str, board: Board) -> Move:
    """Parse a move string (SAN or coordinate notation) in the context of
    `board` and return it as a fully-qualified Move.

    Args:
        san: the move in algebraic (or coordinate) notation.
        board: the position the move is to be played from.
    """
    # BUGFIX: the string was parsed against the module-level `global_board`
    # (always the initial position). SAN like "Nf3" is position-dependent,
    # so it must be parsed against the board the move is played from —
    # callers (get_ai_move, figure_out_previously_moved_pieces) pass that
    # board in.
    move = Move(parseAny(board.board, san))
    from_coord, to_coord = _move_to_board_location(move)
    return _get_move(from_coord, to_coord, board)
def _get_most_recent_move(game_id: int) -> Moves:
    """Fetch the highest-numbered move row for a game."""
    # Django appends `_id` to foreign-key columns, hence `game_id_id`.
    return (Moves.objects.filter(game_id_id=game_id)
            .order_by("-move_number")[0])
def record_move(board: Board, move: Move, game_id: int):
    """Persist `move` (already applied to `board`) as the game's next move."""
    previous = _get_most_recent_move(game_id)
    state = MoveState()
    state.set_state_from_prev_move(previous)
    state.post_move_fen = board.asFen()
    state.set_move(toSAN(board, move))
    add_move_state_to_database(state)
def add_move_state_to_database(move: MoveState):
    """Insert a new Moves row from the given MoveState."""
    # Moves.objects.create() already saves the row; the original's extra
    # save() call only issued a redundant UPDATE.
    Moves.objects.create(**move.items())
def get_current_games(request: WSGIRequest) -> JsonResponse:
    """List the requesting user's ongoing games."""
    user = get_user_info(request)
    # Games where the user holds either seat, restricted to ongoing ones.
    mine = Games.objects.filter(Q(user_id_1=user.id) | Q(user_id_2=user.id))
    active = mine.filter(ongoing=1)
    summaries = [build_game_status_dict(game, user) for game in active]
    return JsonResponse({"games": summaries})
def build_game_status_dict(game: Games, user: User) -> Dict[str, str]:
    """Summarise one game for the lobby list, from `user`'s perspective."""
    latest = _get_most_recent_move(game.id)
    if game.white_user_id == user.id:
        color, opponent_id = WHITE_STR, game.black_user_id
    else:
        color, opponent_id = BLACK_STR, game.white_user_id
    opponent = AI if opponent_id == AI_ID else f"Online Player - {opponent_id}"
    return {
        "id": game.id,
        "color": color,
        # The side to move is the opposite of the side that just moved.
        "turn": BLACK_STR if latest.turn == WHITE_STR else WHITE_STR,
        "count": latest.move_number,
        "opponent": opponent,
    }
def get_game_info(request: WSGIRequest) -> JsonResponse:
    """Return the current state of the requested game as JSON."""
    game_id = request.GET.get("game_id")
    user = get_user_info(request)
    # NOTE(review): ``board`` is never used below — confirm whether
    # _get_board has required side effects or this call can be dropped.
    board = _get_board(request)
    most_recent_move = _get_most_recent_move(game_id)
    return JsonResponse(
        {
            "fen": most_recent_move.post_move_fen,
            # Captured-piece tracking is not implemented; always empty.
            "captured_white_pieces": [],
            "captured_black_pieces": [],
            # The side to move is the opposite of whoever moved last.
            "turn": WHITE_STR if most_recent_move.turn == BLACK_STR else BLACK_STR,
            "count": most_recent_move.move_number,
            "your_color": WHITE_STR
            if most_recent_move.white_user_id == user.id
            else BLACK_STR,
        }
    )
| null |
chess/backend/src/game/logic.py
|
logic.py
|
py
| 11,936 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "game.game.GameState",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "game.game",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "game.models.Games.objects.create",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "game.models.Games",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "move.move.MoveState",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "game.game",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "game.game.id",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "move.models",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "pychess.Utils.lutils.lmovegen.genAllMoves",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "move.models.move",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "typing.List",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "move.models.cord0",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "move.models.cord1",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "user.user",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "user.user.get_user_info",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "user.user.id",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "user.user",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.WHITE",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.BLACK",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pychess.Utils.lutils.lmovegen.newMove",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "move.models",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "move.models.cord0",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "move.models.cord1",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.BLACK",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 160,
"usage_type": "argument"
},
{
"api_name": "typing.List",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "move.models.flag",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.QUEEN_CASTLE",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.cordDic",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "move.models.flag",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.KING_CASTLE",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.cordDic",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "move.models.flag",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.QUEEN_CASTLE",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.cordDic",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "move.models.flag",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "move.models",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.KING_CASTLE",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.cordDic",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.logic.getStatus",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pychess.Utils.const.WHITEWON",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.BLACKWON",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "game.models.Games.objects.get",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "game.models.Games",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "game.game.ongoing",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "game.game.save",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "game.game",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "game.models.Games.objects.get",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "game.models.Games",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "game.game.user_id_1",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "game.game.user_id_2",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.BLACK",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.WHITE",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.WHITE",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.const.BLACK",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "context.Stockfish",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "context.Stockfish",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "game.models.Games.objects.get",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "game.models.Games",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "game.game.difficulty",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "context.Stockfish",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pychess.Utils.lutils.lmove.parseAny",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "move.models",
"line_number": 292,
"usage_type": "argument"
},
{
"api_name": "move.models",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "move.models",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "move.models.Moves.objects.filter",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "move.models.Moves.objects",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "move.models.Moves",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "move.models.Moves",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Board.Board",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "pychess.Utils.Move.Move",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "move.move.MoveState",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "pychess.Utils.Move.toSAN",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "move.models",
"line_number": 307,
"usage_type": "argument"
},
{
"api_name": "move.move.MoveState",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "move.models.Moves.objects.create",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "move.models.Moves.objects",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "move.models.Moves",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "move.models.items",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "move.models",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "user.user",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "user.user.get_user_info",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects.filter",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "game.models.Games.objects",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "game.models.Games",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "user.user.id",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "user.user",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 323,
"usage_type": "argument"
},
{
"api_name": "user.user",
"line_number": 323,
"usage_type": "argument"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "game.models.Games",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "game.game.id",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "game.game.white_user_id",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "user.user.id",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "user.user",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "game.game.black_user_id",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "game.game.id",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "game.game.white_user_id",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "user.user.id",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "user.user",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "django.core.handlers.wsgi.WSGIRequest",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "user.user",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "user.user.get_user_info",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "user.user.id",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "user.user",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 348,
"usage_type": "name"
}
] |
68999832
|
# ReadableDicomDataset - helper object for manipulating DCM (DICOM) files
import numpy as np
import pandas as pd
from pydicom import dataset
from sklearn.model_selection import train_test_split
import shutil
from pydicom.encaps import decode_data_sequence
from PIL import Image
from typing import Union
from glob import glob
import io
import pydicom
import os
import sqlite3
import time
class ReadableDicomDataset():
    """Read-only view of a tiled whole-slide DICOM image.

    Presents the slide as one virtual 2-D pixel plane assembled from the
    encapsulated tile frames in the file's PixelData.
    """
    def __init__(self, filename):
        self._ds = pydicom.dcmread(filename)
        # (0048,0006)/(0048,0007): total pixel matrix columns/rows.
        self.geometry_imsize = (self._ds[0x48,0x6].value,self._ds[0x48,0x7].value)
        # Per-tile size taken from Columns/Rows of the dataset.
        self.geometry_tilesize = (self._ds.Columns, self._ds.Rows)
        # round(0.5 + x) acts as ceil(x) for the positive ratios here.
        self.geometry_columns = round(0.5+(self.geometry_imsize[0]/self.geometry_tilesize[0]))
        self.geometry_rows = round(0.5 + (self.geometry_imsize[1] / self.geometry_tilesize[1] ))
        # One encapsulated frame per tile, in row-major tile order.
        self._dsequence = decode_data_sequence(self._ds.PixelData)
    def imagePos_to_id(self, imagePos:tuple):
        """Map an (x, y) tile-grid position to a flat frame index."""
        id_x, id_y = imagePos
        return (id_x+(id_y*self.geometry_columns))
    def get_tile(self, pos):
        """Decode frame *pos* and return it as a numpy array."""
        return np.array(Image.open(io.BytesIO(self._dsequence[pos])))
    def get_id(self, pixelX:int, pixelY:int) -> tuple:
        """Return ((tile_x, tile_y), x_offset, y_offset) for a pixel.

        NOTE(review): tile ids use tilesize[1] for x and tilesize[0] for
        y, the opposite pairing from the offsets below — this is only
        consistent for square tiles; confirm for non-square tile sizes.
        """
        id_x = round(-0.5+(pixelX/self.geometry_tilesize[1]))
        id_y = round(-0.5+(pixelY/self.geometry_tilesize[0]))
        return (id_x,id_y), pixelX-(id_x*self.geometry_tilesize[0]), pixelY-(id_y*self.geometry_tilesize[1]),
    @property
    def dimensions(self):
        # (width, height) of the full virtual image in pixels.
        return self.geometry_imsize
    def read_region(self, location: tuple, size:tuple):
        """Return a crop of ``size`` pixels starting at ``location``.

        Stitches every tile overlapping the region into one buffer,
        then slices the requested window out of it.
        """
        lu, lu_xo, lu_yo = self.get_id(*list(location))
        rl, rl_xo, rl_yo = self.get_id(*[sum(x) for x in zip(location,size)])
        # generate big image covering all overlapped tiles
        # (third axis size = Samples per Pixel, tag (0028,0002))
        bigimg = np.zeros(((rl[1]-lu[1]+1)*self.geometry_tilesize[0], (rl[0]-lu[0]+1)*self.geometry_tilesize[1], self._ds[0x0028, 0x0002].value), np.uint8)
        for xi, xgridc in enumerate(range(lu[0],rl[0]+1)):
            for yi, ygridc in enumerate(range(lu[1],rl[1]+1)):
                # tiles left/above the slide origin stay zero-filled
                if (xgridc<0) or (ygridc<0):
                    continue
                bigimg[yi*self.geometry_tilesize[0]:(yi+1)*self.geometry_tilesize[0],
                       xi*self.geometry_tilesize[1]:(xi+1)*self.geometry_tilesize[1]] = \
                    self.get_tile(self.imagePos_to_id((xgridc,ygridc)))
        # crop big image down to the requested window
        return bigimg[lu_yo:lu_yo+size[1],lu_xo:lu_xo+size[0]]
def yolo_fix_labels(image_dir, label_dir):
    """Sanitize YOLO label files in *label_dir* in place.

    For every ``<stem>.txt``:
      * class labels containing '7' are remapped to '0';
      * cells whose class contains '4' are dropped;
      * cells whose box extends outside the [0, 1] unit square are dropped.
    If a file ends up with no cells (or started empty), both the label
    file and its matching ``<stem>.png`` in *image_dir* are deleted.

    Bug fixed vs. original: a cell rejected by the class-'4' rule was
    set to None and then dereferenced by the bounds check, raising
    TypeError; blank lines likewise raised IndexError. A leftover debug
    print was also removed.
    """
    for txt_file in glob(label_dir + "/*.txt"):
        stem = txt_file.split("/")[-1].split(".")[0]
        png_file = image_dir + "/" + stem + ".png"
        with open(txt_file, 'r') as f:
            cells = f.readlines()
        fixed_cells = []
        for cell in cells:
            parts = cell.strip().split()
            if not parts:
                # blank line: nothing to keep
                continue
            if '7' in parts[0]:
                parts[0] = '0'
            if '4' in parts[0]:
                # unwanted class: drop the cell entirely
                continue
            x, y, w, h = (float(v) for v in parts[1:5])
            # drop boxes that spill outside the normalized image
            if x + w/2 > 1 or x - w/2 < 0 or y + h/2 > 1 or y - h/2 < 0:
                continue
            fixed_cells.append(parts)
        with open(txt_file, 'w') as f:
            for fixed_cell in fixed_cells:
                f.write(' '.join(fixed_cell) + '\n')
        # remove image/label pairs that carry no usable annotations
        if len(fixed_cells) == 0 or len(cells) == 0:
            os.remove(txt_file)
            os.remove(png_file)
def yolo_dcm_to_train_set_includeNone(db = "original_data/archive/MITOS_WSI_CCMCT_ODAEL_train_dcm.sqlite", source_dir = "original_data/archive",\
        dest_dir = "datasets/origin", tile_size = 320, cell_size = 40, LABELS = ["Mitotic_figure_lookalike", "granulocyte", "mitotic_figure", "tumor_cell"]):
    """Cut every DICOM slide in *source_dir* into YOLO training tiles.

    For each ``IMGSZ`` x ``IMGSZ`` tile it writes a label file (one
    normalized ``label x y w h`` row per annotated cell from the sqlite
    annotation DB) and the corresponding PNG under *dest_dir*. Tiles
    with no annotations ("None" tiles) are kept at a 1-in-10 rate.
    """
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
        os.mkdir(dest_dir+"/images")
        os.mkdir(dest_dir+"/labels")
    DB = sqlite3.connect(db)
    cur = DB.cursor()
    IMGSZ = tile_size
    datasets = glob(source_dir +"/*.dcm")
    file_names = []
    for data in datasets:
        temp = data.split("/")[-1]
        file_names.append(temp.split('.')[0])
    file_names.sort()
    file_len = len(file_names)
    file_count = 0
    for file_name in file_names:
        file_count += 1
        # filename -> extract slide uid, width, height from the DB
        slide = cur.execute(f"""SELECT uid, width, height from Slides where filename == "{file_name+".dcm"}" """).fetchall()
        slide = slide[0]
        # Dicom dataset object used for reading the slide pixels
        ds = ReadableDicomDataset(source_dir+ "/" + file_name + ".dcm")
        tiles = ds.geometry_imsize
        tiles = int(tiles[0]/IMGSZ) * int(tiles[1]/IMGSZ)
        # annoid -> agreedClass lookup for this slide
        Annotations = pd.DataFrame(cur.execute(f"""SELECT uid, agreedClass FROM Annotations WHERE slide == {slide[0]}""").fetchall(),\
            columns=["annoid", "class"]).set_index('annoid')
        # all annotated cell coordinates for this slide
        cells = pd.DataFrame(cur.execute(f"""SELECT coordinateX, coordinateY, annoId
                            from Annotations_coordinates where slide=={slide[0]}""").fetchall(),\
            columns=['x', 'y', 'annoid'])
        idx = -1
        for col in range(0,slide[2]-IMGSZ,IMGSZ):
            for row in range(0,slide[1]-IMGSZ,IMGSZ):
                idx += 1
                if idx % 500 == 0:
                    # progress: file n/total, tile idx/total
                    print(file_count, "/", file_len, file_name,":",idx, "/", tiles)
                location = (row, col)
                # cells falling inside this tile, shifted to tile-local coords
                # NOTE(review): in-place assignment on a filtered frame may
                # trigger pandas SettingWithCopyWarning — confirm intended.
                local_cells = cells[(cells['x'] > location[0])&(cells['y'] > location[1])&\
                    (cells['x']<location[0]+IMGSZ)&(cells['y']<location[1]+IMGSZ)]
                local_cells['x'] -= location[0]
                local_cells['y'] -= location[1]
                lines = []
                flag = 0
                for cell in local_cells.values.tolist():
                    try:
                        label = Annotations.loc[cell[2]].values[0]
                    except:
                        # skip coordinates whose annoid has no Annotations row
                        continue
                    # originally x1, y1, x2, y2 — named w, h for convenience
                    x, y, w, h = cell[0]/IMGSZ, cell[1]/IMGSZ, cell_size/IMGSZ, cell_size/IMGSZ
                    if label == 7:
                        label = 0
                    # skip boxes outside the image and labels not in 0..3
                    if x - w/2 <= 0 or x + w/2 >= 1 or y - h/2 <= 0 or y + h/2 >= 1 or label >= 4:
                        continue
                    line = str(label) + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h) + "\n"
                    lines.append(line)
                    flag = 1
                # keep only 1/10 of empty-label tiles, otherwise ~1.24M images
                if flag == 1 or idx % 10 == 0:
                    with open(dest_dir+'/labels/'+file_name+'_'+str(idx)+'.txt', 'w') as f:
                        for l in lines:
                            f.write(l)
                    # save the image tile
                    img = Image.fromarray(ds.read_region(location=location,size=(IMGSZ,IMGSZ)))
                    img.save(dest_dir + "/images/" + file_name + "_" + str(idx) + ".png", 'png')
# yolo_dcm_to_png_includeNone -> yolo_train_test_split
def yolo_train_test_split(source_dir="datasets/origin", dest_dir = "datasets/New"):
    """Split the dataset in *source_dir* 80/20 and move files into *dest_dir*.

    Writes ``train.txt`` / ``val.txt`` manifests next to the source data,
    then moves each listed image (.png) and label (.txt) into
    ``dest_dir/images/{train,val}`` and ``dest_dir/labels/{train,val}``.
    """
    stems = [p.split("/")[-1].split(".")[0] for p in glob(source_dir + "/labels/*.txt")]
    train_stems, val_stems = train_test_split(stems, random_state=7, test_size=0.2)
    print(len(train_stems), len(val_stems))
    # Record the split so it can be reproduced / inspected later.
    for manifest, names in (("/train.txt", train_stems), ("/val.txt", val_stems)):
        with open(source_dir + manifest, 'w') as f:
            for name in names:
                f.write(name + "\n")
    # Create the destination layout that the moves below expect.
    img_dest = dest_dir + "/images"
    label_dest = dest_dir + "/labels"
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    for parent in (img_dest, label_dest):
        if not os.path.exists(parent):
            os.makedirs(parent)
            os.makedirs(parent + "/train")
            os.makedirs(parent + "/val")
    # Re-read the manifests and move every listed image/label pair.
    for manifest, split in (("/train.txt", "/train"), ("/val.txt", "/val")):
        with open(source_dir + manifest, 'r') as f:
            for entry in f.readlines():
                stem = entry.strip()
                shutil.move(source_dir + "/images/" + stem + ".png", img_dest + split)
                shutil.move(source_dir + "/labels/" + stem + ".txt", label_dest + split)
## M2det expired!
# def m2det_dcm_to_train_set(db = "original_data/archive/MITOS_WSI_CCMCT_ODAEL_train_dcm.sqlite", source_dir = "original_data/archive",\
# dest_dir = "datasets/img_320", tile_size = 640, cell_size = 40, LABELS = ["Mitotic_figure_lookalike", "granulocyte", "mitotic_figure", "tumor_cell"]):
# DB = sqlite3.connect(db)
# cur = DB.cursor()
# IMGSZ = tile_size
# datasets = glob(source_dir +"/*.dcm")
# if not os.path.isdir(dest_dir):
# os.mkdir(dest_dir)
# if not os.path.isdir(dest_dir+"/images"):
# os.mkdir(dest_dir+"/images")
# file_names = []
# for data in datasets:
# temp = data.split("/")[-1]
# file_names.append(temp.split('.')[0])
# file_names.sort()
# file_len = len(file_names)
# file_count = 0
# with open(dest_dir+'/'+'labels.csv','w') as f:
# for index,l in enumerate(LABELS):
# f.write(l+','+str(index)+'\n')
# f = open(dest_dir+"/"+"train.csv", 'w')
# for file_name in file_names:
# file_count += 1
# # filename -> slide 번호, width, height 추출
# slide = cur.execute(f"""SELECT uid, width, height from Slides where filename == "{file_name+".dcm"}" """).fetchall()
# slide = slide[0]
# # 이미지 읽기 위한 Dicom Dataset 객체 객체 생성
# ds = ReadableDicomDataset(source_dir+ "/" + file_name + ".dcm")
# tiles = ds.geometry_imsize
# tiles = int(tiles[0]/IMGSZ) * int(tiles[1]/IMGSZ)
# Annotations = pd.DataFrame(cur.execute(f"""SELECT uid, agreedClass FROM Annotations WHERE slide == {slide[0]}""").fetchall(),\
# columns=["annoid", "class"]).set_index('annoid')
# cells = pd.DataFrame(cur.execute(f"""SELECT coordinateX, coordinateY, annoId
# from Annotations_coordinates where slide=={slide[0]}""").fetchall(),\
# columns=['x', 'y', 'annoid'])
# idx = -1
# for col in range(0,slide[2]-IMGSZ,IMGSZ):
# for row in range(0,slide[1]-IMGSZ,IMGSZ):
# idx += 1
# if idx % 500 == 0:
# print(file_count, "/", file_len, file_name,":",idx, "/", tiles)
# location = (row, col)
# local_cells = cells[['x','y','annoid']][cells['x'] > location[0]][cells['y'] > location[1]]\
# [cells['x']<location[0]+IMGSZ][cells['y']<location[1]+IMGSZ]
# local_cells['x'] -= location[0]
# local_cells['y'] -= location[1]
# flag = 0
# for cell in local_cells.values.tolist():
# try:
# label = Annotations.loc[cell[2]].values[0]
# except:
# print(cell)
# continue
# # 원래 x1, y1, x2, y2이지만 편의상 w, h로 표기
# x, y, w, h = cell[0]-cell_size//2 , cell[1]-cell_size//2, cell[0] + cell_size//2, cell[1] + cell_size//2
# if label == 7:
# label = 0
# #imgsize 범위 벗어나는 경우, label 0 ~ 3이 아닌 경우 pass
# if label >= 4 or x <= 0 or y <= 0 or w >= IMGSZ or h >= IMGSZ:
# continue
# img_name, label = "images/"+file_name+"_"+str(idx)+'.dcm ', LABELS[label]
# line = dest_dir+"/"+img_name + "," + str(x) + "," + str(y) + "," + str(w) + "," + str(h) + "," + label + "\n"
# f.write(line)
# flag = 1
# if flag == 0:
# f.write(dest_dir+"/images/"+file_name+"_"+str(idx)+'.dcm ' + ",,,,,\n")
# # # 이미지 저장
# # img = Image.fromarray(ds.read_region(location=location,size=(IMGSZ,IMGSZ)))
# # img.save(dest_dir + "/images/" + file_name + "_" + str(idx) + ".png", 'png')
# f.close()
# inspect the collected annotations as a DataFrame
def get_all_cells(label_dir):
    """Collect every YOLO label row under *label_dir* into one DataFrame."""
    rows = []
    for path in glob(label_dir + "/" + "*.txt"):
        # re-anchor the basename under label_dir (mirrors the lookup used
        # elsewhere in this module)
        path = label_dir + "/" + path.split("/")[-1]
        with open(path, 'r') as f:
            for line in f.readlines():
                rows.append(line.strip().split())
    return pd.DataFrame(rows, columns=["label", "x", "y", "w", "h"])
| null |
utils/data_util_linux.py
|
data_util_linux.py
|
py
| 14,415 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pydicom.dcmread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pydicom.encaps.decode_data_sequence",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 358,
"usage_type": "call"
}
] |
202526785
|
# -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from openerp import api, models, fields
from openerp.exceptions import Warning
from datetime import datetime
class ResPartnerBank(models.Model):
    """Extend res.partner.bank to auto-create a SEPA banking mandate
    whenever a new bank account record is created."""
    _inherit = 'res.partner.bank'

    @api.model
    def create(self, values):
        # Create the bank account normally, then trigger mandate creation
        # on the newly created record.
        return_create = super(ResPartnerBank, self).create(values)
        return_create.auto_create_banking_mandate_item()
        return return_create

    @api.one
    def auto_create_banking_mandate_item(self):
        """Create a recurring CORE SEPA mandate for this bank account when
        the partner has a bank account without an unexpired mandate.

        NOTE(review): the recordset comparison ``!= False`` and the loop
        that creates a mandate for ``self`` (rather than for the bank
        account found to lack one) look suspicious — this can create a
        mandate on ``self`` once per mandate-less sibling account.
        Confirm intended behavior.
        """
        current_date = datetime.today()
        # all bank accounts belonging to this account's partner
        res_partner_banks_ids_get = self.env['res.partner.bank'].search([('partner_id', '=', self.partner_id.id)])
        if res_partner_banks_ids_get!=False:
            for res_partner_banks_id_get in res_partner_banks_ids_get:
                # only customers (not suppliers) get auto-created mandates
                if res_partner_banks_id_get.partner_id.supplier==False:
                    # any non-expired mandate already attached to this account?
                    account_banking_mandate_ids_get = self.env['account.banking.mandate'].search(
                        [
                            ('state', '!=', 'expired'),
                            ('partner_bank_id', '=', res_partner_banks_id_get.id)
                        ]
                    )
                    if len(account_banking_mandate_ids_get)==0:
                        account_banking_mandate_vals = {
                            'auto_create': True,
                            'format': 'sepa',
                            'scheme': 'CORE',
                            'type': 'recurrent',
                            'recurrent_sequence_type': 'recurring',
                            'partner_bank_id': self.id,
                            'partner_id': self.partner_id.id,
                            'signature_date': current_date.strftime("%Y-%m-%d"),
                        }
                        # sudo(): mandate creation may exceed the user's rights
                        account_banking_mandate_obj = self.env['account.banking.mandate'].sudo().create(account_banking_mandate_vals)
                        #validate
                        account_banking_mandate_obj.validate()
| null |
account_banking_mandate_auto_create/models/res_partner_bank.py
|
res_partner_bank.py
|
py
| 2,184 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "openerp.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "openerp.api.model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "openerp.api.one",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 18,
"usage_type": "name"
}
] |
117953793
|
from django.conf import settings
from django.conf.urls import *
from django.contrib import admin
from django.views.generic.base import TemplateView
from rest_framework import routers
import manabi.views
from manabi.apps.manabi_auth.api_views import RegistrationWithTokenView
from manabi.apps.flashcards.api_views import (
DeckViewSet,
SynchronizedDeckViewSet,
SharedDeckViewSet,
SuggestedSharedDecksViewSet,
ManabiReaderFactViewSet,
FactViewSet,
ReviewAvailabilitiesViewSet,
NextCardsForReviewViewSet,
CardViewSet,
)
from manabi.apps.review_results.api_views import ReviewResultsView
api_router = routers.DefaultRouter()
api_router.register(r'flashcards/decks',
DeckViewSet,
base_name='deck')
api_router.register(r'flashcards/synchronized_decks',
SynchronizedDeckViewSet,
base_name='synchronized-deck')
api_router.register(r'flashcards/suggested_shared_decks',
SuggestedSharedDecksViewSet,
base_name='suggested-shared-deck')
api_router.register(r'flashcards/shared_decks',
SharedDeckViewSet,
base_name='shared-deck')
api_router.register(r'flashcards/facts',
FactViewSet,
base_name='fact')
api_router.register(r'flashcards/manabi_reader_facts',
ManabiReaderFactViewSet,
base_name='fact')
api_router.register(r'flashcards/cards',
CardViewSet,
base_name='card')
api_router.register(r'flashcards/review_availabilities',
ReviewAvailabilitiesViewSet,
base_name='card')
api_router.register(r'flashcards/next_cards_for_review',
NextCardsForReviewViewSet,
base_name='next-card-for-review')
api_router.register(r'flashcards/review_results',
ReviewResultsView,
base_name='review-results')
urlpatterns = [
url(r'^apple-app-site-association$', TemplateView.as_view(
template_name='apple_app_site_association.json',
content_type='application/json',
)),
url(r'^ios-required/', TemplateView.as_view(
template_name='ios_required.html'), name='ios-required'),
url(r'^accounts/', include('allauth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^rq/', include('django_rq.urls')),
url(r'^$', manabi.views.homepage, name='homepage'),
url(r'^flashcards/', include('manabi.apps.flashcards.urls')),
url(r'^users/', include('manabi.apps.profiles.urls')),
url(r'^terms-of-service/$', TemplateView.as_view(
template_name='tos.html'), name='terms_of_service'),
url(r'^privacy-policy/$', TemplateView.as_view(
template_name='privacy.html'), name='privacy_policy'),
url(r'^credits/$', TemplateView.as_view(
template_name='credits.html'), name='credits'),
# API URLs.
url(r'^api/', include(api_router.urls, namespace='api')),
url(r'^api/auth/register/$', RegistrationWithTokenView.as_view(),
name='register'),
url(r'^api/auth/', include('djoser.urls.authtoken')),
url(r'^api/flashcards/', include('manabi.apps.flashcards.api_urls')),
url(r'^api/flashcards/review_results/', include('manabi.apps.review_results.api_urls')),
url(r'^api/subscriptions/', include('manabi.apps.subscriptions.api_urls')),
url(r'^api/furigana/', include('manabi.apps.furigana.urls')),
url(r'^api/twitter_usages/', include('manabi.apps.twitter_usages.urls')),
]
# if not settings.LIVE_HOST:
# urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]
if 'silk' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]
| null |
manabi/urls.py
|
urls.py
|
py
| 3,502 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "manabi.apps.flashcards.api_views.DeckViewSet",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.SynchronizedDeckViewSet",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.SuggestedSharedDecksViewSet",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.SharedDeckViewSet",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.FactViewSet",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.ManabiReaderFactViewSet",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.CardViewSet",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.ReviewAvailabilitiesViewSet",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.flashcards.api_views.NextCardsForReviewViewSet",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "manabi.apps.review_results.api_views.ReviewResultsView",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "manabi.views.views",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "manabi.views",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "manabi.apps.manabi_auth.api_views.RegistrationWithTokenView.as_view",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "manabi.apps.manabi_auth.api_views.RegistrationWithTokenView",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.INSTALLED_APPS",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 93,
"usage_type": "name"
}
] |
337955562
|
import os
import numpy as np
from collections import Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
print("START")
def make_Dictionary(root_dir,size):
all_words = []
emails = [os.path.join(root_dir,f) for f in os.listdir(root_dir)]
for mail in emails:
with open(mail) as m:
for line in m:
words = line.split()
all_words += words
dictionary = Counter(all_words)
list_to_remove = list(dictionary)
for item in list_to_remove:
if item.isalpha() == False:
del dictionary[item]
elif len(item) == 1:
del dictionary[item]
dictionary = dictionary.most_common(size)
return dictionary
def extract_features(mail_dir,size):
files = [os.path.join(mail_dir,fi) for fi in os.listdir(mail_dir)]
features_matrix = np.zeros((len(files),size))
train_labels = np.zeros(len(files))
count = 0;
docID = 0;
for fil in files:
with open(fil) as fi:
for i,line in enumerate(fi):
if i == 2:
words = line.split()
for word in words:
wordID = 0
for i,d in enumerate(dictionary):
if d[0] == word:
wordID = i
features_matrix[docID,wordID] = words.count(word)
train_labels[docID] = 0;
filepathTokens = fil.split('/')
lastToken = filepathTokens[len(filepathTokens) - 1]
if lastToken.startswith("spmsg"):
train_labels[docID] = 1;
count = count + 1
docID = docID + 1
return features_matrix, train_labels
score_Gaussian=[]
score_Bernoulli=[]
TRAIN_DIR = "train-mails/"
TEST_DIR = "test-mails/"
for flag_model in range (1,3,1):
for i in range(1,10000,1000):
dictionary = make_Dictionary(TRAIN_DIR,i)
print("Reading and processing emails from file...")
features_matrix, labels = extract_features(TRAIN_DIR,i)
test_feature_matrix, test_labels = extract_features(TEST_DIR,i)
if flag_model==1:
print("Training model: Gaussian")
model = GaussianNB()
else:
print("Training model: Bernoulli")
model = BernoulliNB()
#train model
model.fit(features_matrix, labels)
predicted_labels = model.predict(test_feature_matrix)
if flag_model==1:
score_Gaussian.append(accuracy_score(test_labels, predicted_labels))
else:
score_Bernoulli.append(accuracy_score(test_labels, predicted_labels))
x = np.arange(1., 10000., 1000)
plot=plt.figure()
plt.scatter(x,score_Gaussian,color='red')
plt.scatter(x,score_Bernoulli,color='blue')
plt.xlabel("Most Common Words")
plt.ylabel("Accuracy Score")
plt.legend(["Gaussian","Bernoulli"])
plt.title("Gaussian vs. Bernoulli Accuracy Scores")
plt.show()
| null |
New folder/Ass1.py
|
Ass1.py
|
py
| 3,076 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.BernoulliNB",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
}
] |
280329285
|
from datetime import datetime
from typing import List, Optional
from fastapi import Depends, HTTPException, Path
from fastapi.param_functions import Query
from fastapi.responses import JSONResponse
from rx import operators as rxops
from api_server.base_app import BaseApp
from api_server.dependencies import pagination_query
from api_server.fast_io import FastIORouter, WatchRequest
from api_server.models import (
CancelTask,
SubmitTask,
SubmitTaskResponse,
Task,
TaskSummary,
User,
)
from api_server.models.pagination import Pagination
from api_server.repositories import RmfRepository
from api_server.routes.utils import rx_watcher
from api_server.services.tasks import convert_task_request
from .dispatcher import DispatcherClient
from .utils import get_task_progress
class TasksRouter(FastIORouter):
def __init__(self, app: BaseApp):
user_dep = app.auth_dep
super().__init__(tags=["Tasks"], user_dep=user_dep)
_dispatcher_client: Optional[DispatcherClient] = None
def dispatcher_client_dep():
nonlocal _dispatcher_client
if _dispatcher_client is None:
_dispatcher_client = DispatcherClient(app.rmf_gateway())
return _dispatcher_client
@self.get("/{task_id}/summary", response_model=TaskSummary)
async def get_task_summary(
rmf_repo: RmfRepository = Depends(app.rmf_repo),
task_id: str = Path(..., description="task_id with '/' replaced with '__'"),
):
"""
Available in socket.io
"""
ts = await rmf_repo.get_task_summary(task_id)
return ts.dict(exclude_none=True)
@self.watch("/{task_id}/summary")
async def watch_task_summary(req: WatchRequest, task_id: str):
await req.emit(await get_task_summary(RmfRepository(req.user), task_id))
rx_watcher(
req,
app.rmf_events().task_summaries.pipe(
rxops.filter(lambda x: x.task_id == task_id),
rxops.map(lambda x: x.dict(exclude_none=True)),
),
)
def to_task(task_summary: TaskSummary):
return Task.construct(
task_id=task_summary.task_id,
authz_grp=task_summary.authz_grp,
progress=get_task_progress(
task_summary,
app.rmf_gateway().now(),
),
summary=task_summary,
)
@self.get("", response_model=List[Task])
async def get_tasks(
rmf_repo: RmfRepository = Depends(app.rmf_repo),
pagination: Pagination = Depends(pagination_query),
task_id: Optional[str] = Query(
None, description="comma separated list of task ids"
),
fleet_name: Optional[str] = Query(
None, description="comma separated list of fleet names"
),
submission_time_since: Optional[datetime] = None,
start_time_since: Optional[datetime] = None,
end_time_since: Optional[datetime] = None,
robot_name: Optional[str] = Query(
None, description="comma separated list of robot names"
),
state: Optional[str] = Query(
None, description="comma separated list of states"
),
task_type: Optional[str] = Query(
None, description="comma separated list of task types"
),
priority: Optional[int] = None,
):
task_summaries = await rmf_repo.query_task_summaries(
pagination,
task_id=task_id,
fleet_name=fleet_name,
submission_time_since=submission_time_since,
start_time_since=start_time_since,
end_time_since=end_time_since,
robot_name=robot_name,
state=state,
task_type=task_type,
priority=priority,
)
return [to_task(t) for t in task_summaries]
@self.post("/submit_task", response_model=SubmitTaskResponse)
async def submit_task(
submit_task_params: SubmitTask,
user: User = Depends(user_dep),
dispatcher_client: DispatcherClient = Depends(dispatcher_client_dep),
):
req_msg, err_msg = convert_task_request(
submit_task_params, app.rmf_gateway().now()
)
if err_msg:
raise HTTPException(422, err_msg)
rmf_resp = await dispatcher_client.submit_task_request(user, req_msg)
if not rmf_resp.success:
raise HTTPException(422, rmf_resp.message)
return {"task_id": rmf_resp.task_id}
@self.post("/cancel_task")
async def cancel_task(
task: CancelTask,
user: User = Depends(user_dep),
dispatcher_client: DispatcherClient = Depends(dispatcher_client_dep),
):
cancel_status = await dispatcher_client.cancel_task_request(task, user)
return JSONResponse(content={"success": cancel_status})
| null |
packages/api-server/api_server/routes/tasks/tasks.py
|
tasks.py
|
py
| 5,255 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "api_server.fast_io.FastIORouter",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "api_server.base_app.BaseApp",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "dispatcher.DispatcherClient",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "dispatcher.DispatcherClient",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "api_server.repositories.RmfRepository",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "fastapi.Path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "api_server.models.TaskSummary",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "api_server.fast_io.WatchRequest",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "api_server.repositories.RmfRepository",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "api_server.routes.utils.rx_watcher",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "rx.operators.filter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "rx.operators",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "rx.operators.map",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "rx.operators",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "api_server.models.TaskSummary",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "api_server.models.Task.construct",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "api_server.models.Task",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "utils.get_task_progress",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "api_server.repositories.RmfRepository",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "api_server.models.pagination.Pagination",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "api_server.dependencies.pagination_query",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "fastapi.param_functions.Query",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "fastapi.param_functions.Query",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "fastapi.param_functions.Query",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "fastapi.param_functions.Query",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "fastapi.param_functions.Query",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "api_server.models.Task",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "api_server.models.SubmitTask",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "api_server.models.User",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "dispatcher.DispatcherClient",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "api_server.services.tasks.convert_task_request",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "api_server.models.SubmitTaskResponse",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "api_server.models.CancelTask",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "api_server.models.User",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "dispatcher.DispatcherClient",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "fastapi.responses.JSONResponse",
"line_number": 139,
"usage_type": "call"
}
] |
271667268
|
import copy
import datetime
import unittest
import cfdm
# Note: it is important we test on the cfdm logging config rather than the
# generic Python module logging (i.e. 'cfdm.logging' not just 'logging').
# Also, mimic the use in the codebase by using a module-specific logger:
log_name = __name__
logger = cfdm.logging.getLogger(log_name)
class dummyClass:
'''Dummy class acting as container to test methods as proper instance
methods, mirroring their context in the codebase.
'''
def __init__(self):
self._list = [1]
self.debug_message = "A major clue to solving the evasive bug"
self.detail_message = "In practice this will be very detailed."
self.info_message = "This should be short and sweet"
self.warning_message = "Best pay attention to this!"
def copy(self):
return copy.deepcopy(self) # note a shallow copy is not sufficient
def func(self, inplace):
'''Dummy function to do something trivial to a mutable object,
potentially in-place as toggled by an in-place flag.
'''
if inplace:
d = self
else:
d = self.copy()
d._list.append(2)
if inplace:
d = None
return d
@cfdm.decorators._inplace_enabled(False)
def decorated_func(self, inplace):
'''Dummy function equivalent to 'func' but a decorator manages the
logic to specify and conduct in-place operation.
'''
d = cfdm.decorators._inplace_enabled_define_and_cleanup(self)
d._list.append(2)
return d
@cfdm.decorators._manage_log_level_via_verbosity
def decorated_logging_func(self, verbose=None):
logger.debug(self.debug_message)
logger.detail(self.detail_message)
logger.info(self.info_message)
logger.warning(self.warning_message)
# --- End: class
class DecoratorsTest(unittest.TestCase):
def setUp(self):
self.test_only = []
def test_inplace_enabled(self):
if self.test_only and inspect.stack()[0][3] not in self.test_only:
return
# Note we must initiate separate classes as a list is mutable:
test_class_1 = dummyClass()
test_class_2 = dummyClass()
# Test when not in-place
res_1 = test_class_1.func(inplace=False)
res_2 = test_class_2.decorated_func(inplace=False)
self.assertEqual(test_class_1._list, test_class_2._list)
self.assertEqual(test_class_2._list, [1]) # as original list untouched
self.assertEqual(res_1._list, res_2._list)
self.assertEqual(res_2._list, [1, 2]) # as return d copy, not original
# Test when in-place
res_3 = test_class_1.func(inplace=True)
res_4 = test_class_2.decorated_func(inplace=True)
self.assertEqual(test_class_1._list, test_class_2._list)
# As do the operation in-place on the original (class) list object:
self.assertEqual(test_class_2._list, [1, 2])
self.assertEqual(res_3, res_4)
self.assertEqual(res_4, None) # as return None if inplace=True
def test_manage_log_level_via_verbosity(self):
if self.test_only and inspect.stack()[0][3] not in self.test_only:
return
test_class = dummyClass()
# Order of decreasing severity/verbosity is crucial to one test below
levels = ['WARNING', 'INFO', 'DETAIL', 'DEBUG']
# Note we test assertions on the root logger object, which is the
# one output overall at runtime, but the specific module logger name
# should be registered within the log message:
log_message = [
'WARNING:{}:{}'.format(log_name, test_class.warning_message),
'INFO:{}:{}'.format(log_name, test_class.info_message),
'DETAIL:{}:{}'.format(log_name, test_class.detail_message),
'DEBUG:{}:{}'.format(log_name, test_class.debug_message)
]
for level in levels:
cfdm.log_level(level) # reset to level
# Default verbose(=None) cases: log_level should determine output
with self.assertLogs(level=cfdm.log_level()) as catch:
test_class.decorated_logging_func()
for msg in log_message:
# log_level should prevent messages less severe appearing:
if levels.index(level) >= log_message.index(msg):
self.assertIn(msg, catch.output)
else: # less severe, should be effectively filtered out
self.assertNotIn(msg, catch.output)
# Cases where verbose is set; value should override log_level...
# Highest verbosity case (note -1 == 'DEBUG', highest verbosity):
# all messages should appear, regardless of global log_level:
for argument in (-1, 'DEBUG', 'debug', 'Debug', 'DeBuG'):
with self.assertLogs(level=cfdm.log_level()) as catch:
test_class.decorated_logging_func(verbose=argument)
for msg in log_message:
self.assertIn(msg, catch.output)
# Lowest verbosity case ('WARNING' / 1) excluding special case of
# 'DISABLE' (see note above): only warning messages should appear,
# regardless of global log_level value set:
for argument in (1, 'WARNING', 'warning', 'Warning', 'WaRning'):
with self.assertLogs(level=cfdm.log_level()) as catch:
test_class.decorated_logging_func(verbose=argument)
for msg in log_message:
if msg.split(":")[0] == 'WARNING':
self.assertIn(msg, catch.output)
else:
self.assertNotIn(msg, catch.output)
# Boolean cases for testing backwards compatibility...
# ... verbose=True should be equivalent to verbose=3 now:
with self.assertLogs(level=cfdm.log_level()) as catch:
test_class.decorated_logging_func(verbose=True)
for msg in log_message:
if msg.split(":")[0] == 'DEBUG':
self.assertNotIn(msg, catch.output)
else:
self.assertIn(msg, catch.output)
# ... verbose=False should be equivalent to verbose=0 now, so
# test along with 'DISABLE' special case below...
# Special 'DISABLE' (0) case: note this needs to be last as we
# reset the log_level to it but need to use 'NOTSET' for the
# assertLogs level, which sends all log messages through:
for argument in (0, 'DISABLE', 'disable', 'Disable', 'DisAblE'):
with self.assertLogs(level='NOTSET') as catch:
# Note: get 'AssertionError' if don't log anything at all,
# so to avoid this and allow check for disabled logging,
# first log something then disable and check that no other
# messages emerge:
logger.info(
"Purely to keep 'assertLog' happy: see comment!")
cfdm.log_level('DISABLE')
test_class.decorated_logging_func(verbose=argument)
for msg in log_message: # nothing else should be logged
self.assertNotIn(msg, catch.output)
# verbose=False should be equivalent in behaviour to verbose=0
with self.assertLogs(level='NOTSET') as catch:
logger.info("Purely to keep 'assertLog' happy: see previous!")
test_class.decorated_logging_func(verbose=False)
for msg in log_message: # nothing else should be logged
self.assertNotIn(msg, catch.output)
# --- End: class
if __name__ == '__main__':
print('Run date:', datetime.datetime.now())
cfdm.environment()
print('')
unittest.main(verbosity=2)
| null |
cfdm/test/test_decorators.py
|
test_decorators.py
|
py
| 8,075 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cfdm.logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cfdm.logging",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cfdm.decorators._inplace_enabled_define_and_cleanup",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cfdm.decorators",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cfdm.decorators._inplace_enabled",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cfdm.decorators",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cfdm.decorators",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cfdm.log_level",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "cfdm.log_level",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "cfdm.log_level",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cfdm.log_level",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "cfdm.log_level",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "cfdm.log_level",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "cfdm.environment",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 194,
"usage_type": "call"
}
] |
99899365
|
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
use_gpu = torch.cuda.is_available()
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
import ipdb; ipdb.set_trace()
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
if use_gpu:
model_ft = model_ft.cuda()
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
| null |
transfer_learning.py
|
transfer_learning.py
|
py
| 2,245 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "ipdb.set_trace",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 80,
"usage_type": "name"
}
] |
515700353
|
from typing import Dict
# This creates a json model to convert sql queries.
class Task():
def __init__(
self,
taskUserId,
taskId,
title,
taskDescription,
dateCreated,
completed,
):
self.taskUserId = taskUserId
self.taskId = taskId
self.title = title
self.taskDescription = taskDescription
self.dateCreated = dateCreated
self.completed = completed
def get(self):
return "this is a test"
def getDict(self) -> Dict:
return {
"taskId": self.taskId,
"taskUserId": self.taskUserId,
"title": self.title,
"taskDescription": self.taskDescription,
"dateCreated": self.dateCreated,
"completed": self.completed
}
| null |
functions/Model/Task.py
|
Task.py
|
py
| 831 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.Dict",
"line_number": 26,
"usage_type": "name"
}
] |
215851569
|
# MIT License
#
# Copyright (c) 2019 Oz N Tiram
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This code contains a significant portion of the stencil template engine
# This code contains a significant portion of the confucius code both are
# Copyright (c) 2016 Curtis Maloney
"""
Example usage::
from chick import Chick
chick = Chick()
@chick.get("/")
def index(environ):
return [b"Hello World!\n"]
@chick.post("/input/")
def test_post(environ):
r = ''.join(('{} {}\n'. format(k, v) for
k, v in environ.items())).encode()
return [r]
if __name__ == "__main__":
chick = Chick()
from wsgiref.simple_server import make_server
httpd = make_server('', 8000, chick)
print("Serving on port 8000...")
# Serve until process is killed
httpd.serve_forever()
"""
import html
import importlib
import re
import token
import tokenize
from os import getenv
from typing import get_type_hints
from io import StringIO
from pathlib import Path
from collections import defaultdict, deque, namedtuple, ChainMap
TOK_COMMENT = "comment"
TOK_TEXT = "text"
TOK_VAR = "var"
TOK_BLOCK = "block"
tag_re = re.compile(
r"{%\s*(?P<block>.+?)\s*%}|{{\s*(?P<var>.+?)\s*}}|{#\s*(?P<comment>.+?)\s*#}", # noqa
re.DOTALL)
Token = namedtuple("Token", "type content")
class SafeStr(str):
__safe__ = True
def __str__(self):
return self
def tokenise(template):
upto = 0
for m in tag_re.finditer(template):
start, end = m.span()
if upto < start:
yield Token(TOK_TEXT, template[upto:start])
upto = end
mode = m.lastgroup
yield Token(mode, m[mode].strip())
if upto < len(template):
yield Token(TOK_TEXT, template[upto:])
class TemplateLoader(dict):
def __init__(self, paths):
self.paths = [Path(path).resolve() for path in paths]
def load(self, name, encoding="utf8"):
for path in self.paths:
full_path = path / name
if full_path.is_file():
return Template(full_path.read_text(encoding), loader=self,
name=name)
raise LookupError(name)
def __missing__(self, key):
self[key] = tmpl = self.load(key)
return tmpl
class Context(ChainMap):
def __init__(self, *args, escape=html.escape):
super().__init__(*args)
self.maps.append({"True": True, "False": False, "None": None})
self.escape = escape
def push(self, data=None):
self.maps.insert(0, data or {})
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.maps.pop(0)
class Nodelist(list):
def render(self, context, output):
for node in self:
node.render(context, output)
def nodes_by_type(self, node_type):
for node in self:
if isinstance(node, node_type):
yield node
if isinstance(node, BlockNode):
yield from node.nodes_by_type(node_type)
class Template:
def __init__(self, src, loader=None, name=None):
self.tokens, self.loader = tokenise(src), loader
self.name = name # So we can report where the fault was
self.nodelist = self.parse_nodelist([])
def parse(self):
for tok in self.tokens:
if tok.type == TOK_TEXT:
yield TextTag(tok.content)
elif tok.type == TOK_VAR:
yield VarTag(tok.content)
elif tok.type == TOK_BLOCK:
m = re.match(r"\w+", tok.content)
if not m:
raise SyntaxError(tok)
yield BlockNode.__tags__[
m.group(0)].parse(tok.content[m.end(0):].strip(), self)
def parse_nodelist(self, ends):
nodelist = Nodelist()
try:
node = next(self.parse())
while node.name not in ends:
nodelist.append(node)
node = next(self.parse())
except StopIteration:
node = None
nodelist.endnode = node
return nodelist
def render(self, context, output=None):
if not isinstance(context, Context):
context = Context(context)
if output is None:
dest = StringIO()
else:
dest = output
self.nodelist.render(context, dest)
if output is None:
return dest.getvalue()
class AstLiteral:
def __init__(self, arg):
self.arg = arg
def resolve(self, context):
return self.arg
class AstContext:
def __init__(self, arg):
self.arg = arg
def resolve(self, context):
return context.get(self.arg, "")
class AstLookup:
def __init__(self, left, right):
self.left = left
self.right = right
def resolve(self, context):
left = self.left.resolve(context)
right = self.right.resolve(context)
return left[right]
class AstAttr:
def __init__(self, left, right):
self.left = left
self.right = right
def resolve(self, context):
left = self.left.resolve(context)
return getattr(left, self.right, "")
class AstCall:
def __init__(self, func):
self.func = func
self.args = []
def add_arg(self, arg):
self.args.append(arg)
def resolve(self, context):
func = self.func.resolve(context)
args = [arg.resolve(context) for arg in self.args]
return func(*args)
class Expression:
def __init__(self, source):
self.tokens = tokenize.generate_tokens(StringIO(source).readline)
self.next() # prime the first token
def next(self):
self.current = next(self.tokens)
return self.current
@staticmethod
def parse(s):
p = Expression(s)
result = p._parse()
if p.current.exact_type not in (token.NEWLINE, token.ENDMARKER):
raise SyntaxError(f"Parse ended unexpectedly: {p.current}")
return result
def parse_kwargs(self):
kwargs = {}
tok = self.current
while tok.exact_type != token.ENDMARKER:
if tok.exact_type == token.NEWLINE:
tok = self.next()
continue
if tok.exact_type != token.NAME:
raise SyntaxError(f"Expected name, found {tok}")
name = tok.string
tok = self.next()
if tok.exact_type != token.EQUAL:
raise SyntaxError(f"Expected =, found {tok}")
tok = self.next()
kwargs[name] = self._parse()
tok = self.next()
return kwargs
def _parse(self):
tok = self.current
if tok.exact_type in (token.ENDMARKER, token.COMMA):
return # TODO
if tok.exact_type == token.STRING:
self.next()
return AstLiteral(tok.string[1:-1])
if tok.exact_type == token.NUMBER:
self.next()
try:
value = int(tok.string)
except ValueError:
value = float(tok.string)
return AstLiteral(value)
if tok.exact_type == token.NAME:
state = AstContext(tok.string)
while True:
tok = self.next()
if tok.exact_type == token.DOT:
tok = self.next()
if tok.exact_type != token.NAME:
raise SyntaxError(f"Invalid attr lookup: {tok}")
state = AstAttr(state, tok.string)
elif tok.exact_type == token.LSQB:
self.next()
right = self._parse()
state = AstLookup(state, right)
if self.current.exact_type != token.RSQB:
raise SyntaxError(
f"Expected ] but found {self.current}")
elif tok.exact_type == token.LPAR:
state = AstCall(state)
self.next()
while self.current.exact_type != token.RPAR:
arg = self._parse()
state.add_arg(arg)
if self.current.exact_type != token.COMMA:
break
self.next()
if self.current.exact_type != token.RPAR:
raise SyntaxError(
f"Expected ( but found {self.current}")
self.next()
else:
break
return state
raise SyntaxError(
(f"Error parsing expression {tok.line !r}: Unexpected token "
"{tok.string!r} at position {tok.start[0]}.")
)
class Node:
name = None
def __init__(self, content):
self.content = content
def render(self, context, output):
pass # pragma: no cover
class TextTag(Node):
def render(self, context, output):
output.write(self.content)
class VarTag(Node):
def __init__(self, content):
self.expr = Expression.parse(content)
def render(self, context, output):
value = str(self.expr.resolve(context))
if not getattr(value, '__safe__', False):
value = context.escape(value)
output.write(value)
class BlockNode(Node):
__tags__ = {}
child_nodelists = ("nodelist",)
def __init_subclass__(cls, *, name):
super().__init_subclass__()
cls.name = name
BlockNode.__tags__[name] = cls
return cls
@classmethod
def parse(cls, content, parser):
return cls(content)
def nodes_by_type(self, node_type):
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
yield from nodelist.nodes_by_type(node_type)
class ForTag(BlockNode, name="for"):
child_nodelists = ("nodelist", "elselist")
def __init__(self, argname, iterable, nodelist, elselist):
self.argname, self.iterable, self.nodelist, self.elselist = \
argname, iterable, nodelist, elselist
@classmethod
def parse(cls, content, parser):
argname, iterable = content.split(" in ", 1)
nodelist = parser.parse_nodelist({"endfor", "else"})
elselist = parser.parse_nodelist({"endfor"}) if \
nodelist.endnode.name == "else" else None
return cls(argname.strip(),
Expression.parse(iterable.strip()), nodelist, elselist)
def render(self, context, output):
iterable = self.iterable.resolve(context)
if iterable:
with context.push():
for idx, item in enumerate(iterable):
context.update({"loopcounter": idx, self.argname: item})
self.nodelist.render(context, output)
elif self.elselist:
self.elselist.render(context, output)
class ElseTag(BlockNode, name="else"):
pass
class EndforTag(BlockNode, name="endfor"):
pass
class IfTag(BlockNode, name="if"):
child_nodelists = ("nodelist", "elselist")
def __init__(self, condition, nodelist, elselist):
condition, inv = re.subn(r"^not\s+", "", condition, count=1)
self.inv, self.condition = bool(inv), Expression.parse(condition)
self.nodelist, self.elselist = nodelist, elselist
@classmethod
def parse(cls, content, parser):
nodelist = parser.parse_nodelist({"endif", "else"})
elselist = parser.parse_nodelist({"endif"}) if \
nodelist.endnode.name == "else" else None
return cls(content, nodelist, elselist)
def render(self, context, output):
if self.test_condition(context):
self.nodelist.render(context, output)
elif self.elselist:
self.elselist.render(context, output)
def test_condition(self, context):
return self.inv ^ bool(self.condition.resolve(context))
class EndifTag(BlockNode, name="endif"):
pass
class IncludeTag(BlockNode, name="include"):
def __init__(self, template_name, kwargs, loader):
self.template_name, self.kwargs, self.loader = \
template_name, kwargs, loader
@classmethod
def parse(cls, content, parser):
if parser.loader is None:
raise RuntimeError(
"Can't use {% include %} without a bound Loader")
tokens = Expression(content)
template_name = tokens._parse()
kwargs = tokens.parse_kwargs()
return cls(template_name, kwargs, parser.loader)
def render(self, context, output):
name = self.template_name.resolve(context)
tmpl = self.loader[name]
kwargs = {key: expr.resolve(context) for
key, expr in self.kwargs.items()}
ctx = context.new_child(kwargs)
tmpl.render(ctx, output)
class LoadTag(BlockNode, name="load"):
@classmethod
def parse(cls, content, parser):
importlib.import_module(content)
return cls(None)
class ExtendsTag(BlockNode, name="extends"):
def __init__(self, parent, loader, nodelist):
self.parent, self.loader, self.nodelist = parent, loader, nodelist
@classmethod
def parse(cls, content, parser):
parent = Expression.parse(content)
nodelist = parser.parse_nodelist([])
return cls(parent, parser.loader, nodelist)
def render(self, context, output):
parent = self.loader[self.parent.resolve(context)]
block_context = getattr(context, "block_context", None)
if block_context is None:
block_context = context.block_context = defaultdict(deque)
for block in self.nodelist.nodes_by_type(BlockTag):
block_context[block.block_name].append(block)
if parent.nodelist[0].name != "extends":
for block in parent.nodelist.nodes_by_type(BlockTag):
block_context[block.block_name].append(block)
parent.render(context, output)
class BlockTag(BlockNode, name="block"):
def __init__(self, name, nodelist):
self.block_name, self.nodelist = name, nodelist
@classmethod
def parse(cls, content, parser):
m = re.match(r"\w+", content)
if not m:
raise ValueError(f'Invalid block label: {content !r}')
name = m.group(0)
nodelist = parser.parse_nodelist({"endblock"})
return cls(name, nodelist)
def render(self, context, output):
self.context = context
self.output = output
self._render()
def _render(self):
block_context = getattr(self.context, "block_context", None)
if not block_context:
block = self
else:
block = block_context[self.block_name].popleft()
with self.context.push({"block": self}):
block.nodelist.render(self.context, self.output)
if block_context:
block_context[self.block_name].appendleft(block)
@property
def super(self):
self._render()
return ""
class EndBlockTag(BlockNode, name="endblock"):
pass
class WithTag(BlockNode, name="with"):
def __init__(self, kwargs, nodelist):
self.kwargs, self.nodelist = kwargs, nodelist
@classmethod
def parse(cls, content, parser):
kwargs = Expression(content).parse_kwargs()
nodelist = parser.parse_nodelist({"endwith"})
return cls(kwargs, nodelist)
def render(self, context, output):
kwargs = {key: value.resolve(context) for
key, value in self.kwargs.items()}
with context.push(kwargs):
self.nodelist.render(context, output)
class EndWithTag(BlockNode, name="endwith"):
pass
class CaseTag(BlockNode, name="case"):
def __init__(self, term, nodelist):
self.term, self.nodelist = term, nodelist
@classmethod
def parse(cls, content, parser):
term = Expression.parse(content)
nodelist = parser.parse_nodelist(["endcase"])
else_found = False
for node in nodelist:
if node.name not in {"when", "else"}:
raise SyntaxError(
f"Only 'when' and 'else' allowed as "
"children of case. Found: {node}")
if node.name == "else":
if else_found:
raise SyntaxError("Case tag can only have one else child")
else_found = True
nodelist.sort(key=lambda x: x.name, reverse=True)
return cls(term, nodelist)
def render(self, context, output):
value = self.term.resolve(context)
for node in self.nodelist:
if node.name == "when":
other = node.term.resolve(context)
else:
other = value
if value == other:
node.render(context, output)
return
class WhenTag(BlockNode, name="when"):
def __init__(self, term, nodelist):
self.term, self.nodelist = term, nodelist
@classmethod
def parse(cls, content, parser):
term = Expression.parse(content)
nodelist = parser.parse_nodelist()
return cls(term, nodelist)
def render(self, context, output):
self.nodelist.render(context, output)
class EndCaseTag(BlockNode, name="endcase"):
pass
class MetaConfig(type):
__types__ = {
bool: lambda v: str(v).lower() in {'yes', 'y', 't', 'true', '1', 'on'},
}
def __new__(cls, name, bases, namespace, **kwargs):
namespace['__slots__'] = ()
types = {}
attr_types = {}
# Walk the parents and collate:
# - all the __types__ dicts.
# - all the attribute types
for parent in reversed(bases):
types.update(getattr(parent, '__types__', {}))
attr_types.update({
k: v
for k, v in get_type_hints(parent).items()
if k.isupper()
})
types.update(namespace.get('__types__', {}))
namespace['__types__'] = types
new_cls = type.__new__(cls, name, bases, namespace, **kwargs)
# Validate we don't re-type anything
for k, v in get_type_hints(new_cls).items():
if not k.isupper() or k not in attr_types:
continue
assert v == attr_types[k], \
(f"Type of locally declared {k} ({v}) "
"does not match parent ({attr_types[k]})")
return new_cls
def __call__(cls):
raise TypeError(
f'Can not create instance of singleton config {cls.__name__}')
def as_dict(cls):
return {
key: getattr(cls, key)
for key in dir(cls)
if key.isupper()
}
def __getattribute__(cls, key):
if not key.isupper():
return object.__getattribute__(cls, key)
raw = super().__getattribute__(key)
_type = get_type_hints(cls).get(key, None)
if callable(raw):
raw = raw(cls)
_type = cls.__types__.get(_type, _type)
value = getenv(key, raw)
if _type is not None:
value = _type(value)
return value
def module_getattr_factory(cls):
"""
Factory function to build a module-level __getattr__ for tools (like
Django) which need a whole-module settings.
__getattr__ = config_getattr(config)
"""
def __getattr__(name):
return getattr(cls, name)
return __getattr__
class BaseConfig(object, metaclass=MetaConfig):
"""Base Config class
example usage::
class Config(BaseConfig):
HOST = '127.0.0.1'
PORT : int = 8000
DEBUG : bool = False
print(Config.HOST)
$ python app.py
127.0.0.1
HOST=my.example.org python app.py
my.example.com
"""
class AttrView(dict):
"""
A Read Only object that recursively builds itself from a dict
and allows easy access to attributes
"""
def __init__(self, d=None):
d = d or {}
super(AttrView, self).__init__(d)
for k, v in d.items():
if isinstance(v, dict):
self.__dict__[k] = AttrView(v)
else:
self.__dict__[k] = v
def __getattr__(self, attr):
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def __setitem__(self, key, value):
raise AttributeError("%s is read only" % key)
def __setattr__(self, attr, value):
raise AttributeError("%s is read only" % attr)
class AttrDict(dict):
"""
An object that recursively builds itself from a dict
and allows easy access to attributes
"""
def __init__(self, d=None):
d = d or {}
super(AttrDict, self).__init__(d)
for k, v in d.items():
if isinstance(v, dict):
self.__dict__[k] = AttrDict(v)
else:
self.__dict__[k] = v
def __getattr__(self, attr):
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def __setitem__(self, key, value):
super(AttrDict, self).__setitem__(key, value)
self.__dict__[key] = value
def __setattr__(self, attr, value):
self.__setitem__(attr, value)
class DictResolver:
"""
A dumb route resolver
"""
def __init__(self):
self.routes = {}
def add(self, path, wrapped, method):
self.routes[path] = (wrapped, method)
def get(self, path):
return self.routes[path]
class Chick:
"""
A pico WSGI Application framework with API inspired by Bottle and Flask.
There is No HTTPRequest Object and No HTTPResponse object.
Routing is done via a dictionary lookup which means your application has
constant lookup time regardless of the amount of routes.
Just barebone routing ...
Args:
reolver (object) - A resolver instance. You can use any see for example
webob.routing or https://github.com/andreypopp/routr
"""
def __init__(self, resolver=None):
if not resolver:
self.resolver = DictResolver()
def __call__(self, environ, start_response):
try:
callback, method = self.resolver.get(environ.get('PATH_INFO'))
except (TypeError, KeyError):
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return [b'404 Not Found']
if method != environ.get('REQUEST_METHOD'):
start_response('405 Method Not Allowed',
[('Content-Type', 'text/plain')])
return [b'404 Method Not Allowed']
start_response('200 OK', [('Content-Type', 'text/plain')])
return callback(environ)
def add_route(self, path, wrapped, method):
self.resolver.add(path, wrapped, method)
def get(self, path):
def decorator(wrapped):
self.add_route(path, wrapped, 'GET')
return wrapped
return decorator
def post(self, path):
def decorator(wrapped):
self.add_route(path, wrapped, 'POST')
return wrapped
return decorator
| null |
examples/chick.py
|
chick.py
|
py
| 24,466 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.compile",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "collections.ChainMap",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "html.escape",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "tokenize.generate_tokens",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "token.NEWLINE",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "token.ENDMARKER",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "token.ENDMARKER",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "token.NEWLINE",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "token.NAME",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "token.EQUAL",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "token.ENDMARKER",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "token.COMMA",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "token.STRING",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "token.NUMBER",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "token.NAME",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "token.DOT",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "token.NAME",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "token.LSQB",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "token.RSQB",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "token.LPAR",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "token.RPAR",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "token.COMMA",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "token.RPAR",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "re.subn",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 508,
"usage_type": "argument"
},
{
"api_name": "re.match",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "typing.get_type_hints",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "typing.get_type_hints",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "typing.get_type_hints",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 685,
"usage_type": "call"
}
] |
491013602
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from .forms import NumberForm
# Create your views here.
def index(request):
if request.method == 'GET':
form = NumberForm(request.GET)
if form.is_valid():
data = form.cleaned_data
number = data['number']
return HttpResponseRedirect(number)
else:
form = NumberForm()
return render(request, 'calculator/index.html', {'form': form})
def count(request, number):
return render(request, 'calculator/count.html', {'range': range(int(number))})
| null |
calculator/views.py
|
views.py
|
py
| 633 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "forms.NumberForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "forms.NumberForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
}
] |
374535707
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int8
from std_msgs.msg import String
import serial
import binascii
import ema.libs.yei.threespace_api as ts_api
import time
def teste():
rospy.init_node('imu', anonymous=False)
pub = rospy.Publisher('imu/data', String, queue_size=10)
number_of_sensors = 1
p = False # print?
command = 0
addresses = [7,8,2,3,4,5,1]
portIMU = '/dev/ttyACM0'
serial_port = serial.Serial(port=portIMU, baudrate=115200, timeout=0.001)
time.sleep(0.1)
serial_port.flush()
time.sleep(0.1)
# Set streaming slots
for i in range(len(addresses)):
msg = '>'+str(addresses[i])+',80,'+str(command)+',255,255,255,255,255,255,255\n'
print(msg)
serial_port.write(msg)
time.sleep(0.1)
out = ''
while serial_port.inWaiting():
out += '>> ' + serial_port.read(serial_port.inWaiting())
print(out)
out = ''
# Start streaming
for i in range(len(addresses)):
serial_port.write('>'+str(addresses[i])+',85\n')
time.sleep(0.1)
while serial_port.inWaiting():
out = '>> ' + serial_port.read(serial_port.inWaiting())
print('Start')
# define loop rate (in hz)
rate = rospy.Rate(200)
# node loop
while not rospy.is_shutdown():
bytes_to_read = serial_port.inWaiting()
if bytes_to_read > 0:
data = serial_port.read(bytes_to_read)
id_str = binascii.hexlify(data[1].encode('utf-8'))
if p:
print(id_str)
try:
id = int(id_str)
pub.publish(str(id))
except ValueError:
print('NAN')
pub.publish('NAN')
# sleep until it's time to work again
rate.sleep()
if __name__ == '__main__':
try:
teste()
except rospy.ROSInterruptException:
pass
| null |
scripts/imu_noapi_node.py
|
imu_noapi_node.py
|
py
| 1,951 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rospy.init_node",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.String",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "serial.Serial",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "rospy.Rate",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "rospy.is_shutdown",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "binascii.hexlify",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "rospy.ROSInterruptException",
"line_number": 82,
"usage_type": "attribute"
}
] |
85868928
|
import os
import sys
from PIL import Image
# 将输入路径的上两级路径加入系统
def set_projectpath(current_path):
curPath = os.path.abspath(current_path)
# curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
rootPath = os.path.split(rootPath)[0]
sys.path.append(rootPath)
def concatImage(images, mode="L"):
    """Concatenate a list of image arrays horizontally into one PIL image.

    Each array is converted to a PIL image, resized to the first image's
    size with bilinear interpolation, and pasted side by side.
    """
    if not isinstance(images, list):
        raise Exception('images must be a list ')
    width, height = Image.fromarray(images[0]).size
    canvas = Image.new(mode, (width * len(images), height * 1))
    for index, array in enumerate(images):
        tile = Image.fromarray(array).resize((width, height), Image.BILINEAR)
        canvas.paste(tile, (index * width, 0, (index + 1) * width, height))
    return canvas
def shape2d(a):
    """
    Ensure a 2D shape.

    Args:
        a: an int or tuple/list of length 2
    Returns:
        list: of length 2. if ``a`` is an int, return ``[a, a]``.
    Raises:
        RuntimeError: if ``a`` is neither an int nor a 2-element list/tuple.
    """
    # isinstance is preferred over `type(a) == int`; it also accepts int subclasses.
    if isinstance(a, int):
        return [a, a]
    if isinstance(a, (list, tuple)):
        # Explicit raise instead of `assert`: asserts are stripped under -O,
        # which would silently let a wrong-length sequence through.
        if len(a) != 2:
            raise RuntimeError("Illegal shape: {}".format(a))
        return list(a)
    raise RuntimeError("Illegal shape: {}".format(a))
| null |
utiles/utils.py
|
utils.py
|
py
| 1,194 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.Image.BILINEAR",
"line_number": 24,
"usage_type": "attribute"
}
] |
283701748
|
from decimal import Decimal
from typing import Optional, List
from pydantic import BaseModel, condecimal, ValidationError
import stackprinter
#================================================================================
#================================================================================
class Order:
    """Wraps a price, rounds it to cents, and validates it via OrderPydantic."""
    def __init__(self, price: float):
        try:
            # Quantize to two decimal places so the pydantic multiple_of check passes.
            price = Decimal(price)
            self.price = price.quantize(Decimal('0.01'))
        except Exception as e:
            # NOTE(review): if Decimal() fails, self.price is never set and the
            # _validate() call below will raise AttributeError — confirm intended.
            print(stackprinter.format(e, style="darkbg2"))
        self.validated_data = self._validate()
    def _validate(self):
        # Run the pydantic model and return its dict form; re-raise validation errors.
        try:
            pydantic = OrderPydantic(price=self.price)
            return pydantic.dict()
        except ValidationError as e:
            raise e
    @property
    def data(self):
        # Validated dict produced at construction time.
        return self.validated_data
class OrderPydantic(BaseModel):
    """Pydantic schema for a single order: price must be a multiple of 0.01."""
    price: condecimal(multiple_of=Decimal('0.01'))
    opt: Optional[str]
#================================================================================
#================================================================================
class Orders:
    """Validates a list of orders against the _OrdersPydantic schema."""
    def __init__(self, orders: List[Order]):
        # NOTE(review): _OrdersPydantic expects dict-like items; callers appear to
        # pass the dicts produced by Order._validate(), not Order instances — verify.
        self.orders = orders
        self.validated_data = self._validate()
    def _validate(self):
        # Validate the whole list at once; re-raise validation errors.
        try:
            pydantic = _OrdersPydantic(data=self.orders)
            return pydantic.dict()["data"]
        except ValidationError as e:
            raise e
    @property
    def data(self):
        # Validated list of order dicts produced at construction time.
        return self.validated_data
class _OrdersPydantic(BaseModel):
    """Internal pydantic wrapper: a list of OrderPydantic items under ``data``."""
    data: List[OrderPydantic]
#================================================================================
#================================================================================
# pydord = OrderPydantic(price='130.44')
# print(pydord)
# order = Order(price=130.33442534325)._validate()
# print(order)
# testorders = Orders(orders=[order, order, order])
# print(testorders.data)
class BaseError(Exception):
    """Base exception carrying the endpoint and payload that caused the error.

    ``accept`` and ``sleep`` are *class-level* defaults so subclasses such as
    ``DDoSProtection`` can override them. The original code assigned them in
    ``__init__``, which shadowed every subclass override on each instance
    (a DDoSProtection instance ended up with accept=True, sleep=None).
    """
    accept = True   # whether the caller should accept/continue after this error
    sleep = None    # seconds to back off before retrying, if any

    def __init__(self, endpoint: str, data: dict):
        self.endpoint = endpoint
        self.data = data
        msg = f"Endpoint: {self.endpoint} - Data: {self.data}"
        super().__init__(msg)
class DDoSProtection(BaseError):
    """Raised when the exchange rate-limits us; caller should back off 60 s."""
    # NOTE(review): as written, BaseError.__init__ assigns self.accept/self.sleep,
    # which shadows these class attributes on every instance — confirm intent.
    accept = False
    sleep = 60
# try:
# raise BaseError(endpoint="kraken.com", data={"orderID": "lol"})
# except Exception as e:
# print(stackprinter.format(e, style="darkbg2"))
if True:
    # Demo: construct and raise an error unconditionally. NOTE(review): the
    # ``data`` argument is a set literal ({"1 == 2"}), not a dict as annotated.
    err = BaseError(endpoint="kraken.com", data={"1 == 2"})
    print("message: ", err)
    raise err
# raise ValueError("Constant Value not accepted in conditional statement")
| null |
src/noobit/models/test.py
|
test.py
|
py
| 2,680 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "decimal.Decimal",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "stackprinter.format",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pydantic.dict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pydantic.ValidationError",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pydantic.condecimal",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pydantic.dict",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pydantic.ValidationError",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 62,
"usage_type": "name"
}
] |
145207432
|
# 导入类
from __future__ import print_function
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
from torch.optim import SGD
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
# 判断是否有此安装包
try:
from tensorboardX import SummaryWriter
except ImportError:
raise RuntimeError("No tensorboardX package is found")
from ignite.engine import Events, create_supervised_evaluator,create_supervised_trainer
from ignite.metrics import Accuracy, Loss
# 定义网络结构
class Net(nn.Module):
    """Small LeNet-style CNN for MNIST: two conv blocks, then two FC layers.

    Input: (batch, 1, 28, 28). Output: (batch, 10) log-probabilities.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        out = F.relu(F.max_pool2d(self.conv1(x), 2))        # -> (B, 10, 12, 12)
        out = self.conv2_drop(self.conv2(out))
        out = F.relu(F.max_pool2d(out, 2))                  # -> (B, 20, 4, 4)
        out = out.view(-1, 320)                             # flatten: 20*4*4 = 320
        hidden = F.dropout(F.relu(self.fc1(out)), training=self.training)
        return F.log_softmax(self.fc2(hidden), dim=-1)
# 获取Dataloader
def get_data_loaders(train_batch_size, val_batch_size):
    """Build MNIST train/validation DataLoaders (downloads the dataset if missing)."""
    transform = Compose([ToTensor(), Normalize([0.1307], [0.3081])])
    loaders = []
    # Train split is shuffled; validation split is not.
    for is_train, batch_size in ((True, train_batch_size), (False, val_batch_size)):
        dataset = MNIST(download=True, root=".", transform=transform, train=is_train)
        loaders.append(DataLoader(dataset, batch_size=batch_size, shuffle=is_train))
    return tuple(loaders)
# 建立Summarywriter
def create_summary_writer(model, data_loader, log_dir):
    """Create a tensorboardX SummaryWriter and best-effort log the model graph."""
    writer = SummaryWriter(logdir=log_dir)
    # Pull one batch so the graph can be traced with a concrete input tensor.
    x, y = next(iter(data_loader))
    try:
        writer.add_graph(model, x)
    except Exception as e:
        print("Failed to save model graph:{}".format(e))
    return writer
# 定义run函数
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
    """Train Net on MNIST with ignite, logging losses/accuracy to TensorBoard."""
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) # instantiate DataLoaders
    model = Net() # the network
    writer = create_summary_writer(model, train_loader, log_dir)
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) # optimizer
    # Build the ignite trainer from model, optimizer, loss and device.
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy(),
                                                            "nll":Loss(F.nll_loss)},
                                            device=device)
    # Event handlers fired by the trainer.
    @trainer.on(Events.ITERATION_COMPLETED)
    def log_train_loss(engine):
        # 1-based iteration index within the current epoch.
        iter = (engine.state.iteration - 1) % len(train_loader) + 1
        if iter % log_interval == 0:
            print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
                  .format(engine.state.epoch, iter, len(train_loader), engine.state.output)) # engine.state.output is the batch loss returned by the trainer
            writer.add_scalar("training/loss", engine.state.output, engine.state.iteration) # iteration is the global step count
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        # Re-evaluate on the training set to get epoch-level metrics.
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics #
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        print("Training Results -Epoch:{} Avg accuracy: {:.2f} Avg loss: {:.2f}"
              .format(engine.state.epoch, avg_accuracy, avg_nll))
        writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
              .format(engine.state.epoch, avg_accuracy, avg_nll))
        # NOTE(review): "valdation" tag is misspelled; kept as-is since it is a
        # runtime TensorBoard tag name — fixing it would rename existing charts.
        writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
    trainer.run(train_loader, max_epochs=epochs)
    writer.close()
if __name__ == "__main__":
    parser = ArgumentParser()
    # (flag, type, default, help) for every CLI option.
    for flag, arg_type, default, help_text in (
        ('--batch_size', int, 64,
         'input batch size for training (default: 64)'),
        ('--val_batch_size', int, 1000,
         'input batch size for validation (default: 1000)'),
        ('--epochs', int, 2,
         'number of epochs to train (default: 10)'),
        ('--lr', float, 0.01,
         'learning rate (default: 0.01)'),
        ('--momentum', float, 0.5,
         'SGD momentum (default: 0.5)'),
        ('--log_interval', int, 10,
         'how many batches to wait before logging training status'),
        ('--log_dir', str, "tensorboard_logs",
         'log directory for Tensorboard log output'),
    ):
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    args = parser.parse_args()
    #run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum,
    #args.log_interval, args.log_dir)
    net = Net()
    # Print every parameter name registered on the network.
    for name_and_param in net.named_parameters():
        print(name_and_param[0])
    print(net.named_parameters())
| null |
templete/MNIST_tensorboardX.py
|
MNIST_tensorboardX.py
|
py
| 6,147 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.dropout",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "ignite.engine.create_supervised_trainer",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "ignite.engine.create_supervised_evaluator",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "ignite.metrics.Accuracy",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "ignite.metrics.Loss",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "ignite.engine.Events.ITERATION_COMPLETED",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "ignite.engine.Events",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "ignite.engine.Events.EPOCH_COMPLETED",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "ignite.engine.Events",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "ignite.engine.Events.EPOCH_COMPLETED",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "ignite.engine.Events",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 118,
"usage_type": "call"
}
] |
541164479
|
from l3addr import L3Addr
from utils import maskToHostMask, maskToInt
#from icecream import ic
class L3Interface:
    """A layer-3 (IP) interface: an interface number plus an address/mask pair."""

    def __init__(self, number: int, addr: str, mask_numbits: int):
        self._number = number
        self._mask = maskToInt(mask_numbits)   # mask as a 32-bit integer
        self._mask_numbits = mask_numbits      # mask as a prefix length
        self._addr = L3Addr(addr)

    def get_number(self) -> int:
        """Return the interface number."""
        return self._number

    def get_netaddr(self) -> L3Addr:
        """Return the network address (address with all host bits zeroed)."""
        mask = maskToHostMask(self._mask_numbits)
        return L3Addr(self._addr.as_int() & ~mask)

    def get_directed_bcast_addr(self) -> L3Addr:
        """Return the directed-broadcast address (address with all host bits set)."""
        host_mask = maskToHostMask(self._mask_numbits)
        # host_mask is all 1 bits in the host part -- which is the same as a bcast value!
        host_bcast = host_mask | self._addr.as_int()
        return L3Addr(host_bcast)

    def get_mask(self):
        """Return the mask as a prefix length (number of 1 bits)."""
        return self._mask_numbits

    def get_mask_as_int(self):
        """Return the mask as a 32-bit integer."""
        return self._mask

    def on_same_network(self, addr: L3Addr) -> bool:
        '''return True if the given addr is on this interface's network.'''
        # BUG FIX: the original else-branch was a bare `False` expression with
        # no `return`, so the non-matching case silently returned None.
        return (self._addr.network_part_as_int(self._mask_numbits)
                == addr.network_part_as_int(self._mask_numbits))

    def get_addr(self):
        """Return the interface's address as an L3Addr."""
        return self._addr

    def __str__(self):
        return f"Iface<{self._number}: {self._addr.as_str()}/{self._mask_numbits}>"
if __name__ == "__main__":
    # Smoke tests: mask-to-int conversion across a range of prefix lengths.
    iface = L3Interface(1, "10.10.10.2", 8)
    assert iface.get_mask_as_int() == 4278190080
    iface = L3Interface(1, "10.10.10.2", 16)
    assert iface.get_mask_as_int() == 4294901760
    iface = L3Interface(1, "10.10.10.2", 5)
    assert iface.get_mask_as_int() == 4160749568
    iface = L3Interface(1, "10.10.10.2", 11)
    assert iface.get_mask_as_int() == 4292870144
    iface = L3Interface(1, "10.10.10.2", 28)
    assert iface.get_mask_as_int() == 4294967280
    iface = L3Interface(1, "10.10.10.2", 23)
    assert iface.get_mask_as_int() == 4294966784
    # /23 network 10.10.10.0 spans 10.10.10.0 - 10.10.11.255.
    assert iface.on_same_network(L3Addr("10.10.11.3"))
    assert not iface.on_same_network(L3Addr("10.10.12.74"))
    print(iface.get_directed_bcast_addr().as_str())
    assert iface.get_directed_bcast_addr().as_str() == "10.10.11.255"
    assert iface.get_netaddr().as_str() == "10.10.10.0"
    assert str(iface) == "Iface<1: 10.10.10.2/23>"
    print("All tests passed!")
| null |
routing/l3interface.py
|
l3interface.py
|
py
| 2,388 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.maskToInt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.maskToHostMask",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "utils.maskToHostMask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "l3addr.L3Addr",
"line_number": 63,
"usage_type": "call"
}
] |
520232988
|
"""
(prototype) Graph Mode Post Training Static Quantization in PyTorch
=========================================================
**Author**: `Jerry Zhang <https://github.com/jerryzh168>`_
This tutorial introduces the steps to do post training static quantization in graph mode.
The advantage of graph mode quantization is that as long as the model can be scripted or traced,
we can perform quantization fully automatically on the model.
Right now we can do post training static and post training dynamic quantization
and quantization aware training support will come later.
We have a separate tutorial for `Graph Mode Post Training Dynamic Quantization <https://pytorch.org/tutorials/prototype_source/graph_mode_dynamic_bert_tutorial.html>`_.
tldr; The graph mode API looks like the following:
.. code:: python
import torch
from torch.quantization import get_default_qconfig, quantize_jit
ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input)
qconfig = get_default_qconfig('fbgemm')
def calibrate(model, data_loader):
model.eval()
with torch.no_grad():
for image, target in data_loader:
model(image)
quantized_model = quantize_jit(
ts_model, # TorchScript model
{'': qconfig}, # qconfig dict
calibrate, # calibration function
[data_loader_test]) # positional arguments to calibration function, typically some sample dataset
"""
######################################################################
# 1. Motivation of Graph Mode Quantization
# ---------------------
# Currently PyTorch only has eager mode quantization: `Static Quantization with Eager Mode in PyTorch <https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html>`_.
#
# We can see there are multiple manual steps involved in the process, including:
#
# - Explicitly quantize and dequantize activations, this is time consuming when floating point and quantized operations are mixed in a model.
# - Explicitly fuse modules, this requires manually identifying the sequence of convolutions, batch norms and relus and other fusion patterns.
# - Special handling is needed for pytorch tensor operations (like add, concat etc.)
# - Functionals did not have first class support (functional.conv2d and functional.linear would not get quantized)
#
# Most of these required modifications comes from the underlying limitations of eager mode quantization. Eager mode works in module level since it can not inspect the code that is actually run (in the forward function), quantization is achieved by module swapping, and we don’t know how the modules are used in forward function in eager mode, so it requires users to insert QuantStub and DeQuantStub manually to mark the points they want to quantize or dequantize.
# In graph mode, we can inspect the actual code that’s been executed in forward function (e.g. aten function calls) and quantization is achieved by module and graph manipulations. Since graph mode has full visibility of the code that is run, our tool is able to automatically figure out things like which modules to fuse and where to insert observer calls, quantize/dequantize functions etc., we are able to automate the whole quantization process.
#
# Advantages of graph mode quantization are:
#
# - Simple quantization flow, minimal manual steps
# - Unlocks the possibility of doing higher level optimizations like automatic precision selection
#
# Limitations of graph mode quantization is that quantization is configurable only at the level of module and the set of operators that are quantized is not configurable by user currently.
#
# 2. Define Helper Functions and Prepare Dataset
# ---------------------
# We’ll start by doing the necessary imports, defining some helper functions and prepare the data.
# These steps are identical to `Static Quantization with Eager Mode in PyTorch <https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html>`_.
#
# Download dataset:
#
# .. code::
#
# wget https://s3.amazonaws.com/pytorch-tutorial-assets/imagenet_1k.zip
#
# and unzip to `data` folder.
# Download the `torchvision resnet18 model <https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L12>`_ and rename it to
# ``data/resnet18_pretrained_float.pth``.
import numpy as np
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
import os
import time
import sys
import torch.quantization
# # Setup warnings
import warnings
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
module=r'.*'
)
warnings.filterwarnings(
action='default',
module=r'torch.quantization'
)
# Specify random seed for repeatable results
_ = torch.manual_seed(191009)
from torchvision.models.resnet import resnet18
from torch.quantization import get_default_qconfig, quantize_jit
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to report.
    Returns:
        list of 1-element tensors, each the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Use .reshape(-1) rather than .view(-1): a slice of the transposed
            # prediction tensor may be non-contiguous, and .view raises on
            # non-contiguous memory in current PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def evaluate(model, criterion, data_loader):
    """Run *model* over *data_loader* in eval mode; return (top-1, top-5) meters."""
    model.eval()
    meters = {1: AverageMeter('Acc@1', ':6.2f'),
              5: AverageMeter('Acc@5', ':6.2f')}
    batches_seen = 0
    with torch.no_grad():
        for image, target in data_loader:
            output = model(image)
            loss = criterion(output, target)  # computed for parity; value unused
            batches_seen += 1
            accs = accuracy(output, target, topk=(1, 5))
            n = image.size(0)
            meters[1].update(accs[0][0], n)
            meters[5].update(accs[1][0], n)
    print('')
    return meters[1], meters[5]
def load_model(model_file):
    """Instantiate a float ResNet-18 and load pretrained weights from *model_file* onto CPU."""
    model = resnet18(pretrained=False)
    model.load_state_dict(torch.load(model_file))
    model.to('cpu')
    return model
def print_size_of_model(model):
    """Serialize *model* with TorchScript to a temp file and print its size in MB."""
    if isinstance(model, torch.jit.RecursiveScriptModule):
        scripted = model  # already TorchScript; save as-is
    else:
        scripted = torch.jit.script(model)
    torch.jit.save(scripted, "temp.p")
    print('Size (MB):', os.path.getsize("temp.p")/1e6)
    os.remove('temp.p')
def prepare_data_loaders(data_path):
    """Build ImageNet-style train/val DataLoaders rooted at *data_path*.

    NOTE(review): batch sizes come from the module-level globals
    ``train_batch_size`` and ``eval_batch_size`` defined later in this file —
    this function must be called after those are bound.
    """
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Training set: random crop + flip augmentation.
    dataset = torchvision.datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    # Validation set: deterministic resize + center crop.
    dataset_test = torchvision.datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_sampler = torch.utils.data.RandomSampler(dataset)
    test_sampler = torch.utils.data.SequentialSampler(dataset_test)
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=train_batch_size,
        sampler=train_sampler)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=eval_batch_size,
        sampler=test_sampler)
    return data_loader, data_loader_test
data_path = 'data/imagenet_1k'
saved_model_dir = 'data/'
float_model_file = 'resnet18_pretrained_float.pth'
train_batch_size = 30
eval_batch_size = 30
data_loader, data_loader_test = prepare_data_loaders(data_path)
criterion = nn.CrossEntropyLoss()
float_model = load_model(saved_model_dir + float_model_file).to('cpu')
float_model.eval();
######################################################################
# 3. Script/Trace the model
# --------------------------
# The input for graph mode quantization is a TorchScript model, so we'll need to either script or trace the model first.
#
ts_model = torch.jit.script(float_model).eval() # ts_model = torch.jit.trace(float_model, input)
######################################################################
# 4. Specify how to quantize the model with ``qconfig_dict``
# -------------------------
#
# .. code:: python
#
# qconfig_dict = {'' : default_qconfig}
#
# We use the same ``qconfig`` used in eager mode quantization, ``qconfig`` is just a named tuple of the observers for ``activation`` and ``weight``. `qconfig_dict` is a dictionary with names of sub modules as key and qconfig for that module as value, empty key means the qconfig will be applied to whole model unless it’s overwritten by more specific configurations, the qconfig for each module is either found in the dictionary or fallback to the qconfig of parent module.
#
# Right now ``qconfig_dict`` is the only way to configure how the model is quantized, and it is done in the granularity of module, that is, we only support one type of ``qconfig`` for each ``torch.nn.Module``, for example, if we have:
#
# .. code:: python
#
# qconfig = {
# '' : qconfig_global,
# 'sub' : qconfig_sub,
# 'sub.fc' : qconfig_fc,
# 'sub.conv': None
# }
#
# Module ``sub.fc`` will be configured with ``qconfig_fc``, and all other child modules in ``sub`` will be configured with ``qconfig_sub`` and ``sub.conv`` will not be quantized. All other modules in the model will be quantized with ``qconfig_global``
# Utility functions related to ``qconfig`` can be found in https://github.com/pytorch/pytorch/blob/master/torch/quantization/qconfig.py.
qconfig = get_default_qconfig('fbgemm')
qconfig_dict = {'': qconfig}
######################################################################
# 5. Define Calibration Function
# -------------------------
#
# .. code:: python
#
# def calibrate(model, sample_data, ...):
# model(sample_data, ...)
#
#
# Calibration function is run after the observers are inserted in the model.
# The purpose for calibration is to run through some sample examples that is representative of the workload
# (for example a sample of the training data set) so that the observers in the model are able to observe
# the statistics of the Tensors and we can later use this information to calculate quantization parameters.
#
def calibrate(model, data_loader):
    """Feed sample batches through *model* in eval mode so the inserted
    observers can record activation statistics for quantization."""
    model.eval()
    with torch.no_grad():
        for batch in data_loader:
            image, target = batch  # labels are unused during calibration
            model(image)
######################################################################
# 6. Quantize
# ---------------------
#
# .. code:: python
#
# quantized_model = quantize_jit(
# ts_model, # TorchScript model
# {'': qconfig}, # qconfig dict
# calibrate, # calibration function
# [data_loader_test], # positional arguments to calibration function, typically some sample dataset
# inplace=False, # whether to modify the model inplace or not
#     debug=True) # whether to produce a debug friendly model or not
#
# There are three things we do in ``quantize_jit``:
#
# 1. ``prepare_jit`` folds BatchNorm modules into previous Conv2d modules, and insert observers in appropriate places in the Torchscript model.
# 2. Run calibrate function on the provided sample dataset.
# 3. ``convert_jit`` takes a calibrated model and produces a quantized model.
#
# If ``debug`` is False (default option), ``convert_jit`` will:
#
# - Calculate quantization parameters using the observers in the model
# - Insert quantization ops like ``aten::quantize_per_tensor`` and ``aten::dequantize`` to the model, and remove the observer modules after that.
# - Replace floating point ops with quantized ops
# - Freeze the model (remove constant attributes and make them as Constant node in the graph).
# - Fold the quantize and prepack ops like ``quantized::conv2d_prepack`` into an attribute, so we don't need to quantize and prepack the weight everytime we run the model.
#
# If ``debug`` is set to ``True``:
#
# - We can still access the attributes of the quantized model the same way as the original floating point model, e.g. ``model.conv1.weight`` (might be harder if you use a module list or sequential)
# - The arithmetic operations all occur in floating point with the numerics being identical to the final quantized model, allowing for debugging.
quantized_model = quantize_jit(
ts_model,
{'': qconfig},
calibrate,
[data_loader_test])
print(quantized_model.graph)
######################################################################
# As we can see ``aten::conv2d`` is changed to ``quantized::conv2d`` and the floating point weight has been quantized
# and packed into an attribute (``quantized._jit_pass_packed_weight_30``), so we don't need to quantize/pack in runtime.
# Also we can't access the weight attributes anymore after the debug option since they are frozen.
#
# 7. Evaluation
# --------------
# We can now print the size and accuracy of the quantized model.
print('Size of model before quantization')
print_size_of_model(ts_model)
print('Size of model after quantization')
print_size_of_model(quantized_model)
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print('[before serilaization] Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))
graph_mode_model_file = 'resnet18_graph_mode_quantized.pth'
torch.jit.save(quantized_model, saved_model_dir + graph_mode_model_file)
quantized_model = torch.jit.load(saved_model_dir + graph_mode_model_file)
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print('[after serialization/deserialization] Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))
######################################################################
# If you want to get better accuracy or performance, try changing the `qconfig_dict`.
# We plan to add support for graph mode in the Numerical Suite so that you can
# easily determine the sensitivity towards quantization of different modules in a model: `PyTorch Numeric Suite Tutorial <https://pytorch.org/tutorials/prototype/numeric_suite_tutorial.html>`_
#
# 8. Debugging Quantized Model
# ---------------------------
# We can also use debug option:
quantized_debug_model = quantize_jit(
ts_model,
{'': qconfig},
calibrate,
[data_loader_test],
debug=True)
top1, top5 = evaluate(quantized_debug_model, criterion, data_loader_test)
print('[debug=True] quantized model Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))
######################################################################
# Note that the accuracy of the debug version is close to, but not exactly the same as the non-debug
# version as the debug version uses floating point ops to emulate quantized ops and the numerics match
# is approximate. We are working on making this even more exact.
#
print(quantized_debug_model.graph)
######################################################################
# We can see that there is no ``quantized::conv2d`` in the model, but the numerically equivalent pattern
# of ``aten::dequantize - aten::conv2d - aten::quantize_per_tensor``.
print_size_of_model(quantized_debug_model)
######################################################################
# Size of the debug model is the close to the floating point model because all the weights are
# in float and not yet quantized and frozen, this allows people to inspect the weight.
# You may access the weight attributes directly in the torchscript model, except for batch norm as
# it is fused into the preceding convolutions. We will also develop graph mode ``Numeric Suite``
# to allow easier inspection of weights in the future. Accessing the weight in the debug model is
# the same as accessing the weight in a TorchScript model:
def get_first_conv_weight(model):
    """Return the weight tensor of the model's first convolution layer.

    Attribute access works the same way on eager-mode and TorchScript
    models, so this helper can be used with either.
    """
    first_conv = model.conv1
    return first_conv.weight
w1 = get_first_conv_weight(ts_model)
w2 = get_first_conv_weight(quantized_debug_model)
# Truncate the printed tensors so the output stays readable.
print('first conv weight for input model:', str(w1)[:200])
print('first conv weight for quantized model:', str(w2)[:200])
######################################################################
# The weights are different because we fold the weights of BatchNorm into the previous conv before we quantize the model.
# More instructions on how to debug a TorchScript model can be found `here <https://pytorch.org/docs/stable/jit.html#debugging>`_.
#
#
# As we can see, this is not as straightforward as eager mode; that's why we also plan to support graph mode ``Numeric Suite``,
# and it will probably be the primary tool people use to debug numerical issues.
#
# 9. Comparison with Baseline Float Model and Eager Mode Quantization
# -------------------------------------------------------------------
scripted_float_model_file = 'resnet18_scripted.pth'
print('Size of baseline model')
print_size_of_model(float_model)
top1, top5 = evaluate(float_model, criterion, data_loader_test)
print('Baseline Float Model Evaluation accuracy: %2.2f, %2.2f'%(top1.avg, top5.avg))
torch.jit.save(torch.jit.script(float_model), saved_model_dir + scripted_float_model_file)
######################################################################
# In this section we compare the model quantized with graph mode quantization with the model
# quantized in eager mode. Graph mode and eager mode produce very similar quantized models,
# so the expectation is that the accuracy and speedup are similar as well.
print('Size of graph mode quantized model')
print_size_of_model(quantized_model)
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print('graph mode quantized model Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))
from torchvision.models.quantization.resnet import resnet18
# torchvision ships a pre-quantized (eager mode) resnet18 for comparison.
eager_quantized_model = resnet18(pretrained=True, quantize=True).eval()
print('Size of eager mode quantized model')
# Script the eager-mode model so size/speed are compared on equal footing.
eager_quantized_model = torch.jit.script(eager_quantized_model)
print_size_of_model(eager_quantized_model)
top1, top5 = evaluate(eager_quantized_model, criterion, data_loader_test)
print('eager mode quantized model Evaluation accuracy on test dataset: %2.2f, %2.2f'%(top1.avg, top5.avg))
eager_mode_model_file = 'resnet18_eager_mode_quantized.pth'
torch.jit.save(eager_quantized_model, saved_model_dir + eager_mode_model_file)
######################################################################
# We can see that the model size and accuracy of the graph mode and eager mode quantized models are pretty similar.
#
# Running the model in AIBench (with single threading) gives the following result:
#
# .. code::
#
#   Scripted Float Model:
#   Self CPU time total: 418.472ms
#
#   Scripted Eager Mode Quantized Model:
#   Self CPU time total: 177.768ms
#
#   Graph Mode Quantized Model:
#   Self CPU time total: 157.256ms
#
# As we can see, for resnet18 both graph mode and eager mode quantized models get a similar speed up over the floating point model,
# which is around 2-3x faster than the floating point model. But the actual speedup over the floating point model may vary
# depending on model, device, build, input batch sizes, threading etc.
#
| null |
prototype_source/graph_mode_static_quantization_tutorial.py
|
graph_mode_static_quantization_tutorial.py
|
py
| 20,021 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torchvision.models.resnet.resnet18",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.save",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.save",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.script",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.RandomSampler",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.SequentialSampler",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "torch.jit.script",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "torch.quantization.get_default_qconfig",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "torch.quantization.quantize_jit",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "torch.jit.save",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.load",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "torch.quantization.quantize_jit",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "torch.jit.save",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.script",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "torchvision.models.quantization.resnet.resnet18",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "torch.jit.script",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "torch.jit.save",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "torch.jit",
"line_number": 436,
"usage_type": "attribute"
}
] |
417353158
|
import pytest
from vyper.context.types import get_primitive_types
from vyper.context.types.indexable.sequence import ArrayDefinition
from vyper.context.types.utils import get_type_from_abi
from vyper.exceptions import UnknownType
# Elementary ABI type names shared by all parametrized tests below.
BASE_TYPES = ["int128", "uint256", "bool", "address", "bytes32"]
@pytest.mark.parametrize("type_str", BASE_TYPES)
def test_base_types(type_str):
    """A scalar ABI type resolves to an instance of the matching primitive's type."""
    definition = get_type_from_abi({"type": type_str})
    primitive = get_primitive_types()[type_str]
    assert isinstance(definition, primitive._type)
@pytest.mark.parametrize("type_str", BASE_TYPES)
def test_base_types_as_arrays(type_str):
    """A one-dimensional ABI array yields an ArrayDefinition over the scalar type."""
    element_primitive = get_primitive_types()[type_str]
    array_def = get_type_from_abi({"type": f"{type_str}[3]"})
    assert isinstance(array_def, ArrayDefinition)
    assert array_def.length == 3
    assert isinstance(array_def.value_type, element_primitive._type)
@pytest.mark.parametrize("type_str", BASE_TYPES)
def test_base_types_as_multidimensional_arrays(type_str):
    """``T[3][5]`` parses as an outer length-5 array whose elements are ``T[3]``."""
    element_primitive = get_primitive_types()[type_str]
    outer = get_type_from_abi({"type": f"{type_str}[3][5]"})
    # The outermost dimension comes from the rightmost suffix.
    assert isinstance(outer, ArrayDefinition)
    assert outer.length == 5
    inner = outer.value_type
    assert isinstance(inner, ArrayDefinition)
    assert inner.length == 3
    assert isinstance(inner.value_type, element_primitive._type)
@pytest.mark.parametrize("idx", ["0", "-1", "0x00", "'1'", "foo", "[1]", "(1,)"])
def test_invalid_index(idx):
    """Array sizes that are not positive decimal integers are rejected."""
    abi_entry = {"type": f"int128[{idx}]"}
    with pytest.raises(UnknownType):
        get_type_from_abi(abi_entry)
| null |
tests/functional/context/types/test_type_from_abi.py
|
test_type_from_abi.py
|
py
| 1,641 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "vyper.context.types.get_primitive_types",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "vyper.context.types.utils.get_type_from_abi",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "vyper.context.types.get_primitive_types",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "vyper.context.types.utils.get_type_from_abi",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "vyper.context.types.indexable.sequence.ArrayDefinition",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "vyper.context.types.get_primitive_types",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "vyper.context.types.utils.get_type_from_abi",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "vyper.context.types.indexable.sequence.ArrayDefinition",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "vyper.context.types.indexable.sequence.ArrayDefinition",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "vyper.exceptions.UnknownType",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "vyper.context.types.utils.get_type_from_abi",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 44,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.