seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
---|---|---|---|---|---|---|---|---|---|---|---|---|---
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
6555235
|
import scrapy
from ..items import GuaziItem
class GuaziSpider(scrapy.Spider):
name = 'guazi2'
allowed_domains = ['www.guazi.com']
# override start_urls with the start_requests() method
def start_requests(self):
"""Generate all page URLs and hand them to the scheduler in one go"""
for i in range(1,6):
url = 'https://www.guazi.com/ty/buy/o{}/#bread'.format(i)
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
# base XPath: match every car node in the listing
li_list = response.xpath('//ul[@class="carlist clearfix js-top"]/li')
for li in li_list:
item = GuaziItem() # a fresh item per car, so yielded items are independent
item['url'] = li.xpath('./a/@href').extract()[0]
item['name'] = li.xpath('./a/@title').extract()[0]
item['price'] = li.xpath('./a/div[@class="t-price"]/p').extract()[0]
# pass the scraped data on to the pipeline file pipelines.py
yield item
| null |
Spider/day08/Guazi/Guazi/spiders/guazi2.py
|
guazi2.py
|
py
| 962 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "items.GuaziItem",
"line_number": 18,
"usage_type": "call"
}
] |
565145640
|
from django.test import TestCase
from django.urls import reverse
from complaint.apps import ComplaintConfig
from complaint.models import Complaint
from django.utils import timezone
class ComplaintConfigTest(TestCase):
def test_apps(self):
self.assertEqual(ComplaintConfig.name, "complaint")
class ComplaintViewTests(TestCase):
def test_map_view_works(self):
response = self.client.get(reverse("issue-complaint"))
self.assertEqual(response.status_code, 200)
def test_right_complaint_post_request(self):
holder = self.client.post(
reverse("issue-complaint"),
data={
"subject": "Subject1",
"message": "I have a problem",
"image": "",
},
)
self.assertEqual(holder.status_code, 200)
class ComplaintModelTests(TestCase):
def test_complaint_contains_correct_info(self):
test_complaint_1 = Complaint(
subject="I hate math",
message="I hate math",
uploaded_at=timezone.now(),
image=None,
)
test_complaint_1.save()
response = self.client.get(reverse("issue-complaint"))
self.assertEqual(response.status_code, 200)
def test_complaint_contains_no_data(self):
form = Complaint()
self.assertFalse(form.save())
def test_complaint_contains_wrong_message_data(self):
form = Complaint(
subject="hi all,",
message="",
uploaded_at=timezone.now(),
image=None,
)
self.assertFalse(form.save())
def test_complaint_contains_wrong_subject_data(self):
form = Complaint(
subject="",
message="yippie",
uploaded_at=timezone.now(),
image=None,
)
self.assertFalse(form.save())
def test_complaint_contains_without_subject_message_data(self):
form = Complaint(
uploaded_at=timezone.now(),
image=None,
)
self.assertFalse(form.save())
def test_complaint_contains_without_image(self):
form = Complaint(
subject="hi all,",
message="yippie",
uploaded_at=timezone.now(),
)
self.assertFalse(form.save())
| null |
complaint/tests.py
|
tests.py
|
py
| 2,300 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.TestCase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "complaint.apps.ComplaintConfig.name",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "complaint.apps.ComplaintConfig",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.test.TestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "complaint.models.Complaint",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 76,
"usage_type": "name"
}
] |
43966129
|
import json
import datetime
out = {"books": [], "cds": [], "films": []}
everything = {"library": {}}
helabiblan = "my_library_register.json"
def get_inputs_book():
#user input to record the log
dbok = {}
print("******************Böcker************************")
dbok['BookTitel'] = input("Lägg in en bok titel: ")
dbok['Bookforfattare'] = input("Vad heter forfattaren: ")
dbok['BookAntal'] = input("Hur många sidor har boken: ")
dbok['BookInkopspris'] = input("Vad var EttInkopspris: ")
dbok['BookInkopesar'] = input("Vad var EttInkopesar: ")
out['books'].append(dbok)
return out
def get_inputs_cd():
#user input to record the log
dcd = {}
print("******************CD************************")
dcd['CdTitel'] = input("Lägg in en cd titel: ")
dcd['CdArtist'] = input("Vad heter artisten: ")
dcd['CdAntalspar'] = input("Hur många spår finns det: ")
dcd['CdLangd'] = input("Vad är längden: ")
dcd['CdInkopspris'] = input("Vad var inköpspriset: ")
out['cds'].append(dcd)
return out
def get_inputs_film():
#user input to record the log
dfilm = {}
print("******************FILM************************")
dfilm['FilmTitel'] = input("Lägg in en film titel: ")
dfilm['FilmRegissor'] = input("Vad heter artisten: ")
dfilm['FilmLangd'] = input("Hur lång är filmen: ")
dfilm['FilmInkopspris'] = input("Vad var FilmInkopspris: ")
dfilm['FilmInkopesar'] = input("Vad var inköpsåret: ")
dfilm['Forslitningsgrad'] = input("Ange forslitningsgrad mellan 1-10 : ")
out['films'].append(dfilm)
return out
def visaplus():
savetest()
Final_Lista = makealista()
skrivut_hela_biblioteket(Final_Lista)
def menufinal():
while True:
a = input("""
Chose the details you like to register
A: Registera böcker
B: Registera cdskivor
C: Registera filmer
D: VISA LIBRARY
Q: Quit
Please enter your val: """).lower()
if a=="a":
get_inputs_book()
elif a=="b":
get_inputs_cd()
elif a=="c":
get_inputs_film()
elif a=="d":
visaplus()
elif a=="q":
savetest()
break
else:
print("Välj ett alternative")
def savetest():
everything["library"].update(out)
with open(helabiblan,'w') as f:
json.dump(everything, f, indent=2)
#--------------------------book----------------------
def rakna_bok(År, price):
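# book value: depreciate 10% per year for the first 50 years, then appreciate 8% per year beyond that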
ÅretNu = datetime.datetime.now()
age = ÅretNu.year - År
if age > 50:
price = price*(0.9**50)*(1.08**(age-50))
return price
else:
price = price*(0.9**age)
return price
#---------------------------film-----------------------------------
def taborttioprocent(År, price):
ÅretNu = datetime.datetime.now()
age = ÅretNu.year - År
price = price*(0.9**age)
return round(price)
def the_film_worth(FilmInkopspris, förslitningsgrad):
# use this if they happen to press enter by mistake
# wear grades 1-9 each add 10% of the purchase price per grade; grade 10 returns the price unchanged
if 1 <= förslitningsgrad <= 9:
return round(FilmInkopspris * (1 + förslitningsgrad * 0.1))
elif förslitningsgrad == 10:
return round(FilmInkopspris)
#---------------------------cd-----------------------------------
# taborttioprocent above (10% depreciation per year) is reused for cds as well
def finalworth(Titel, Artist, Thelist, FirstPris):
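# a CD is worth its purchase price divided by the number of copies of the same title+artist in the library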
TitelPlusArtist = Titel + " " + Artist
Sametitels = Thelist.count(TitelPlusArtist)
# print(Sametitels)
Worth = FirstPris / Sametitels
return round(Worth)
def makealista():
with open(helabiblan, 'r') as f:
info = json.load(f)
access_library = info['library']
lista1 = []
lista2 = []
lista = []
# here I want to reach the entries under results, which will be the title and the artist
for cd_data in access_library['cds']:
titel_cd = cd_data['CdTitel']
artist_cd = cd_data['CdArtist']
lista1.append(titel_cd)
lista2.append(artist_cd)
for name, surname in zip(lista1, lista2):
lista.append(name + " " + surname)
return lista
#-----------------------------------------
def skrivut_hela_biblioteket(lista):
with open(helabiblan, 'r') as f:
info = json.load(f)
access_library = info['library']
books = access_library['books']
cds = access_library['cds']
films = access_library['films']
sorted_list_books = sorted(books, key=lambda k: (k['BookTitel']))
sorted_list_cds = sorted(cds, key=lambda k: (k['CdTitel']))
sorted_list_films = sorted(films, key=lambda k: (k['FilmTitel']))
for book_data in sorted_list_books:
BookTitel = book_data['BookTitel']
Bookforfattare = book_data['Bookforfattare']
BookAntal = book_data['BookAntal']
BookInkopspris = int(book_data['BookInkopspris'])
BookInkopesar = int(book_data['BookInkopesar'])
VardetNu = int(rakna_bok(BookInkopesar, BookInkopspris))
print("-------------------------------------------------------------sorterad----------------------------------------------------------------")
print("BookTitel", BookTitel , "forfattare",Bookforfattare, "Antalsidor", BookAntal, "Inkopspris", BookInkopspris, "Inkopesar", BookInkopesar, "NyaVärdet:", VardetNu, "kr")
for cd_data in sorted_list_cds:
CdTitel = cd_data['CdTitel']
CdArtist = cd_data['CdArtist']
CdAntalspar = cd_data['CdAntalspar']
CdLangd = int(cd_data['CdLangd'])
CdInkopspris = int(cd_data['CdInkopspris'])
nyttpris = finalworth(CdTitel, CdArtist, lista, CdInkopspris)
print("-------------------------------------------------------------sorterad----------------------------------------------------------------")
print("CdTitel:", CdTitel , "Artist:", CdArtist, "EttAntalspar:",CdAntalspar, "EnLangd:", CdLangd, "EttInkopspris:", CdInkopspris, "NyaVärdet:", nyttpris, "kr")
for film_data in sorted_list_films:
FilmTitel = film_data['FilmTitel']
FilmRegissor = film_data['FilmRegissor']
FilmLangd = int(film_data['FilmLangd'])
FilmInkopspris = int(film_data['FilmInkopspris'])
FilmInkopesar = int(film_data['FilmInkopesar'])
Forslitningsgrad = int(film_data['Forslitningsgrad'])
MinusTio = taborttioprocent(FilmInkopesar, FilmInkopspris)
filmpris = the_film_worth(MinusTio, Forslitningsgrad)
print("-------------------------------------------------------------sorterad-----------------------------------------------------------------")
print("FilmTitel:", FilmTitel , "Regissor:", FilmRegissor, "EnfilmLangd:",FilmLangd , "min", "FilmInkopspris:", FilmInkopspris, "kr", "EttInkopesar:", FilmInkopesar, "NyaVärdet:", filmpris, "kr")
menufinal()
| null |
tonymain.py
|
tonymain.py
|
py
| 8,879 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dump",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 224,
"usage_type": "call"
}
] |
464662086
|
import codecs
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from scrapy.utils.log import configure_logging
from website_scraper.spiders.presales_spider import PresalesSpider
from elasticsearch import Elasticsearch
from urlparse import urlparse, urlunparse
ELASTICSEARCH_URL = "http://localhost:9200/"
## Sample crawl config. It's a list of company configs.
crawl_config = [
{
"company_name": "Enthought",
"allowed_domains": ['www.docker.com'],
"start_urls": ['https://www.docker.com/careers', 'https://boards.greenhouse.io/embed/job_board?for=docker&b=https://www.docker.com/careers'],
"record_type": "data",
},
{
"company_name": "SiteControls",
"allowed_domains": ['koiosworks.com'],
"start_urls": ['http://koiosworks.com/careers'],
"record_type": "data",
},
{
"company_name": "QuoraInc.",
"allowed_domains": ['www.quora.com'],
"start_urls": ['https://www.quora.com/careers'],
"record_type": "data",
},
{
"company_name": "SensorLogic(AquiredByGemalto)",
"allowed_domains": ["www.sensorlogic.com"],
"start_urls": ["http://www.sensorlogic.com"],
"record_type": "data",
},
]
def get_crawl_config():
"""
This function is used to get company configurations from elasticsearch database.
"""
config_list = []
search_conn = Elasticsearch([ELASTICSEARCH_URL])
resp = search_conn.search(index='presales', body={"from": 0, "size": 1000, "_source": {"exclude": ["recent_activity", "company_description", "careers_page_data"]}, "query": {"match_all": {}}})
data = resp["hits"]["hits"]
for hit in data:
record = hit['_source']
config = {}
config['company_name'] = record['company_name']
with codecs.open('companies.txt', 'a', encoding='utf8') as company_names:
company_names.write(config['company_name']+'\n')
url_params = urlparse(record['website'])
# Forming the start url with path /careers
new_url = urlparse('')
new_url = new_url._replace(scheme=url_params.scheme if url_params.scheme else 'http')
new_url = new_url._replace(netloc=url_params.netloc if url_params.netloc else url_params.path.rstrip('/'))
#new_url = new_url._replace(path='careers')
new_url = new_url._replace(path='')
start_url = urlunparse(new_url)
config['start_urls'] = [start_url]
config['record_type'] = 'data'
config['allowed_domains'] = [new_url.netloc]
config_list.append(config)
return config_list
crawl_config = get_crawl_config()
configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
process = CrawlerProcess(get_project_settings())
#crawl_config = crawl_config[3:4]
for config in crawl_config:
process.crawl('presales', company_name = config["company_name"], allowed_domains = config["allowed_domains"], start_urls = config["start_urls"], record_type = config['record_type'])
process.start() # the script will block here until the crawling is finished
| null |
presales_scraper/scrape_companies.py
|
scrape_companies.py
|
py
| 3,142 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "urlparse.urlparse",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "urlparse.urlparse",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "urlparse.urlunparse",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "scrapy.utils.log.configure_logging",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "scrapy.crawler.CrawlerProcess",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "scrapy.utils.project.get_project_settings",
"line_number": 74,
"usage_type": "call"
}
] |
392703441
|
from math import sqrt, atan2, log
import pygame.gfxdraw as gfx
import pygame
NICE = True
if NICE:
def line(surface, colour, start, end, width=1):
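# draw an anti-aliased thick line as a filled quad offset half the width perpendicular to the segment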
dx = end[0] - start[0]
dy = end[1] - start[1]
linelength = sqrt(dx * dx + dy * dy)
if linelength == 0:
return
dx /= linelength
dy /= linelength
px = 0.5 * width * -dy
py = 0.5 * width * dx
poly = (
(start[0] + px, start[1] + py),
(end[0] + px, end[1] + py),
(end[0] - px, end[1] - py),
(start[0] - px, start[1] - py),
)
try:
gfx.filled_polygon(surface, poly, colour)
gfx.aapolygon(surface, poly, colour)
except OverflowError:
pass
def arrow(surface, colour, start, end, width=1, asize=None):
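# draw the line plus a triangular arrow head; the head size defaults to log(length)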
line(surface, colour, start, end, width)
dx = end[0] - start[0]
dy = end[1] - start[1]
length = sqrt(dx * dx + dy * dy)
if length == 0:
return
if asize is None:
asize = log(length)
dx /= length
dy /= length
px = 0.5 * asize * -dy
py = 0.5 * asize * dx
poly = (
(end[0] + px * 3, end[1] + py * 3),
(end[0] - px * 3, end[1] - py * 3),
(end[0] + dx * asize * 2, end[1] + dy * asize * 2),
)
try:
gfx.filled_polygon(surface, poly, colour)
gfx.aapolygon(surface, poly, colour)
except OverflowError:
pass
def circle(surface, colour, centre, radius, width=None):
try:
gfx.filled_circle(surface, *centre, radius, colour)
gfx.aacircle(surface, *centre, radius, colour)
except OverflowError:
pass
def polygon(surface, colour, points, width=None):
gfx.filled_polygon(surface, points, colour)
gfx.aapolygon(surface, points, colour)
def rect(surface, colour, rect, width=None):
polygon(surface, colour, (
(rect[0], rect[1]), (rect[0] + rect[2], rect[1]),
(rect[0] + rect[2], rect[1] + rect[3]), (rect[0], rect[1] + rect[3])
), width)
def path(surface, colour, points, width, tail=False):
if not tail:
for n in range(1, len(points)):
line(surface, colour, points[n - 1], points[n], width)
else:
for n in range(1, len(points)):
line(surface, colour, points[n - 1], points[n], min(width, n // 2))
else:
line = pygame.draw.line
circle = pygame.draw.circle
rect = pygame.draw.rect
polygon = pygame.draw.polygon
def path(surface, colour, points, width, tail=False):
if not tail:
for n in range(1, len(points)):
line(surface, colour, points[n - 1], points[n], width)
else:
for n in range(1, len(points)):
line(surface, colour, points[n - 1], points[n], min(width, n // 2))
| null |
animlib/draw.py
|
draw.py
|
py
| 3,050 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "math.sqrt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw.filled_polygon",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.aapolygon",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw.filled_polygon",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.aapolygon",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.filled_circle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.aacircle",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.filled_polygon",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "pygame.gfxdraw.aapolygon",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "pygame.draw",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw",
"line_number": 97,
"usage_type": "attribute"
}
] |
335574123
|
from stdnet.exceptions import *
from stdnet.utils import encoders
from .fields import Field
from . import related
from .struct import *
__all__ = ['ManyFieldManagerProxy',
'Many2ManyManagerProxy',
'MultiField',
'SetField',
'ListField',
'HashField']
class ManyFieldManagerProxy(object):
def __init__(self, name, cache_name, pickler,
value_pickler, scorefun):
self.name = name
self.cache_name = cache_name
self.pickler = pickler
self.value_pickler = value_pickler
self.scorefun = scorefun
def __get__(self, instance, instance_type=None):
if instance is None:
return self
if instance.id is None:
raise MultiFieldError('id for %s is not available. '
'Call save on instance before accessing %s.' % (instance._meta, self.name))
cache_name = self.cache_name
try:
return getattr(instance, cache_name)
except AttributeError:
rel_manager = self.get_related_manager(instance)
setattr(instance, cache_name, rel_manager)
return rel_manager
def get_related_manager(self, instance):
return self.get_structure(instance)
def get_structure(self, instance):
session = instance.session
st = getattr(backend,self.stype)
return st(backend.basekey(instance._meta,'id',instance.id,self.name),
instance = instance,
#timeout = meta.timeout,
pickler = self.pickler,
value_pickler = self.value_pickler,
scorefun = self.scorefun)
class Many2ManyManagerProxy(ManyFieldManagerProxy):
def __init__(self, name, cache_name, stype, to_name, to):
super(Many2ManyManagerProxy, self).__init__(name, cache_name,
related.ModelFieldPickler(to), None, None)
self.stype = stype
self.to_name = to_name
self.model = to
def get_related_manager(self, instance):
st = self.get_structure(instance)
return M2MRelatedManager(self.model,
st, self.to_name, instance = instance)
class MultiField(Field):
'''Virtual class for fields which are proxies to remote
:ref:`data structures <structures-backend>` such as :class:`stdnet.List`,
:class:`stdnet.Set`, :class:`stdnet.OrderedSet` and :class:`stdnet.HashTable`.
Sometimes you want to structure your data model without breaking it up
into multiple entities. For example, you might want to define a model
that contains a list of messages an instance receives::
from stdnet import orm
class MyModel(orm.StdModel):
...
messages = orm.ListField()
By defining structured fields in a model, an instance of that model can access
a stand alone structure in the back-end server with very little effort.
:parameter model: an optional :class:`stdnet.orm.StdModel` class. If
specified, the structure will contain ids of instances of the model.
It is saved in the :attr:`relmodel` attribute.
.. attribute:: relmodel
Optional :class:`stdnet.orm.StdModel` class contained in the structure.
It can also be specified as a string.
.. attribute:: pickler
an instance of :class:`stdnet.utils.encoders.Encoder` used to serialize
and deserialize data. It contains the ``dumps`` and ``loads`` methods.
Default :class:`stdnet.utils.encoders.Json`.
.. attribute:: value_pickler
Same as the :attr:`pickler` attribute, this serializer is applied to values
(used by hash tables)
Default: ``None``.
'''
default_pickler = encoders.Json()
default_value_pickler = None
def __init__(self,
model = None,
pickler = None,
value_pickler = None,
required = False,
scorefun = None,
**kwargs):
# Force required to be false
super(MultiField,self).__init__(required = False, **kwargs)
self.relmodel = model
self.index = False
self.unique = False
self.primary_key = False
self.pickler = pickler
self.value_pickler = value_pickler
self.scorefun = scorefun
def register_with_model(self, name, model):
super(MultiField,self).register_with_model(name, model)
if self.relmodel:
related.load_relmodel(self,self._set_relmodel)
else:
self._register_with_model()
def _set_relmodel(self, relmodel):
self.relmodel = relmodel
if not self.pickler:
self.pickler = related.ModelFieldPickler(self.relmodel)
self._register_with_model()
def _register_with_model(self):
self._install_encoders()
self.pickler = self.pickler or self.default_pickler
self.value_pickler = self.value_pickler or self.default_value_pickler
setattr(self.model,
self.name,
ManyFieldManagerProxy(self.name,
self.get_cache_name(),
pickler = self.pickler,
value_pickler = self.value_pickler,
scorefun = self.scorefun))
def _install_encoders(self):
if self.relmodel and not self.pickler:
self.pickler = related.ModelFieldPickler(self.relmodel)
def add_to_fields(self):
self.model._meta.multifields.append(self)
def to_python(self, instance):
return None
def id(self, obj):
return getattr(obj,self.attname).id
def todelete(self):
return True
def structure_class(self):
raise NotImplementedError
class SetField(MultiField):
'''A field maintaining an unordered collection of values. It is initiated
without any argument other than an optional model class.
When accessed from the model instance, it returns an instance of
:class:`stdnet.Set` structure. For example::
class User(orm.StdModel):
username = orm.AtomField(unique = True)
password = orm.AtomField()
following = orm.SetField(model = 'self')
It can be used in the following way::
>>> user = User(username = 'lsbardel', password = 'mypassword').save()
>>> user2 = User(username = 'pippo', password = 'pippopassword').save()
>>> user.following.add(user2)
>>> user.save()
>>> user2 in user.following
True
'''
def structure_class(self):
return Zset if self.ordered else Set
class ListField(MultiField):
'''A field maintaining a list of values.
When accessed from the model instance,
it returns an instance of :class:`stdnet.List` structure. For example::
class UserMessage(orm.StdModel):
user = orm.SymbolField()
messages = orm.ListField()
Let's register it with redis::
>>> orm.register(UserMessage, 'redis://127.0.0.1:6379/?db=11')
'redis db 11 on 127.0.0.1:6379'
Can be used as::
>>> m = UserMessage(user = 'pippo').save()
>>> m.messages.push_back("adding my first message to the list")
>>> m.messages.push_back("ciao")
>>> m.save()
>>> type(m.messages)
<class 'stdnet.backends.structures.structredis.List'>
>>> m.messages.size()
2
'''
type = 'list'
def structure_class(self):
return List
class HashField(MultiField):
'''A hash table field, the networked equivalent of a python dictionary.
Keys are strings while values are strings or numbers.
When accessed from the model instance,
it returns an instance of :class:`stdnet.HashTable` structure.
'''
type = 'hash'
default_pickler = encoders.NoEncoder()
default_value_pickler = encoders.Json()
def get_pipeline(self):
return 'hash'
def _install_encoders(self):
if self.relmodel and not self.value_pickler:
self.value_pickler = related.ModelFieldPickler(self.relmodel)
def structure_class(self):
return HashTable
| null |
stdnet/orm/std.py
|
std.py
|
py
| 8,337 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "fields.Field",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "stdnet.utils.encoders.Json",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "stdnet.utils.encoders",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "stdnet.utils.encoders.NoEncoder",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "stdnet.utils.encoders",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "stdnet.utils.encoders.Json",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "stdnet.utils.encoders",
"line_number": 237,
"usage_type": "name"
}
] |
397230784
|
from __future__ import print_function
from PIL import Image
from numpy import clip
from math import pi,atan2,hypot,floor
import os
import shutil
import time
restriction=['Cancel', 'cancel', 'CANCEL', ' Cancel', ' cancel', ' CANCEL', 'Cancel ', 'cancel ', 'CANCEL ']
print("Wirte a direction as the example or write a name..")
path=input("Example.... C:\\name1\\name2\\....\n")
saver=input("Where do you want to save?...\nWrite a second path...\nif you have done a mistake, write Cancel and try again\n")
while saver in restriction:
print("Please, try again")
print("Wirte a direction as the example or write a name..")
path=input("Example.... C:\\name1\\name2\\....\n")
print("Write Cancel if you want to go back")
saver=input("Where do you want to save?...\nWrite a second path...\n")
imagesfiles = []
imagesDirection = os.walk(path)
file_extension = ".png"
os.mkdir('360transform')
#-----------------------360 TO CUBE -----------------------------------
print("360 to Cube Image....")
def outImgToXYZ(i,j,face,edge):
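# map an output pixel (i,j) of the 4x3 cube layout to a 3D point on the matching cube face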
a = 2.0*float(i)/edge
b = 2.0*float(j)/edge
if face==0: # back
(x,y,z) = (-1.0, 1.0-a, 3.0 - b)
elif face==1: # left
(x,y,z) = (a-3.0, -1.0, 3.0 - b)
elif face==2: # front
(x,y,z) = (1.0, a - 5.0, 3.0 - b)
elif face==3: # right
(x,y,z) = (7.0-a, 1.0, 3.0 - b)
elif face==4: # top
(x,y,z) = (b-1.0, a -5.0, 1.0)
elif face==5: # bottom
(x,y,z) = (5.0-b, a-5.0, -1.0)
return (x,y,z)
def convertBack(imgIn,imgOut):
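# convert an equirectangular 360 panorama into a horizontal-cross cube map using bilinear sampling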
inSize = imgIn.size
outSize = imgOut.size
inPix = imgIn.load()
outPix = imgOut.load()
edge = int(inSize[0]/4) # the length of each edge in pixels
for i in range(outSize[0]):
face = int(i/edge) # 0 - back, 1 - left 2 - front, 3 - right
if face==2:
rng = range(0,edge*3)
else:
rng = range(edge,edge*2)
for j in rng:
if j<edge:
face2 = 4 # top
elif j>=2*edge:
face2 = 5 # bottom
else:
face2 = face
(x,y,z) = outImgToXYZ(i,j,face2,edge)
theta = atan2(y,x) # range -pi to pi
r = hypot(x,y)
phi = atan2(z,r) # range -pi/2 to pi/2
# source img coords
uf = ( 2.0*edge*(theta + pi)/pi )
vf = ( 2.0*edge * (pi/2 - phi)/pi)
# Use bilinear interpolation between the four surrounding pixels
ui = floor(uf) # coord of pixel to bottom left
vi = floor(vf)
u2 = ui+1 # coords of pixel to top right
v2 = vi+1
mu = uf-ui # fraction of way across pixel
nu = vf-vi
# Pixel values of four corners
A = inPix[int(ui % inSize[0]),int(clip(vi,0,inSize[1]-1))]
B = inPix[int(u2 % inSize[0]),int(clip(vi,0,inSize[1]-1))]
C = inPix[int(ui % inSize[0]),int(clip(v2,0,inSize[1]-1))]
D = inPix[int(u2 % inSize[0]),int(clip(v2,0,inSize[1]-1))]
# interpolate
(r,g,b) = (
A[0]*(1-mu)*(1-nu) + B[0]*(mu)*(1-nu) + C[0]*(1-mu)*nu+D[0]*mu*nu,
A[1]*(1-mu)*(1-nu) + B[1]*(mu)*(1-nu) + C[1]*(1-mu)*nu+D[1]*mu*nu,
A[2]*(1-mu)*(1-nu) + B[2]*(mu)*(1-nu) + C[2]*(1-mu)*nu+D[2]*mu*nu )
outPix[i,j] = (int(round(r)),int(round(g)),int(round(b)))
#------------ List of images -------------
for root, dirs,files in imagesDirection:
print("root ", root)
print("files ", files)
for infiles in files:
(nombreFichero, extension) = os.path.splitext(infiles)
if(extension == ".jpg"):
imagesfiles.append(infiles)
elif(extension == ".jpeg"):
imagesfiles.append(infiles)
elif (extension == ".png"):
imagesfiles.append(infiles)
for infile in imagesfiles:
print("------")
print(infile)
full_path = os.path.join(root, infile)
os.mkdir('Carpet {}'.format(infile))
imgIn = Image.open(full_path)
inSize = imgIn.size
imgOut = Image.new("RGB",(inSize[0],int(inSize[0]*3/4)),"black")
convertBack(imgIn,imgOut)
#imgOut.save("filtre.png")
print("Cube Image Finished...")
imgOut.save("Cube.png")
#--------------------Map cube sort------------------------------------
print("Cube Image Map...")
name_map = [ \
["", "", "posy", ""],
["negz", "negx", "posz", "posx"],
["","", "negy", "0"]]
image=Image.open("Cube.png")
#print(image, image.format, "%dx%d" % image.size, image.mode)
imSize=image.size
cube_size = imSize[0] / 4
for row in range(3):
for col in range(4):
if name_map[row][col] != "":
sx = cube_size * col
sy = cube_size * row
fn = name_map[row][col] + file_extension
images=image.crop((sx, sy, sx + cube_size, sy + cube_size))
if row==0 and col==2 :
images.save("n1.jpg")
elif row ==1 and col==0 :
images.save("n2.jpg")
elif row ==1 and col==1 :
images.save("n3.jpg")
elif row==1 and col==2 :
images.save("n4.jpg")
elif row==1 and col==3 :
images.save("n5.jpg")
elif row==2 and col==2 :
images.save("n6.jpg")
elif row==2 and col==3 :
images.save("n7.jpg")
print("Cube map Finished....")
#-------Crop Large image---------
negative = Image.open("n7.jpg")
neg = negative.resize((512, 3072), Image.ANTIALIAS)
#------Image.paste------
line = [2, 6, 4, 3, 5, 1]
jack = (0, 0)
a = 512
for i in line:
#print(i)
img = Image.open("n{}.jpg".format(i))
piece=img.resize((512, 512), Image.ANTIALIAS)
for j in range(0,i):
jack = (0, a*j)
#print(jack)
neg.paste(piece,jack)
neg.save("cube.jpg")
shutil.move('cube.jpg','Carpet {}'.format(infile))
#------------Image zooms binders ---------
lectura=Image.open("n7.jpg")
an=lectura.width
al=lectura.height
general=512
pha=an/general
ta=al/general
if pha>ta:
nuom=int(pha)
elif pha<ta:
nuom=int(ta)
elif pha==ta:
nuom=int(pha)
for i in range(1,nuom+2):
os.mkdir('z{}'.format(i))
print("Zooms binder done")
time.sleep(5)
#---------------------Zooms Crop-------------------------------------
for photo in range(1,7):
imz=Image.open("n{}.jpg".format(photo))
ancho=imz.width
alto=imz.height
general=512
numero=0
alpha=ancho/general
beta=alto/general
area=(0,0,0,0)
if alpha>beta:
numero=int(alpha)
elif alpha<beta:
numero=int(beta)
elif alpha==beta:
numero=int(alpha)
print("Total Zooms..{}".format(numero+1))
print("Zooms Process...")
#shutil.move('{}'.format(jpgs),'Carpet {}'.format(jpgs))
#shutil.move('Carpet {}'.format(jpgs),'{}'.format(direcction))
for zooms in range(1,numero+2):
# cube face folder per image: 1-u, 2-b, 3-l, 4-f, 5-r, 6-d
os.makedirs({1: 'u', 2: 'b', 3: 'l', 4: 'f', 5: 'r', 6: 'd'}[photo])
ex=2**zooms
div=int(ex/2)
y=0
yy=0
x=0
xx=0
for yi in range(1,div+1):
ny=alto/div
m=yi
if yi==1:
y=0
yy=alto/div
else:
y=((yi-1)*ny)
yy=(m*(alto/div))
#---------- Folders along X -----------------
os.makedirs('{}'.format(yi-1))
for xi in range(1,div+1):
nx=ancho/div
mm=xi
if xi ==1:
x=0
xx=ancho/div
else:
x=((xi-1)*nx)
xx=(mm*(ancho/div))
area=(x,y,xx,yy)
copys=imz.crop(area)
copyn=copys.resize((general,general),Image.ANTIALIAS)
copyn.save('{}.jpg'.format(xi-1))
shutil.move('{}.jpg'.format(xi-1),'{}'.format(yi-1))
shutil.move('{}'.format(yi-1), {1: 'u', 2: 'b', 3: 'l', 4: 'f', 5: 'r', 6: 'd'}[photo])
shutil.move({1: 'u', 2: 'b', 3: 'l', 4: 'f', 5: 'r', 6: 'd'}[photo], 'z{}'.format(zooms))
os.rename('z{}'.format(zooms),'{}'.format(zooms))
shutil.move('{}'.format(zooms),'Carpet {}'.format(infile))
shutil.move('Carpet {}'.format(infile),'360transform')
print("delet process.... ")
os.mkdir('delet')
shutil.move('cube.png','delet')
for delet in range(1,7):
shutil.move('n{}.jpg'.format(delet),'delet')
time.sleep(1)
shutil.rmtree('delet')
try:
os.remove('n7.jpg')
except:
print("Fail")
try:
time.sleep(1)
shutil.move('360transform',saver)
except:
print("Saver Doesn´t exist")
shutil.move('360transform',path)
# time.sleep(10)
print("Zooms Finished...")
break
| null |
Python Converter/360image.py
|
360image.py
|
py
| 11,487 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.walk",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "math.pi",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "math.floor",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "shutil.move",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "os.mkdir",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "shutil.move",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 326,
"usage_type": "call"
}
] |
575439170
|
#-*- coding:utf-8 -*-
'''
Normal Distribution, also called Gaussian Distribution
'''
import numpy as np
import matplotlib.pyplot as plt
def simple_plot():
x = np.linspace(0, 10, 10000)
y = np.sin(x)
z = np.cos(x**2)
plt.figure(figsize = (8, 4))
plt.plot(x, y, label = "sin(x)", color = "red", linewidth = 2)
plt.plot(x, z, "b--", label = "cos(x^2)")
plt.xlabel("Time(s)")
plt.ylabel("Volt")
plt.title("PyPlot First Example")
plt.ylim(-1.2, 1.2)
plt.legend()
plt.show()
def normal_plot(mu = 0, sigma = 1, num = 1000):
y = np.random.normal(mu, sigma, num)
plt.figure(figsize = (8, 4))
plt.plot(y, label = "norm", color = "red", linewidth = 2)
plt.xlabel("X")
plt.ylabel("Distribution")
plt.title("Normal-Distribution")
plt.legend()
plt.show()
def normal_distribution(mu = 0, sigma = 1, start = None, end = None, num = 1000):
if start and end:
x = np.linspace(start, end, num)
else:
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, num)
# gaussian density function
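# f(x) = exp(-(x - mu)^2 / (2 sigma^2)) / (sigma * sqrt(2 pi))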
y = np.e ** ((x - mu)**2 / (-2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)
plt.figure(figsize = (8, 4))
plt.plot(x, y, color = "red", linewidth = 2)
plt.xlabel("X")
plt.ylabel("density")
plt.title("Normal-Distribution")
plt.show()
#simple_plot()
#normal_plot()
normal_distribution()
| null |
python/distribution/Gaussian.py
|
Gaussian.py
|
py
| 1,404 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.linspace",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.random.normal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.e",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
}
] |
512803119
|
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import *
from django.http import HttpResponse
from django.core import serializers
from app.settings import PROJECT_ROOT
import json
import os
@csrf_exempt
def loginFacebook(request):
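# parse the raw urlencoded POST body, decode Scandinavian characters, then authenticate or create a user keyed by the Facebook id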
infoArray = request.body.decode('UTF-8') # request becomes string
infoArray = infoArray.split("&")
if "%C3%85" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%85", "Å")
if "%C3%86" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%86", "Æ")
if "%C3%98" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%98", "Ø")
if "%C3%A5" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%A5", "å")
if "%C3%A6" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%A6", "æ")
if "%C3%B8" in infoArray[2]:
infoArray[2] = infoArray[2].replace("%C3%B8", "ø")
if "%C3%85" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%85", "Å")
if "%C3%86" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%86", "Æ")
if "%C3%98" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%98", "Ø")
if "%C3%A5" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%A5", "å")
if "%C3%A6" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%A6", "æ")
if "%C3%B8" in infoArray[3]:
infoArray[3] = infoArray[3].replace("%C3%B8", "ø")
if "+" in infoArray[2]:
infoArray[2] = infoArray[2].replace("+", " ")
if "+" in infoArray[3]:
infoArray[3] = infoArray[3].replace("+", " ")
if len(infoArray) > 4:
email = infoArray[4].split("=")[1]
email = email.replace("%40", "@")
else:
email = ""
facebookId = infoArray[0].split("=")[1]
age = infoArray[1].split("=")[1]
first_name = infoArray[2].split("=")[1]
last_name = infoArray[3].split("=")[1]
password = facebookId[:5] + first_name
user = authenticate(username=facebookId, password=password)
if user is None:
user = User(username=facebookId, email=email, first_name=first_name, last_name=last_name, is_staff=False)
user.set_password(facebookId[:5] + first_name)
user.save()
if int(age) >= 21:
type = "P"
else:
type = "C"
userProfile = UserProfile(user=user, type=type, phone=None, profile_name=first_name, last_name=last_name,
email=email, provider={}, is_active=True)
userProfile.save()
login(request, user)
request.session['username'] = user.username
request.session['profile_name'] = userProfile.profile_name
request.session['profile_pk'] = userProfile.pk
return redirect("skalvi:index")
elif user is not None:
profiles = UserProfile.objects.filter(user=user)
login(request,user)
if user.is_staff:
for profile in profiles:
profile.is_active = True
profile.save()
request.session['username'] = user.username
request.session['profile_name'] = profile.profile_name
request.session['profile_pk'] = profile.pk
break
return redirect("/admin")
elif len(profiles) > 1:
return redirect("skalvi:choose")
else:
# if only one profile
for profile in profiles:
profile.is_active = True
profile.save()
request.session['username'] = user.username
request.session['profile_name'] = profile.profile_name
request.session['profile_pk'] = profile.pk
return redirect("/")
# Admin function to populate the SQLdatabase with all providers from Aktørdatabasen.
def populate(request):
with open(os.path.join(PROJECT_ROOT, '../app/aktordatabasen.json')) as json_file:
json_data = json.load(json_file)
# run through each object that is saved
for i in json_data:
try:
i['Navn'] # throws exception if there is no attribute 'Navn'
try:
entry = UserProfile.objects.filter(profile_name=i['Navn']).first()
except Exception as e:
entry = False
if entry:
org = Organisation(user=entry.user, userprofile=entry, aktordatabase=i)
else:
org = Organisation(aktordatabase=i)
except:
org = Organisation(aktordatabase=i)
org.save()
return HttpResponse('Done! not sure if faulty tho, please check.')
def getProviders(request):
json_serializer = serializers.get_serializer("json")()
providers = Organisation.objects.all()
providers = json_serializer.serialize(providers, ensure_ascii=False)
return HttpResponse(providers, content_type='application/json')
def getUserProviders(request):
profile = UserProfile.objects.get(user=request.user, profile_name=request.session["profile_name"])
providers = profile.provider.split(",")
profileProviders = Organisation.objects.filter(pk__in=providers)
json_serializer = serializers.get_serializer("json")()
payload = json_serializer.serialize(profileProviders, ensure_ascii=False)
return HttpResponse(payload, content_type='application/json')
def getUser(request):
profile = UserProfile.objects.get(user=request.user, profile_name=request.session["profile_name"])
providers = profile.provider.split(",")
username = profile.profile_name
data = {'name': username, 'providers': providers}
return HttpResponse(json.dumps(data), content_type='application/json')
def getProvider(request, pk):
provider = Organisation.objects.get(pk=pk)
data = {
'aktordatabase': provider.aktordatabase
}
return HttpResponse(json.dumps(data), content_type='application/json')
| null |
skalvi/ApiFunctions.py
|
ApiFunctions.py
|
py
| 6,023 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "app.settings.PROJECT_ROOT",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.get_serializer",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.get_serializer",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 157,
"usage_type": "call"
}
] |
295428308
|
import os
import yaml
import torch
import nibabel as nib
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import time
import pickle
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import LambdaLR,MultiStepLR
import matplotlib.pyplot as plt
import sys
# build model from Liu et al.'s github code
sys.path.insert(1, "./CNN_design_for_AD-master/models/")
import build_model_extrablock
# for untrained use:
#config_name = './CNN_design_for_AD-master/config.yaml'
# pretrained model
config_name = './CNN_design_for_AD-master/config2.yaml'
with open(os.path.join('.', config_name), 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
device = torch.device('cuda')
model = build_model_extrablock.build_model(cfg).to(device)
class hcp_dataset(Dataset):
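# dataset wrapper: loads each subject's T1w MRI volume from disk plus one personality label from the CSV manifest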
def __init__(self, df_path, train = False):
self.df = pd.read_csv(df_path)
self.train = train
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
subject_name = self.df.iloc[idx]['Subject']
image_path ='./data/hcp2/'+str(subject_name)+'/T1w/T1w_acpc_dc_restore_brain.nii.gz'
image = nib.load(image_path)
image_array = image.get_fdata()
#Normalization
image_array = (image_array - image_array.mean()) / image_array.std()
#label = self.df.loc[idx][['N','E','O','A','C']].values.astype(int)
label = self.df.loc[idx][['N']].values[0].astype(int) # predict N
sample = {'x': image_array[None,:], 'y': label}
return sample
bs = 1
# full dataset
train_df_path = './train.csv'
val_df_path = './test.csv'
test_df_path = './val.csv'
transformed_dataset = {'train': hcp_dataset(train_df_path, train = True),
'validate':hcp_dataset(val_df_path),
'test':hcp_dataset(test_df_path),}
# for debugging and to see if model can learn training set on tiny sample
#sample_df_path = './sample.csv'
#sample_transformed_dataset = {'train': hcp_dataset(sample_df_path, train = True),
# 'validate':hcp_dataset(sample_df_path),
# 'test':hcp_dataset(sample_df_path),}
#
#dataloader_sample = {x: DataLoader(sample_transformed_dataset[x], batch_size=bs,
# shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
# get data_loader
dataloader = {x: DataLoader(transformed_dataset[x], batch_size=bs,
shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
data_sizes ={x: len(transformed_dataset[x]) for x in ['train', 'validate','test']}
def train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 10, verbose = True, scheduler=None, output_name="test.txt"):
acc_dict = {'train':[],'validate':[]}
loss_dict = {'train':[],'validate':[]}
best_acc = 0
best_model_wts = model.state_dict()  # fallback so load_state_dict below never sees an unbound name
phases = ['train','validate']
since = time.time()
number = 0
for i in range(num_epochs):
print('Epoch: {}/{}'.format(i, num_epochs-1))
print('-'*10)
for p in phases:
running_correct = 0
running_loss = 0
running_total = 0
if p == 'train':
model.train()
else:
model.eval()
for data in dataloader[p]:
optimizer.zero_grad()
image = F.interpolate(data['x'], mode="trilinear", scale_factor=interpolation_scale)
image = image.to(device,dtype=torch.float)
label = data['y'].to(device,dtype=torch.long)
output = model(image)
loss = loss_fn(output, label)
print(number)
number += 1
_, preds = torch.max(output, dim = 1)
num_imgs = image.size()[0]
running_correct += torch.sum(preds ==label).item()
running_loss += loss.item()*num_imgs
running_total += num_imgs
if p== 'train':
loss.backward()
optimizer.step()
epoch_acc = float(running_correct/running_total)
epoch_loss = float(running_loss/running_total)
if verbose or (i%10 == 0):
print('Phase:{}, epoch loss: {:.4f} Acc: {:.4f}'.format(p, epoch_loss, epoch_acc))
acc_dict[p].append(epoch_acc)
loss_dict[p].append(epoch_loss)
if p == 'validate':
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
save_model(best_model_wts, model, acc_dict, loss_dict, output_name)
else:
if scheduler:
scheduler.step()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val acc: {:4f}'.format(best_acc))
model.load_state_dict(best_model_wts)
return model, acc_dict, loss_dict
def save_model(best_model_wts, model, acc_dict, loss_dict, output_name):
model_saved = {'best_model_wts':best_model_wts, 'model':model, 'acc_dict':acc_dict, 'loss_dict':loss_dict}
f=open(output_name,'wb')
pickle.dump(model_saved,f)
f.close()
return None
# from Liu et al.
lr_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr_rate)
interpolation_scale = 0.6
output_name = "pretrained_point001_point6.txt"
model, acc_dict, loss_dict = train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 50, verbose = True, scheduler= MultiStepLR(optimizer, milestones=[20,40], gamma=0.1), output_name = output_name)
| null |
Models/Model 1/Additional Hyperparameter Tuning Scripts/pretrained_point001_point6.py
|
pretrained_point001_point6.py
|
py
| 5,721 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "build_model_extrablock.build_model",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.float",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "torch.long",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "torch.max",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.MultiStepLR",
"line_number": 162,
"usage_type": "call"
}
] |
624394066
|
# uncompyle6 version 3.7.4
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/sensors/aws_sqs_sensor.py
# Compiled at: 2019-09-11 03:47:34
# Size of source mod 2**32: 3692 bytes
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_sqs_hook import SQSHook
from airflow.exceptions import AirflowException
class SQSSensor(BaseSensorOperator):
__doc__ = '\n Get messages from an SQS queue and then deletes the message from the SQS queue.\n If deletion of messages fails an AirflowException is thrown otherwise, the message\n is pushed through XCom with the key ``message``.\n\n :param aws_conn_id: AWS connection id\n :type aws_conn_id: str\n :param sqs_queue: The SQS queue url (templated)\n :type sqs_queue: str\n :param max_messages: The maximum number of messages to retrieve for each poke (templated)\n :type max_messages: int\n :param wait_time_seconds: The time in seconds to wait for receiving messages (default: 1 second)\n :type wait_time_seconds: int\n '
template_fields = ('sqs_queue', 'max_messages')
@apply_defaults
def __init__(self, sqs_queue, aws_conn_id='aws_default', max_messages=5, wait_time_seconds=1, *args, **kwargs):
(super(SQSSensor, self).__init__)(*args, **kwargs)
self.sqs_queue = sqs_queue
self.aws_conn_id = aws_conn_id
self.max_messages = max_messages
self.wait_time_seconds = wait_time_seconds
def poke(self, context):
"""
Check for message on subscribed queue and write to xcom the message with key ``messages``
:param context: the context object
:type context: dict
:return: ``True`` if message is available or ``False``
"""
sqs_hook = SQSHook(aws_conn_id=(self.aws_conn_id))
sqs_conn = sqs_hook.get_conn()
self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue)
messages = sqs_conn.receive_message(QueueUrl=(self.sqs_queue), MaxNumberOfMessages=(self.max_messages),
WaitTimeSeconds=(self.wait_time_seconds))
self.log.info('received message %s', str(messages))
if 'Messages' in messages:
if len(messages['Messages']) > 0:
entries = [{'Id':message['MessageId'], 'ReceiptHandle':message['ReceiptHandle']} for message in messages['Messages']]
result = sqs_conn.delete_message_batch(QueueUrl=(self.sqs_queue), Entries=entries)
if 'Successful' in result:
context['ti'].xcom_push(key='messages', value=messages)
return True
raise AirflowException('Delete SQS Messages failed ' + str(result) + ' for messages ' + str(messages))
return False
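# Illustrative DAG usage (a sketch added for clarity; the task id and queue URL
# are hypothetical, not from this file):
# wait_for_message = SQSSensor(
#     task_id='wait_for_sqs_message',
#     sqs_queue='https://sqs.us-east-1.amazonaws.com/123456789012/my-queue',
#     max_messages=5,
#     wait_time_seconds=1,
#     dag=dag)
# On success the received messages are available via the XCom key 'messages'.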
| null |
pycfiles/apache_airflow_arup-1.10.5-py3.6/aws_sqs_sensor.cpython-36.py
|
aws_sqs_sensor.cpython-36.py
|
py
| 2,946 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "airflow.sensors.base_sensor_operator.BaseSensorOperator",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "airflow.utils.decorators.apply_defaults",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "airflow.contrib.hooks.aws_sqs_hook.SQSHook",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "airflow.exceptions.AirflowException",
"line_number": 46,
"usage_type": "call"
}
] |
521203540
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import parler.models
import aldryn_translation_tools.models
import django.contrib.postgres.fields
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('aldryn_people', '0019_auto_20170225_2314'),
]
operations = [
migrations.CreateModel(
name='RegionalGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('latitudes', django.contrib.postgres.fields.ArrayField(default=[], size=None, base_field=models.FloatField(), blank=True)),
('longitudes', django.contrib.postgres.fields.ArrayField(default=[], size=None, base_field=models.FloatField(), blank=True)),
('number_of_sections', models.IntegerField(default=1, verbose_name='number of sections', blank=True)),
],
options={
'verbose_name': 'Regional Group',
'verbose_name_plural': 'Regional Groups',
},
bases=(aldryn_translation_tools.models.TranslationHelperMixin, aldryn_translation_tools.models.TranslatedAutoSlugifyMixin, parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='RegionalGroupTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('name', models.CharField(help_text="Provide this regional group's name.", max_length=255, verbose_name='name')),
('description', djangocms_text_ckeditor.fields.HTMLField(verbose_name='description', blank=True)),
('slug', models.SlugField(default='', max_length=255, blank=True, help_text='Leave blank to auto-generate a unique slug.', verbose_name='slug')),
('master', models.ForeignKey(related_name='translations', editable=False, to='aldryn_people.RegionalGroup', null=True)),
],
options={
'managed': True,
'db_table': 'aldryn_people_regionalgroup_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'Regional Group Translation',
},
),
migrations.AddField(
model_name='person',
name='regional_section_number',
field=models.IntegerField(default=None, null=True, verbose_name='Regional section number', blank=True),
),
migrations.AddField(
model_name='person',
name='regional_group',
field=models.ForeignKey(related_name='people', default=None, blank=True, to='aldryn_people.RegionalGroup', help_text='Choose the regional groups for this person.', null=True),
),
migrations.AlterUniqueTogether(
name='regionalgrouptranslation',
unique_together=set([('language_code', 'master')]),
),
]
| null |
aldryn_people/migrations/0020_auto_20170228_1549.py
|
0020_auto_20170228_1549.py
|
py
| 3,217 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.contrib.postgres.fields.ArrayField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.contrib",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.contrib.postgres.fields.ArrayField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.contrib",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "aldryn_translation_tools.models.models",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "aldryn_translation_tools.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "parler.models.models",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "parler.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "djangocms_text_ckeditor.fields.fields.HTMLField",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "djangocms_text_ckeditor.fields.fields",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "djangocms_text_ckeditor.fields",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterUniqueTogether",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 60,
"usage_type": "name"
}
] |
395763368
|
import cv2
import numpy as np
from keras.models import model_from_json
import base64
import face.CNN_MODEL as cnn
def predict_emotion(face_image_gray,sess):
resized_img = cv2.resize(face_image_gray, (48,48), interpolation = cv2.INTER_AREA)
# copy the 48x48 grayscale values into a float array
# (equivalent to the original element-wise copy loop)
pixel = resized_img.astype(np.float64)
preds = cnn.Predict(pixel, sess)  # renamed to avoid shadowing the built-in list
return preds[0]
def get_emotion(str,faceCascade,sess):
data =base64.b64decode(str)
dataStr = np.fromstring(data, np.uint8)
frame = cv2.imdecode(dataStr,cv2.IMREAD_COLOR)
img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
img_gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags= cv2.CASCADE_SCALE_IMAGE
)
ret = []
for (x, y, w, h) in faces:
face_image_gray = img_gray[y:y+h, x:x+w]
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
scores = predict_emotion(face_image_gray, sess)
# scores order: ["angry","disgust","fear","happy","sad","surprise","neutral"]
ret += [int(scores[3] + scores[5] - scores[0] - scores[1] - scores[2] - scores[4])]
return ret
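# Illustrative call (a sketch; the cascade path, the base64 image string and the
# session object are assumptions, not part of this module):
# faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# scores = get_emotion(base64_jpeg_string, faceCascade, sess)  # one score per detected face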
| null |
face/FACE.py
|
FACE.py
|
py
| 1,127 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.resize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "face.CNN_MODEL.Predict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "face.CNN_MODEL",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "base64.b64decode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 34,
"usage_type": "call"
}
] |
265298716
|
import sys
import constants as c
from config import Config
from irc.session import Session
from lunatic import Lunatic
def main():
c.write("Lunatic started")
if c.DEBUG:
config = Config("lunatic.yaml.debug")
else:
config = Config("lunatic.yaml")
config.load()
irc_session = Session(config)
irc_session.connect()
lunatic = Lunatic(irc_session, config)
lunatic.loop()
if __name__ == "__main__":
main()
sys.exit(0)
| null |
lunatic/__main__.py
|
__main__.py
|
py
| 478 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "constants.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "constants.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "config.Config",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "config.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "irc.session.Session",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "lunatic.Lunatic",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lunatic.loop",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
}
] |
114608526
|
#!/bin/env python
# -*- coding:UTF8 -*-
# Runoob tutorial: http://www.runoob.com/python3/python-mongodb.html
import time
import pymongo
from datetime import datetime
from datetime import timedelta
from pymongo import MongoClient
from pymongo.collation import Collation
import re
import os
import sys
path='./include'
sys.path.insert(0,path)
import functions as func
HOST=func.get_config('mongo','host')
PORT=int(func.get_config('mongo','port'))
"""
dbname: database name, e.g. RequestResponse
collection_pattern: collection name pattern string, e.g. api_requestresponse
column_name: index column name, e.g. requesttime
order_type: sort order, asc for ascending, desc for descending
"""
def create_index(db,collection_pattern,column_name,order_type):
conn = pymongo.MongoClient(host=HOST, port=PORT)
admin = conn.admin
# get the database
v_db = conn[db]
# list the collection names
v_cols = v_db.list_collection_names()
print(v_cols)
v_pattern1 = collection_pattern
for v_col in v_cols:
if re.match(v_pattern1,v_col):
print("create index for %r" % v_col)
if order_type == "asc" or order_type == "ASC":
v_db[v_col].create_index([(column_name,pymongo.ASCENDING)],background=True)
else:
v_db[v_col].create_index([(column_name,pymongo.DESCENDING)],background=True)
conn.close()
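# Illustrative call (a sketch added for clarity; the pattern below is hypothetical):
# create_index('RequestResponse', r'api_requestresponse_\d+', 'requesttime', 'asc')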
def drop_index(db,collection_pattern,column_name,order_type):
conn = pymongo.MongoClient(host=HOST, port=PORT)
admin = conn.admin
# get the database
v_db = conn[db]
# list the collection names
v_cols = v_db.list_collection_names()
print(v_cols)
v_pattern1 = collection_pattern
for v_col in v_cols:
if re.match(v_pattern1,v_col):
print("drop index for %r" % v_col)
if order_type == "asc" or order_type == "ASC":
try:
v_db[v_col].drop_index([(column_name,pymongo.ASCENDING)])
except pymongo.errors.OperationFailure as e:
print (e)
pass
else:
try:
v_db[v_col].drop_index([(column_name,pymongo.DESCENDING)])
except pymongo.errors.OperationFailure as e:
print (e)
pass
conn.close()
if __name__ == '__main__':
#v_pattern = "ShipOrder_6|ShipOrder_7|ShipOrder_8|ShipOrder_9"
#create_index('AutoSendMessage2',v_pattern,'OrderId','asc')
#create_index('EbayMessage','ebaymailmessagechat','MessageId','asc')
create_index('RequestResponse','api_requestresponse','requesttime','desc')
#drop_index('RequestResponse','api_requestresponse','requesttime','as
| null |
tools/mongo_index.py
|
mongo_index.py
|
py
| 2,391 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "functions.get_config",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "functions.get_config",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pymongo.ASCENDING",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pymongo.DESCENDING",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pymongo.ASCENDING",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pymongo.errors",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pymongo.DESCENDING",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pymongo.errors",
"line_number": 72,
"usage_type": "attribute"
}
] |
107261190
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
def normalizer(x):
m = x.shape[0]
x = x - np.mean(x)
x = (x * m) / (np.sum(x ** 2))
# x = x - np.mean(x)
# x = x / np.sqrt(np.sum(x**2) / len(x) - 1)
return x
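# Worked check of normalizer (added note): for x = np.array([1., 2., 3.]),
# centering gives [-1, 0, 1] and the scale m / sum(x**2) = 3 / 2,
# so the result is [-1.5, 0.0, 1.5].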
train = pd.read_csv("./datasets/train.csv")
########### Pclass ################
# sns.countplot(x='Survived',hue='Pclass',data=train)
# plt.show()
# Pclass 2 is obviously insignificant
# Pclass 1 should be tested
######### Embarked #############
# sns.countplot(x='Survived',hue='Embarked',data=train)
# plt.show()
# print(train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# should be tested
######### SEX #############
# sns.countplot(x='Survived',hue='Sex',data=train)
# plt.show()
# print(train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# obviously important
######### Name #############
# name = train.Name
# name = name.map( lambda name: name.split( ',' )[1].split( '.' )[0].strip() )
# name = name.replace(['Lady', 'the Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
# # name = name.replace(['Mlle', 'Ms'], 'Miss')
# # name = name.replace('Mme', 'Mrs')
# name = name.replace(['Mme', 'Mrs', 'Mlle', 'Ms', 'Miss'], 'Female')
# name = pd.get_dummies(name, drop_first=False)
# print(name.head)
# name.drop('Miss', axis=1, inplace=True)
# tmp = pd.concat([train['Survived'],name], axis=1)
# sns.countplot(x='Survived',hue='Name',data=tmp)
# plt.show()
# print(tmp[['Name', 'Survived']].groupby(['Name'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# Master is not important
########## Ticket ############
# ticket = train.Ticket
def ticket_handler(ticket):
ticket = ticket.replace( '.' , '' )
ticket = ticket.replace( '/' , '' )
ticket = ticket.split()
ticket = map( lambda t : t.strip() , ticket )
ticket = list(filter( lambda t : not t.isdigit() , ticket ))
if len( ticket ) > 0:
return (ticket[0])
else:
return 'X'
# ticket = ticket.map( ticket_handler )
# ticket = pd.get_dummies(ticket, drop_first=False)
# print(ticket.head(5))
# tmp = pd.concat([train['Survived'],ticket], axis=1)
# print(tmp.head(3))
# sns.countplot(x='Survived',hue='Ticket',data=tmp)
# plt.show()
# print(tmp[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# print(tmp.head(10))
############# Cabin #######################
# cabin = train.Cabin
# cabin = cabin.fillna( 'Without Cabin' )
# cabin = cabin.map( lambda c : c[0] )
# tmp = pd.concat([train['Survived'],cabin], axis=1)
# sns.countplot(x='Survived',hue='Cabin',data=tmp)
# plt.show()
# print(tmp[['Cabin', 'Survived']].groupby(['Cabin'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# use only E,B,D,W if they have significant impact
############# Family ####################
# siblings = train.SibSp
# parents = train.Parch
# size = siblings + parents
# train['isAlone'] = size.map( lambda s : 1 if s == 1 else 0 )
# tmp = pd.concat([train['Survived'],siblings], axis=1)
# sns.countplot(x='Survived',hue='SibSp',data=tmp)
# plt.show()
# print(tmp[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False))
# tmp = pd.concat([train['Survived'],train['isAlone']], axis=1)
# print(tmp.head())
# sns.countplot(x='Survived',hue='isAlone',data=tmp)
# plt.show()
# print(tmp[['isAlone', 'Survived']].groupby(['isAlone'], as_index=False).mean().sort_values(by='Survived', ascending=False))
z = pd.DataFrame()
z['E'] = train['Embarked']
z['S'] = train['Sex']
z = pd.get_dummies(z, drop_first=False)
# print(z.head())
z['new'] = z['E_C']*z['S_male'] + z['E_S']*z['S_female'] + z['E_Q']*z['S_female']
z['Survived'] = train['Survived']
# sns.countplot(x='Survived',hue='new',data=z)
# plt.show()
# print(z[['new', 'Survived']].groupby(['new'], as_index=False).mean().sort_values(by='Survived', ascending=False))
z = pd.DataFrame()
z['A'] = train['Age']
z['S'] = train['Sex']
z['S'] = z['S'].replace('male', 1)
z['S'] = z['S'].replace('female', -1)
z['N'] = z['S'] * z['A']
z['N'] = z['N'].mask(z['N'].between(0,11.2), 0)
z['N'] = z['N'].mask(z['N'].between(-38,-30), 0)
z['N'] = z['N'].mask(z['N']!=0, 1)
z['Survived'] = train['Survived']
# print(z.head())
# sns.countplot(x='Survived',hue='N',data=z)
# plt.show()
# print(z[['N', 'Survived']].groupby(['N'], as_index=False).mean().sort_values(by='Survived', ascending=False))
### Numercial features ###
def correlating_numerical_features(data, feature):
g = sns.FacetGrid(data, col='Survived')
g.map(plt.hist, feature, bins=20)
plt.show()
# correlating_numerical_features(train, 'Age')
def correlating_numerical_and_ordinal_features(data, feature_A, feature_B):
grid = sns.FacetGrid(data, col='Survived', row=feature_A, height=2.2, aspect=1.6)
grid.map(plt.hist, feature_B, alpha=.5, bins=20)
grid.add_legend()
plt.show()
# correlating_numerical_and_ordinal_features(train, 'Sex', 'Embarked')
def correlating_categorical_features(data, feature_A, feature_B, feature_C):
grid = sns.FacetGrid(data, row=feature_A, height=2.2, aspect=1.6)
grid.map(sns.pointplot, feature_B, 'Survived', feature_C, palette='deep')
grid.add_legend()
plt.show()
# correlating_categorical_features(train, 'Embarked', 'Pclass', 'Sex')
# correlating_categorical_features(train, 'Embarked', 'Pclass', 'Sex')
def correlating_categorical_and_numerical_features(data, feature_A, feature_B, feature_C):
grid = sns.FacetGrid(data, row=feature_A, col='Survived', height=2.2, aspect=1.6)
grid.map(sns.barplot, feature_B, feature_C, alpha=.5, ci=None)
grid.add_legend()
plt.show()
# correlating_categorical_and_numerical_features(train, 'Embarked', 'Sex', 'Fare')
# correlating_categorical_and_numerical_features(train, 'Embarked', 'Sex', 'Age')
def plot_distribution( df , var , target , **kwargs ):
row = kwargs.get( 'row' , None )
col = kwargs.get( 'col' , None )
facet = sns.FacetGrid( df , hue=target , aspect=4 , row = row , col = col )
facet.map( sns.kdeplot , var , shade= True )
facet.set( xlim=( 0 , df[ var ].max() ) )
facet.add_legend()
plt.show()
# plot_distribution( train , var = 'Age' , target = 'Survived' , row = 'Sex' )
########### Age #############
# train['AgeBand'] = pd.qcut(train['Age'], 20)
# print(train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True))
############ Fare ##############
# train['FareBand'] = pd.qcut(train['Fare'], 4)
# print(train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True))
survived = train['Survived']
train.drop('Survived', axis=1, inplace=True)
test = pd.read_csv("./datasets/test.csv")
data = train.append(test , ignore_index = True)
data = data.sort_values('Name')
# data.drop('PassengerId', axis=1, inplace=True)
# sns.heatmap(data.isnull(), yticklabels=False, cbar=False, cmap='YlGnBu')
# plt.show()
# print(data.describe())
# print(data.describe(include=['O']))
# print(data.info())
"""
1 nan in Fare
Embarked has two nans
Cabin and Age have many nans
"""
passenger_id = data['PassengerId']
sex = pd.get_dummies(data['Sex'], drop_first=True)
embarked = pd.get_dummies(data['Embarked'], drop_first=False, prefix='Embark') #fills nan with 0 0 0
embarked.drop('Embark_C', axis=1, inplace=True)
# embarked.drop('Embark_Q', axis=1, inplace=True)
# pclass = data['Pclass']
pclass = pd.get_dummies(data['Pclass'], drop_first=False, prefix='class')
pclass.drop('class_2', axis=1, inplace=True)
pclass.drop('class_1', axis=1, inplace=True)
# sns.boxplot(x='Pclass', y='Age', data=data)
# plt.show()
#### 39 for pclass=1 & 29 pclass=2 & 24 pclass=3
def age_handler(cols):
Age=cols[0]
Pclass=cols[1]
if pd.isnull(Age):
if Pclass==1:
return 39
elif Pclass==2:
return 29
else:
return 24
else:
return Age
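# e.g. (added note): age_handler([np.nan, 3]) -> 24, age_handler([25.0, 1]) -> 25.0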
age = data[['Age', 'Pclass']].apply(age_handler, axis=1)
bins = (0, 20, 28, 38, 80) #LR coefficient = -0.29
# group_names = [0, 1, 2, 3]
group_names = [1, 2, 3, 4]
age = pd.cut(age, bins, labels=group_names)
# data['age'] = age ## Solving naming issue
fare = data['Fare'].fillna( data.Fare.mean() )
# print(data.Fare.describe())
# plt.hist(fare, bins=100)
# plt.show()
bins = (-1, 12, 31, 1000) #LR Coefficient = -0.16
group_names = [0, 1, 2]
fare = pd.cut(fare, bins, labels=group_names)
name = data['Name']
name = name.map( lambda name: name.split( ',' )[1].split( '.' )[0].strip() )
name = name.replace(['Lady', 'the Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
name = name.replace(['Mme', 'Mrs', 'Mlle', 'Ms', 'Miss'], 'Female')
name = pd.get_dummies(name, drop_first=False)
name.drop('Master', axis=1, inplace=True)
ticket = data.Ticket
def ticket_handler(ticket):
ticket = ticket.replace( '.' , '' )
ticket = ticket.replace( '/' , '' )
ticket = ticket.split()
ticket = map( lambda t : t.strip() , ticket )
ticket = list(filter( lambda t : not t.isdigit() , ticket ))
if len( ticket ) > 0:
return (ticket[0])
else:
return 'XX'
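# e.g. (added note): ticket_handler('STON/O2. 3101282') -> 'STONO2';
# purely numeric tickets such as '3101282' fall through to 'XX'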
ticket = ticket.map( ticket_handler )
ticket = ticket.replace(['SWPP', 'SC'], 'AAA')
ticket = ticket.replace('FCC', 'BBB')
ticket = ticket.replace(['SCAH', 'PP', 'PC'], 'CCC')
ticket = ticket.replace(['CA', 'WEP'], 'DDD')
ticket = ticket.replace('LINE', 'EEE')
ticket = ticket.replace(['SOC', 'SOTONOQ'], 'FFF')
ticket = ticket.replace(['WC', 'A5'], 'GGG')
ticket = ticket.replace(['AS', 'CASOTON', 'SP', 'SOTONO2', 'SCA4', 'SOPP', 'SOP', 'FC', 'Fa', 'SCOW', 'A4'], 'HHH')
ticket = pd.get_dummies(ticket, drop_first=False)
ticket.drop(['PPP', 'STONO', 'STONO2', 'SCParis', 'SCPARIS', 'XX'], axis=1, inplace=True)
ticket.drop(['A', 'AQ3', 'AQ4', 'C', 'LP', 'SCA3', 'STONOQ'], axis=1, inplace=True)
cabin = data.Cabin
cabin = cabin.fillna( 'Without Cabin' )
cabin = cabin.map( lambda c : c[0] )
cabin = cabin.replace(['A','C','F','G','T'], 'drop')
cabin = pd.get_dummies(cabin, drop_first=False)
cabin.drop('drop', axis=1, inplace=True)
siblings = data.SibSp
parents = data.Parch
size = siblings + parents
Fare_Per_Person = (data['Fare'].fillna(data.Fare.mean()))/ (size + 1)
Age_class = data[['Age', 'Pclass']].apply(age_handler, axis=1) * data['Pclass']
isAlone = size.map( lambda s : 'Alone' if s == 1 else 'Not Alone' )
isAlone = pd.get_dummies(isAlone, drop_first=True)
siblings = pd.get_dummies(siblings, drop_first=False)
# siblings.drop([1,2,3,4,5,8], axis=1, inplace=True)
family = pd.DataFrame()
family['Name'] = data['Name']
family['Name'] = family['Name'].map( lambda nam: nam.split( ',' )[0])
family['distinction'] = 0
last = ''
last_index = 0
for index, row in family.iterrows():
if row['Name']==last :
family.loc[index, 'distinction'] = last_index
else:
last_index += 1
last = row['Name']
family.loc[index, 'distinction'] = last_index
family['distinction'] = normalizer(family['distinction'])
families = pd.DataFrame()
families['Sex'] = data['Sex']
families['PassengerId'] = data['PassengerId']
families['Name'] = data['Name'].map( lambda nam: nam.split( ',' )[0])
families['Survived'] = survived
families['Survived'] = families['Survived'].fillna(-1)
families['Ticket'] = data['Ticket']
families['male_alive'] = 0
families['male_dead'] = 0
families['female_dead'] = 0
families['female_alive'] = 0
# print(families.head())
for grp, grp_df in families.groupby(['Name','Ticket']):
if (len(grp_df) != 1): # a family
male_alive = 0
male_dead = 0
female_dead = 0
female_alive = 0
for ind, row in grp_df.iterrows():
if row['Survived']==1 and row['Sex']=='male':
male_alive=1
elif row['Survived']==0 and row['Sex']=='male':
male_dead=1
elif row['Survived']==1 and row['Sex']=='female':
female_alive=1
elif row['Survived']==0 and row['Sex']=='female':
female_dead=1
for ind, row in grp_df.iterrows():
passID = row['PassengerId']
if row['Sex']=='male':
families.loc[families['PassengerId'] == passID, 'male_alive'] = male_alive
families.loc[families['PassengerId'] == passID, 'male_dead'] = male_dead
families.loc[families['PassengerId'] == passID, 'female_dead'] = female_dead
else:
families.loc[families['PassengerId'] == passID, 'male_alive'] = male_alive
families.loc[families['PassengerId'] == passID, 'female_dead'] = female_dead
families.loc[families['PassengerId'] == passID, 'female_alive'] = female_alive
z = pd.DataFrame()
z['E'] = data['Embarked']
z['S'] = data['Sex']
z = pd.get_dummies(z, drop_first=False)
z['new'] = z['E_C']*z['S_male'] + z['E_S']*z['S_female'] + z['E_Q']*z['S_female']
my_feature = pd.DataFrame()
my_feature['feature'] = z['new']
my_feature = pd.get_dummies(my_feature['feature'], drop_first=False)
z = pd.DataFrame()
z['S'] = data['Sex']
z['S'] = z['S'].replace('male', 1)
z['S'] = z['S'].replace('female', -1)
z['N'] = z['S'] * data['Age']  # use the combined data's ages (train['Age'] left the test rows as NaN)
z['N'] = z['N'].mask(z['N'].between(0,11.2), 0)
z['N'] = z['N'].mask(z['N'].between(-38,-30), 0)
z['N'] = z['N'].mask(z['N']!=0, 'Y')
my_feature2 = z['N']
my_feature2 = my_feature2.replace(0, 'X')
my_feature2 = my_feature2.replace(1, 'Y')
my_feature2 = pd.get_dummies(my_feature2, drop_first=False)
my_feature2.drop('Y', axis=1, inplace=True)
# print(my_feature2.head())
############## other features #########################
# sexx = train['Sex']
# sexx = sexx.replace('male', 1)
# sexx = sexx.replace('female', -1)
# age_sex = age.astype(np.int8) * sexx
# tmp = pd.DataFrame()
# tmp['survived'] = survived
# tmp['age_sex'] = age_sex
# sns.countplot(x='survived',hue='age_sex',data=tmp)
# plt.show()
# print(tmp[['age_sex', 'survived']].groupby(['age_sex'], as_index=False).mean().sort_values(by='survived', ascending=False))
sexx = data['Sex']
sexx = sexx.replace('male', 1)
sexx = sexx.replace('female', -1)
age_sex = age.astype(np.int8) * sexx
age_sex = pd.get_dummies(age_sex, drop_first=False)
age_sex.drop([-4,-3,-2,-1,2,3,4], axis=1, inplace=True)
processed_data = pd.concat([sex,pclass,fare,name,ticket,cabin,my_feature,age_sex], axis=1)
processed_data['age'] = age
processed_data['isAlone'] = isAlone
processed_data['distinction'] = family['distinction']
processed_data['id'] = passenger_id
Fare_Per_Person = normalizer(Fare_Per_Person)
# processed_data['Fare_Per_Person'] = Fare_Per_Person
Age_class = normalizer(Age_class)
processed_data['Age_class'] = Age_class
# age = data[['Age', 'Pclass']].apply(age_handler, axis=1)
# age = normalizer(age)
# processed_data['age'] = age
# fare = data['Fare'].fillna(data.Fare.mean())
# fare = normalizer(fare)
# processed_data['fare'] = fare
# processed_data['m_a'] = families['male_alive']
# processed_data['m_d'] = families['male_dead']
processed_data['f_d'] = families['female_dead']
processed_data['f_a'] = families['female_alive']
processed_data = processed_data.sort_values('id')
processed_data.drop('id', axis=1, inplace=True)
# print(processed_data.head())
# if __name__ == '__main__':
def after_preprocessing():
return processed_data
| null |
codes/preprocessing.py
|
preprocessing.py
|
py
| 15,810 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.mean",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "seaborn.FacetGrid",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "seaborn.FacetGrid",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "seaborn.FacetGrid",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "seaborn.pointplot",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "seaborn.FacetGrid",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "seaborn.barplot",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "seaborn.FacetGrid",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pandas.isnull",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "pandas.get_dummies",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 449,
"usage_type": "call"
}
] |
294036281
|
import settings
import packages.ipm_cloud_postgresql.model as model
import bth.interacao_cloud as interacao_cloud
from datetime import datetime
def iniciar():
print(':: Starting migration of the Protocolo system')
global ano_inicial
# ano_inicial = 2000
ano_inicial = input("Initial year for migration: ")
global ano_final
# ano_final = 2005
ano_final = input("Final year for migration: ")
global ano
for ano in range(int(ano_inicial), int(ano_final) + 1):
print("------------- INICIO MIGRAÇÃO DO ANO: " + str(ano) + " --------------")
params_exec = {
'clicodigo': '2016',
'somente_pre_validar': False,
'token': '',  # token for the official Biguaçu base
'ano': str(ano)
}
mensagem_inicio(params_exec)
interacao_cloud.verifica_token(params_exec['token'])
verifica_tabelas_controle()
enviar(params_exec, 'postmultpart', ano)
# buscar(params_exec, 'buscaPessoas', ano)
# buscar(params_exec, 'buscaTiposVeiculoEquipamento', ano)
# buscar(params_exec, 'buscaUnidadesMedida', ano)
# buscar(params_exec, 'buscaMateriaisServicos', ano)
# buscar(params_exec, 'buscaOrganogramas', ano)
# buscar(params_exec, 'buscaMotoristas', ano)
# buscar(params_exec, 'buscaFornecedores', ano)
# buscar(params_exec, 'buscaMateriaisEspecificacao', ano)
# enviar(params_exec, 'funcionario', ano)
# enviar(params_exec, 'veiculoEquipamento', ano)
# enviar(params_exec, 'ordemAbastecimento', ano)
print("------------- TERMINO MIGRAÇÃO DO ANO: " + str(ano) + " -------------")
ano = ano + 1
def enviar(params_exec, tipo_registro, ano, *args, **kwargs):
print(f'\n:: Starting execution of service {tipo_registro}')
tempo_inicio = datetime.now()
path_padrao = f'packages.{settings.BASE_ORIGEM}.{settings.SISTEMA_ORIGEM}.rotinas_envio'
print(path_padrao)
try:
modulo = __import__(f'{path_padrao}.{tipo_registro}', globals(), locals(), ['iniciar_processo_envio'], 0)
# print(modulo)
modulo.iniciar_processo_envio(params_exec, ano)
print(f'- Routine {tipo_registro} finished. '
f'\nTotal execution time: {(datetime.now() - tempo_inicio).total_seconds()} seconds.')
except Exception as e:
print(f"Error while running routine for record type {tipo_registro}: {e}")
def buscar(params_exec, tipo_registro, ano, *args, **kwargs):
print(f'\n:: Starting execution of service {tipo_registro}')
tempo_inicio = datetime.now()
path_padrao = f'packages.{settings.BASE_ORIGEM}.{settings.SISTEMA_ORIGEM}.rotinas_envio'
print(path_padrao)
try:
modulo = __import__(f'{path_padrao}.{tipo_registro}', globals(), locals(), ['iniciar_processo_busca'], 0)
# print(modulo)
modulo.iniciar_processo_busca(params_exec, ano)
print(f'- Routine {tipo_registro} finished. '
f'\nTotal execution time: {(datetime.now() - tempo_inicio).total_seconds()} seconds.')
except Exception as e:
print(f"Error while running routine for record type {tipo_registro}: {e}")
def mensagem_inicio(params_exec):
print(f'\n:: Starting execution of tool {settings.BASE_ORIGEM}, using the '
f'following parameters: \n- {params_exec}')
def verifica_tabelas_controle():
pgcnn = model.PostgreSQLConnection()
pgcnn.verifica_tabelas_controle()
| null |
packages/ipm_cloud_postgresql/frotas/enviar.py
|
enviar.py
|
py
| 3,519 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bth.interacao_cloud.verifica_token",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bth.interacao_cloud",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "settings.BASE_ORIGEM",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "settings.SISTEMA_ORIGEM",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "settings.BASE_ORIGEM",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "settings.SISTEMA_ORIGEM",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "settings.BASE_ORIGEM",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "packages.ipm_cloud_postgresql.model.PostgreSQLConnection",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "packages.ipm_cloud_postgresql.model",
"line_number": 85,
"usage_type": "name"
}
] |
370308293
|
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, Float, Boolean
engine = create_engine('sqlite://') # in memory
metadata = MetaData()
# Define a new table with a name, count, amount, and valid column: data
data = Table('data', metadata,
Column('name', String(255)),
Column('count', Integer()),
Column('amount', Float()),
Column('valid', Boolean())
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print table details
print(repr(data))
# Define a new table with a name, count, amount, and valid column: data2
data2 = Table('data2', metadata,
Column('name', String(255), unique=True),
Column('count', Integer(), default=1),
Column('amount', Float()),
Column('valid', Boolean(), default=False)
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print the table details
print(repr(metadata.tables['data2']))
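# A short follow-up sketch (added for illustration): exercise data2's column
# defaults by inserting a row that omits 'count' and 'valid'.
ins = data2.insert().values(name='spam', amount=3.5)
with engine.connect() as conn:
conn.execute(ins)
print(conn.execute(data2.select()).fetchall())  # count defaults to 1, valid to False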
| null |
datacamp/sqlalchemy/create_table.py
|
create_table.py
|
py
| 1,060 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.MetaData",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 27,
"usage_type": "call"
}
] |
292007069
|
# map
# filter
# reduce
# numbers = ["3","34","64"]
#
# # for i in range(len(numbers)) :
# # numbers[i] = int(numbers[i])
# numbers = list(map(int,numbers))
#
# numbers[2] = numbers[2] + 1
# print(numbers[2]) # 65
# numbers = list(map(int,numbers))
# def sq(a) :
# return a*a
# num = [2,3,4,5,44,1,3,2]
# # square = list(map(sq,num))
# square = list(map(lambda x:x*x,num))
# print(square) # [4, 9, 16, 25, 1936, 1, 9, 4]
# ############################### map #######
# def square(a) :
# return a*a
# def cube(a) :
# return a*a*a
# func = [square,cube]
# for i in range(5) :
# val = list(map(lambda x:x(i),func))
# print(val)
################################ Filter ########
# lisi1 = [1,2,34,5,56,3,2,27,8]
#
# def is_greater_5(num):
# return num>5
#
# gr_than_5 = list(filter(is_greater_5,lisi1))
# print(gr_than_5) # [34, 56, 27, 8]
############################ Reduce ###################################
from functools import reduce
lisi1 = [1,2,3,4]
num = reduce(lambda x,y:x+y,lisi1)
# num = 0
# for i in lisi1 :
# num = num + i
print(num) # 10
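############################ Reduce with an initializer (added sketch) ##########
# reduce also accepts an optional third argument used as the starting accumulator:
print(reduce(lambda x, y: x + y, lisi1, 100)) # 110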
| null |
38_map_filter.py
|
38_map_filter.py
|
py
| 1,305 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "functools.reduce",
"line_number": 46,
"usage_type": "call"
}
] |
618335268
|
import psutil
class Stat(object):
def __init__(self):
self.connections = []
self.io_stat = []
def scan_stat(process, stat):
try:
stat.connections.extend(process.connections())
if hasattr(process, "io_counters"):
stat.io_stat.append(process.io_counters())
for child in process.children():
scan_stat(child, stat)
except psutil.NoSuchProcess:
pass
def extract_fields(obj, fields, decoders={}):
return {
k: decoders[k](obj.__dict__[k]) if k in decoders else obj.__dict__[k]
for k in fields
if hasattr(obj, k)
}
def normalize_addr(addr):
return extract_fields(addr, ["ip", "port"])
# pconn(fd=47, family=2, type=1, laddr=addr(ip='172.31.1.100', port=59498), raddr=addr(ip='149.154.175.50', port=443), status='ESTABLISHED')
def normalize_connection(con):
return extract_fields(
con,
[ "fd", "family", "type", "laddr", "raddr", "status"],
{
"laddr": normalize_addr,
"raddr": normalize_addr
}
)
# pio(read_count=5, write_count=14402, read_bytes=3590590464, write_bytes=5259264, read_chars=0, write_chars=47240294)
def normalize_io_stat(io_stat):
return extract_fields(
io_stat,
[ "read_count", "write_count", "read_bytes", "write_bytes", "read_chars", "write_chars" ]
)
def make_stat(pid):
process = psutil.Process(pid)
stat = Stat()
scan_stat(process, stat)
return {
"connections": [normalize_connection(con) for con in stat.connections],
"io_stat": [normalize_io_stat(io_stat) for io_stat in stat.io_stat]
}
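# Example usage (an illustrative sketch, not part of the original module):
# import os, json
# print(json.dumps(make_stat(os.getpid()), indent=2))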
| null |
monitor/psstat.py
|
psstat.py
|
py
| 1,659 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "psutil.NoSuchProcess",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "psutil.Process",
"line_number": 54,
"usage_type": "call"
}
] |
429324191
|
# from bunch import Bunch
import logging
import numpy as np
import pandas as pd
import tigerml.core.dataframe as td
from tigerml.core.utils import DictObject, compute_if_dask
_LOGGER = logging.getLogger(__name__)
class Encoder:
"""Encoder class."""
METHODS = DictObject({"onehot": "onehot", "ordinal": "ordinal", "target": "target"})
MAX_BUCKETS = 20
MIN_SAMPLE_PERC = 0.02
MIN_SAMPLE_ABS = 20
LONG_TAIL_CHECK = 0.1
def __init__(self, data, y=None):
if not data.__module__.startswith("tigerml"):
from tigerml.core.dataframe.helpers import tigerify
data = tigerify(data)
data = data.categorize()
self.data = data
self.encoding_rules = []
self.encoded_data = self.data
self.encoding_summary = {}
self.encoding_mapping = {}
self.y = None
if y:
self.y = self.data[y]
self.data = self.data.drop(y, axis=1)
def add_encode_rule(self, cols, method, **kwargs):
"""Adds rule dictionary to encoding rules."""
cols_already_applied = [
col for col in cols if any([col in rule["cols"] for rule in self.encoding_rules])
]
if cols_already_applied:
raise Exception(
"Encoding rule already applied " "for {}".format(cols_already_applied)
)
if [col for col in cols if col not in self.data]:
raise Exception(
"Columns not present in "
"data - {}".format([col for col in cols if col not in self.data])
)
if method not in self.METHODS:
raise Exception(
"Supported imputation methods: " "{}".format(self.METHODS.keys())
)
rule_dict = {"cols": cols, "method": method}
rule_dict.update(kwargs)
self.encoding_rules.append(rule_dict)
return self
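# Illustrative rules (a sketch; the column names and mapper are hypothetical):
# enc = Encoder(df, y='target')
# enc.add_encode_rule(['city'], Encoder.METHODS.onehot)
# enc.add_encode_rule(['grade'], Encoder.METHODS.ordinal, mapper={'low': 1, 'high': 2})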
def _default_encoding(self, encode_columns):
_LOGGER.info(
"Encoding categorical variables with default settings which will "
"not be ideal. "
"Processing these variables manually is highly recommended."
)
# encode_columns = [col for col in non_numeric_columns if col in cols]
if self.y is not None:
self.encoding_rules.append(
{"cols": encode_columns, "method": self.METHODS.target}
)
for col in encode_columns:
min_samples = (
round(self.MIN_SAMPLE_PERC * len(self.data))
if self.MIN_SAMPLE_PERC
else self.MIN_SAMPLE_ABS
)
levels_with_less_min_for_target = [
x
for x in compute_if_dask(self.data[col].unique())
if len(self.data[self.data[col] == x]) < min_samples
]
if levels_with_less_min_for_target:
_LOGGER.info(
"{} has levels with less than {}{} values. "
"Target encoding in such cases is not "
"recommended.".format(
col,
min_samples,
f" ({self.MIN_SAMPLE_PERC*100}%)"
if self.MIN_SAMPLE_PERC
else "",
)
)
else:
for col in encode_columns:
num_of_levels = self.data[col].nunique()
if num_of_levels <= self.MAX_BUCKETS:
self.encoding_rules.append(
{"cols": col, "method": self.METHODS.onehot}
)
else:
min_samples = (
(self.MIN_SAMPLE_PERC * len(self.data))
if self.MIN_SAMPLE_PERC
else self.MIN_SAMPLE_ABS
)
buckets_with_min_samples = [
x
for x in compute_if_dask(self.data[col].unique())
if len(self.data[self.data[col] == x]) >= min_samples
]
if (
len(buckets_with_min_samples)
> num_of_levels * self.LONG_TAIL_CHECK
):
groups = buckets_with_min_samples
self.encoding_rules.append(
{
"cols": col,
"method": self.METHODS.onehot,
"groups": groups,
}
)
else:
_LOGGER.info(
"CANNOT ENCODE {}. A good encoding "
"method is not found.".format(col)
)
continue
def transform(self, cols=[]):
"""Returns encoded data after transformation."""
if not cols:
from tigerml.core.utils import get_num_cols, get_dt_cols
numeric_columns = get_num_cols(self.data) + get_dt_cols(self.data)
cols = [col for col in self.data.columns if col not in numeric_columns]
cols_set = [
col
for col in cols
if not (
any(
[
"encoded_{}".format(col) in data_col
for data_col in self.data.columns
]
)
)
]
if len(cols_set) < len(cols):
_LOGGER.info(
"Encoding {} columns. Columns - {} are "
"already encoded.".format(
len(cols_set), list(set(cols) - set(cols_set))
)
)
else:
cols_set = cols
self.encoded_data = self.data
if not self.encoding_rules:
self._default_encoding(cols_set)
for rule in self.encoding_rules:
cols = rule.pop("cols")
if isinstance(cols, str):
cols = [cols]
cols = [col for col in cols if col in cols_set]
method = rule.pop("method")
kwargs = rule.copy()
if method == self.METHODS.target and "target" not in kwargs:
if self.y is None:
raise Exception("Need target for target encoding")
else:
kwargs.update({"target": self.y})
for col in cols:
if method == self.METHODS.onehot:
encoded = self.onehotEncode(self.data[col], **kwargs)
self.encoded_data = td.concat([self.encoded_data, encoded], axis=1)
elif method == self.METHODS.ordinal:
encoded, mapper = self.ordinalEncode(self.data[col], **kwargs)
self.encoding_mapping.update({col: mapper})
if encoded.name in self.encoded_data:
_LOGGER.info(
"{} already exists in data. "
"Overriding it.".format(encoded.name)
)
self.encoded_data[encoded.name] = encoded
else:
self.encoded_data = td.concat(
[self.encoded_data, encoded], axis=1
)
elif method == self.METHODS.target:
encoded, mapper = self.targetEncode(self.data[col], **kwargs)
encoded.index = self.encoded_data.index
self.encoding_mapping.update({col: mapper})
if encoded.name in self.encoded_data.columns:
_LOGGER.info(
"{} already exists in data. "
"Overriding it.".format(encoded.name)
)
self.encoded_data[encoded.name] = encoded
else:
self.encoded_data = td.concat(
[self.encoded_data, encoded], axis=1
)
self.encoding_summary.update(
{
col: {
"original_type": self.data.dtypes.astype(str)[col],
"new_columns": encoded.columns
if hasattr(encoded, "columns")
else encoded.name,
"method": "{} encoded".format(method),
}
}
)
if self.y is not None:
# import pdb
# pdb.set_trace()
self.encoded_data = td.concat([self.encoded_data, self.y], axis=1)
return self.encoded_data
def get_encoding_method(self, col_name):
"""Gets encoding method."""
if [rule for rule in self.encoding_rules if col_name in rule["cols"]]:
return [rule for rule in self.encoding_rules if col_name in rule["cols"]][
0
]["method"]
else:
return None
@staticmethod
def onehotEncode(feature, prefix="onehot_encoded", **kwargs):
"""
This method one-hot encodes all the factors in the category variable.
Parameters
----------
feature : str
Name of the category variable to be encoded.
prefix : str
Default is 'OHE'. The prefex will be appended to encoded variable.
Ex: 'OHE_VariableName_FactorName'
Returns
-------
dataframe :
Modified dataframe will be returned.
"""
prefix = prefix + "_" + feature.name
if feature.isna().sum() > 0:
include_na = True
else:
include_na = False
dummies = td.get_dummies(feature, dummy_na=include_na)
dummies = dummies.rename(
columns=dict(
zip(
list(dummies.columns),
[(prefix + "_" + str(x)) for x in dummies.columns],
)
)
)
if "groups" in kwargs:
new_dummies = td.DataFrame(backend=feature.backend)
for group in kwargs["groups"]:
group_name = prefix + "_" + str(group)
if isinstance(group, str):
new_dummies[prefix + "_" + group] = dummies[group_name]
elif isinstance(group, list):
if len(group) == 1:
group = group[0]
new_dummies[prefix + "_" + group] = dummies[group_name]
else:
dummy_name = "grouped_{}".format("_".join(group))
new_dummies[dummy_name] = dummies[group_name].sum()
else:
raise Exception("Incorrect input for groups")
dummies = dummies.drop(group_name, axis=1)
if not dummies.empty:
new_dummies[prefix + "_other"] = dummies.sum()
dummies = new_dummies
return dummies
@staticmethod
def ordinalEncode(feature, mapper, prefix="ordinal_encoded"):
"""
        This method ordinally encodes all the factors in a category variable.
Parameters
----------
        feature : Series
            The category variable (column) to be encoded.
mapper : dict
Dictionary with factor to encoding value mapping.
Ex: If the variable has following levels low, medium and high and
you want to ordinal encode them
use the following mapper.
mapper = {'low':1, 'medium':2, 'high':3}
        prefix : str
            Default is 'ordinal_encoded'. The prefix is prepended to the
            encoded variable. Ex: 'ordinal_encoded_VariableName'
        Returns
        -------
        tuple :
            The encoded series and the mapper used.
"""
encoded_name = prefix + "_" + feature.name
encoded = feature.map(mapper)
        encoded = encoded.rename(encoded_name)
if encoded.isnull().sum() > 0:
_LOGGER.info(
"Few levels are missing in the mapper, "
"appended such records with nans"
)
return encoded, mapper
@staticmethod
def targetEncode(
feature, target, min_samples=1, smoothing=1, prefix="target_encoded"
):
"""Target Encode.
        This transformation is applied to a categorical variable for a regression task.
Each factor value is replaced by the average of the response variable within
the factor group.
Parameters
----------
        feature : Series
            The category variable (column) to be encoded.
        target : Series
            The response variable used to compute the encoding.
        min_samples : int
            Default is 1. Minimum no. of samples required within each factor.
        smoothing : int
            Default is 1. Smoothens variation in the transformation by giving
            more weight to the prior average.
        prefix : str
            Default is 'target_encoded'. The prefix is prepended to the
            encoded variable. Ex: 'target_encoded_VariableName'
        Returns
        -------
        tuple :
            The encoded series and the per-level encoded values.
"""
encoded_name = prefix + "_" + feature.name
df = td.concat([feature, target], axis=1)
averages = df.groupby(by=feature.name)[target.name].agg(["mean", "count"])
smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples) / smoothing))
prior = df.loc[:, target.name].mean()
averages[target.name] = prior * (1 - smoothing) + averages["mean"] * (smoothing)
averages = averages.drop(["mean", "count"], axis=1)
encoded = td.merge(
td.DataFrame(feature),
td.DataFrame(
averages.reset_index().rename(
columns={"index": feature.name, target.name: "average"}
)
),
on=feature.name,
how="left",
)["average"].fillna(prior)
encoded = encoded.rename(encoded_name)
return encoded, averages.rename(columns={target.name: "encoded values"})
| null |
src/ta_lib/_vendor/tigerml/core/preprocessing/encoder.py
|
encoder.py
|
py
| 14,469 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tigerml.core.utils.DictObject",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe.helpers.tigerify",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tigerml.core.utils.compute_if_dask",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tigerml.core.utils.compute_if_dask",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tigerml.core.utils.get_num_cols",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tigerml.core.utils.get_dt_cols",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe.concat",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.concat",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.concat",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.concat",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.get_dummies",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.DataFrame",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.concat",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe.merge",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.DataFrame",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "tigerml.core.dataframe.DataFrame",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "tigerml.core.dataframe",
"line_number": 357,
"usage_type": "name"
}
] |
620317379
|
#!/usr/bin/python2.7
import rospy
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped
from nav_msgs.msg import OccupancyGrid
from visualization_msgs.msg import MarkerArray
import copy
import planners.astar
from move import Move
from state import State
from robot import Robot
from map import Map
class TrajectoryPlanner:
def __init__(self):
self.map = None
self.start = None
self.goal = None
self.moves = [Move(0.1, 0), # forward
Move(-0.1, 0), # back
Move(0, 1.5708), # turn left 90
Move(0, -1.5708)] # turn right 90
self.robot = Robot(0.5, 0.5)
        self.is_working = False  # TODO: replace this flag with a proper lock
self.map_subscriber = rospy.Subscriber("map", OccupancyGrid, self.new_map_callback)
self.start_subscriber = rospy.Subscriber("initialpose", PoseWithCovarianceStamped, self.new_start_callback)
self.goal_subscriber = rospy.Subscriber("goal", PoseStamped, self.new_goal_callback)
self.path_publisher = rospy.Publisher("trajectory", MarkerArray, queue_size=1)
self.pose_publisher = rospy.Publisher("debug_pose", PoseStamped, queue_size=1)
        # Store the planner entry point as a callable so the algorithm can be swapped out.
self.planner = planners.astar.replan
def ready_to_plan(self):
return self.map is not None and self.start is not None and self.goal is not None
def new_goal_callback(self, goal_pose):
if not self.is_working:
self.is_working = True
new_goal = State.from_pose(goal_pose.pose)
if self.map is not None and self.map.is_allowed(new_goal, self.robot):
self.goal = new_goal
rospy.loginfo("New goal was set")
if self.ready_to_plan():
self.replan()
else:
rospy.logwarn("New goal is bad or no map available")
self.is_working = False
def new_start_callback(self, start_pose):
if not self.is_working:
self.is_working = True
new_start = State.from_pose(start_pose.pose.pose)
if self.map is not None and self.map.is_allowed(new_start, self.robot):
self.start = new_start
rospy.loginfo("New start was set")
if self.ready_to_plan():
self.replan()
else:
rospy.logwarn("New start is bad or no map available")
self.is_working = False
def new_map_callback(self, grid_map):
if not self.is_working:
self.is_working = True
self.map = Map(grid_map)
rospy.loginfo("New map was set")
if self.ready_to_plan():
self.replan()
self.is_working = False
def replan(self):
rospy.loginfo("Planning was started")
final_state = self.planner(self.map, self.moves, self.robot, self.start, self.goal, self.pose_publisher)
if final_state is None:
rospy.loginfo("No path found")
else:
# Restore and publish path
rospy.loginfo("Restoring path from final state...")
path = self.restore_path(final_state)
self.path_publisher.publish(path)
rospy.loginfo("Planning was finished...")
def restore_path(self, final_state):
        # Walk parent pointers from the final state back to the start,
        # converting each state to a pose marker along the way.
        current_state = copy.copy(final_state)
        path = MarkerArray()
        pose_id = 0
        while current_state is not None:
            pose_marker = current_state.to_marker(self.robot)
            pose_marker.id = pose_id
            path.markers.append(pose_marker)
            current_state = current_state.parent
            pose_id += 1
        return path
def main():
rospy.init_node("trajectory_planner")
planner = TrajectoryPlanner()
rospy.spin()
if __name__ == "__main__":
    main()
| null |
visualization/rviz_tools_py/src/rviz_tools_py/test_bug.py
|
test_bug.py
|
py
| 4,057 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "move.Move",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "move.Move",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "move.Move",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "move.Move",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "robot.Robot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "nav_msgs.msg.OccupancyGrid",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.PoseWithCovarianceStamped",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.PoseStamped",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "visualization_msgs.msg.MarkerArray",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "rospy.Publisher",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.PoseStamped",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "planners.astar.astar",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "planners.astar",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "state.State.from_pose",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "state.State",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "rospy.loginfo",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "rospy.logwarn",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "state.State.from_pose",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "state.State",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "rospy.loginfo",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "rospy.logwarn",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "map.Map",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "visualization_msgs.msg.MarkerArray",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "rospy.init_node",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "rospy.spin",
"line_number": 110,
"usage_type": "call"
}
] |
647500287
|
import os
import csv
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.lib import colors
from reportlab.platypus import Table
xmargin = 3.2 * inch
ymargin = 6 * inch
my_path = "/Users/staniya/Downloads/book1-exercises-master/chp09/practice_files"
with open(os.path.join(my_path, "pastimes.csv"), "r") as my_input, open(os.path.join(my_path, "categorized pastimes.csv"), "w") as my_output:
my_file_reader = csv.reader(my_input)
my_file_writer = csv.writer(my_output)
next(my_file_reader)
my_file_writer.writerow(["Name", "Favorite Pastime", "Type of Pastime"])
for row in my_file_reader:
if row[1].find("fighting") != False:
row.append("Combat")
else:
row.append("Other")
my_file_writer.writerow(row)
print(row)
with open(os.path.join(my_path, "categorized_pastimes.csv"), "r") as my_csv_input:
data_reader = csv.reader(my_csv_input)
c = canvas.Canvas("reportLab test.pdf", pagesize=letter)
c.setFont('Helvetica', 12)
    t = Table(list(data_reader))  # Table needs a concrete list of rows
    t.setStyle([("TEXTCOLOR", (0, 0), (-1, -1), colors.blue)])  # style commands take start/stop cells
t.wrapOn(c, xmargin, ymargin)
t.drawOn(c, xmargin, ymargin)
c.save()
| null |
ReportLab.py
|
ReportLab.py
|
py
| 1,245 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "reportlab.lib.units.inch",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.units.inch",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.pagesizes.letter",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "reporlab.platypus.Table",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.colors.blue",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "reportlab.lib.colors",
"line_number": 35,
"usage_type": "name"
}
] |
430362458
|
#!/usr/bin/env python3
import json
import re
from time import sleep
import usb.core
import argparse
import struct
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
TIMEOUT_MS = 1000
PAGE_SIZE = 64
class Fixture(object):
def __init__(self):
self.eeprom_pages = None
self.fields = {}
self.field_names = []
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="debug output")
parser.add_argument("--dump", action="store_true", help="just dump and exit (default)")
parser.add_argument("--erase", action="store_true", help="erase (write all 0xff)")
parser.add_argument("--hex", action="store_true", help="output in hex")
parser.add_argument("--force-offset", action="store_true", help="force ARMs to use the old 'offset' write method")
parser.add_argument("--pid", default="1000", help="USB PID in hex (default 1000)", choices=["1000", "2000", "4000"])
parser.add_argument("--restore", type=str, help="restore an EEPROM from text file")
parser.add_argument("--max-pages", type=int, help="override standard max pages (default 8)", default=8)
parser.add_argument("--reprogram", action="store_true", help="overwrites first 8 pages with zeros, then populates minimal defaults (HIGHLY DANGEROUS)")
parser.add_argument("--pixels", type=int, default=1024, help="active_pixels_horizontal when reprogramming")
self.args = parser.parse_args()
if not (self.args.dump or \
self.args.restore):
self.args.dump = True
self.pid = int(self.args.pid, 16)
self.dev = usb.core.find(idVendor=0x24aa, idProduct=self.pid)
if not self.dev:
print("No spectrometers found with PID 0x%04x" % self.pid)
def run(self):
self.read_eeprom()
self.parse_eeprom()
self.dump_eeprom()
if self.args.erase:
self.do_erase()
self.write_eeprom()
if self.args.reprogram:
self.do_reprogram()
if self.args.dump:
return
if self.args.restore:
self.do_restore()
def do_restore(self):
self.load(filename = self.args.restore)
self.dump_eeprom("Proposed")
cont = input("\nWrite EEPROM? (y/N)")
if cont.lower() != "y":
print("Cancelled")
return
self.write_eeprom()
def do_erase(self, value=0xff):
print("Erasing buffers")
for page in range(len(self.eeprom_pages)):
for i in range(PAGE_SIZE):
self.pack((page, i, 1), "B", value)
def load(self, filename):
if filename.endswith(".json"):
self.load_json(filename)
else:
self.load_other(filename)
def load_json(self, filename):
with open(filename) as f:
doc = json.load(f)
buffers_string = doc["buffers"][1:-2] # strip first/last []
page_strings = buffers_string.split(", array")
for page in range(len(page_strings)):
m = re.search(r"\[(.*)\]", page_strings[page])
delimited = m.group(1)
values = [ int(v.strip()) for v in delimited.split(",") ]
self.pack_page(page, values)
##
# This function will load an EEPROM definition from an external
# text file. It supports a couple of different file formats,
# including:
#
# - extract of ENLIGHTEN logfile
# - output of this program (eeprom-util.py)
def load_other(self, filename):
linecount = 0
filetype = None
print(f"restoring from {filename}")
with open(filename) as f:
for line in f:
self.debug("read: %s" % line)
line = line.strip()
if line.startswith("#") or len(line) == 0:
continue
linecount += 1
values = None
page = None
################################################################
# use first non-blank, non-comment line to determine filetype
################################################################
if linecount == 1:
# ENLIGHTEN logfile: 2020-03-19 12:05:41,726 Process-2 wasatch.FeatureIdentificationDevice DEBUG GET_MODEL_CONFIG(0): get_code: request 0xff value 0x0001 index 0x0000 = [array('B', [87, 80, 45, 55, 56, 53, 45, 88, 45, 83, 82, 45, 83, 0, 0, 0, 87, 80, 45, 48, 48, 53, 54, 49, 0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 1, 0, 0, 17, 3, 50, 0, 2, 0, 10, 0, 0, 51, 51, 243, 63, 0, 0, 51, 51, 243, 63, 0, 0, 0, 0, 0, 6])]
if "wasatch.FeatureIdentificationDevice" in line and "GET_MODEL_CONFIG" in line:
filetype = "ENLIGHTEN_LOG"
# eeprom-util.py: Page 0: array('B', [83, 105, 71, 45, 55, 56, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 87, 80, 45, 48, 48, 54, 52, 54, 0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 1, 1, 2, 0, 25, 0, 15, 0, 15, 0, 0, 0, 0, 0, 65, 0, 0, 51, 51, 243, 63, 0, 0, 0, 0, 0, 9])
elif re.match(r"Page\s+\d+:\s*array\('B',\s*\[", line):
filetype = "eeprom-util"
# unknown
else:
raise Exception("ERROR: could not determine filetype")
self.debug(f"filetype: {filetype}")
################################################################
# filetype has been determined, so parse each line as read
################################################################
if filetype == "ENLIGHTEN_LOG":
m = re.search("GET_MODEL_CONFIG\((\d)\)", line)
if not m:
raise Exception("can't parse page number")
page = int(m.group(1))
m = re.search("array\('B', \[([0-9, ]+)\]\)", line)
if not m:
raise Exception("can't parse data")
delimited = m.group(1)
values = [ int(v.strip()) for v in delimited.split(",")]
elif filetype == "eeprom-util":
m = re.search(r"""Page\s+(\d+)\s*:\s*array\('B',\s*\[(.*)\]\)""", line)
if not m:
raise Exception("could not parse line: %s" % line)
page = int(m.group(1))
if not (0 <= page <= self.args.max_pages):
raise Exception("invalid page")
delimited = m.group(2)
values = [ int(v.strip()) for v in delimited.split(",")]
else:
raise Exception(f"Unsupported filetype: {filetype}")
if page is None or values is None:
raise Exception(f"could not parse line: {line}")
self.pack_page(page, values)
self.debug(f"parsed and packed page {page}")
def pack_page(self, page, values):
if not (0 <= page <= self.args.max_pages):
raise Exception(f"invalid page: {page}")
length = len(values)
if length != 64:
raise Exception(f"wrong array length: {length}")
self.debug(f"packing {length} values")
for i in range(length):
v = values[i]
if not (0 <= v <= 255):
raise Exception(f"invalid byte: {v}")
self.pack((page, i, 1), "B", values[i])
def read_eeprom(self):
print("Reading EEPROM")
self.eeprom_pages = []
for page in range(self.args.max_pages):
buf = self.get_cmd(cmd=0xff, value=0x01, index=page, length=PAGE_SIZE)
self.eeprom_pages.append(buf)
def dump_eeprom(self, state="Current"):
print("%s EEPROM:" % state)
for page in range(len(self.eeprom_pages)):
print(f" Page {page}: ", end='')
if self.args.hex:
print(" ".join([f"{i:02x}" for i in self.eeprom_pages[page]]))
else:
print(self.eeprom_pages[page])
def write_eeprom(self):
print("Writing EEPROM")
for page in range(len(self.eeprom_pages)):
buf = self.eeprom_pages[page]
print(f" writing page {page}: {buf}")
if self.pid == 0x4000 and not self.args.force_offset:
self.send_cmd(cmd=0xff, value=0x02, index=page, buf=buf)
else:
DATA_START = 0x3c00
offset = DATA_START + page * 64
self.send_cmd(cmd=0xa2, value=offset, buf=buf)
sleep(0.2)
def do_reprogram(self):
print("\n*** HAZARDOUS OPERATION ***\n")
print("Reprogram EEPROM to bland defaults? This is a destructive")
print("operation which will overwrite all configuration data on")
print("the spectrometer, destroying any factory calibrations.\n")
cont = input("\nReprogram EEPROM to bland defaults? (y/N)")
if cont.lower() != "y":
print("Cancelled")
return
# set all buffers to zero
self.do_erase(value=0x00)
# minimum set of defaults to allow ENLIGHTEN operation
self.pack((0, 63, 1), "B", 15, "format")
self.pack((0, 0, 16), "s", "WP-FOO", "model")
self.pack((0, 16, 16), "s", "WP-00000", "serial_number")
self.pack((0, 48, 4), "f", 1, "gain")
self.pack((1, 4, 4), "f", 1, "wavecal_c1")
self.pack((2, 0, 16), "s", "unknown", "detector")
self.pack((2, 16, 2), "H", self.args.pixels, "active_pixels_horizontal")
self.pack((2, 25, 2), "H", self.args.pixels, "actual_pixels_horizontal")
self.pack((3, 40, 4), "I", 1, "min_integ")
self.pack((3, 44, 4), "I", 60000, "max_integ")
self.write_eeprom()
def parse_eeprom(self):
print("Parsing EEPROM")
self.format = self.unpack((0, 63, 1), "B", "format")
self.unpack((0, 0, 16), "s", "model")
self.unpack((0, 16, 16), "s", "serial_number")
self.unpack((0, 32, 4), "I", "baud_rate")
self.unpack((0, 36, 1), "?", "has_cooling")
self.unpack((0, 37, 1), "?", "has_battery")
self.unpack((0, 38, 1), "?", "has_laser")
self.unpack((0, 39, 2), "H", "feature_mask")
self.unpack((0, 39, 2), "H", "excitation_nm")
self.unpack((0, 41, 2), "H", "slit_um")
self.unpack((0, 43, 2), "H", "start_integ")
self.unpack((0, 45, 2), "h", "start_temp")
self.unpack((0, 47, 1), "B", "start_trigger")
self.unpack((0, 48, 4), "f", "gain")
self.unpack((0, 52, 2), "h", "offset")
self.unpack((0, 54, 4), "f", "gain_odd")
self.unpack((0, 58, 2), "h", "offset_odd")
self.unpack((1, 0, 4), "f", "wavecal_c0")
self.unpack((1, 4, 4), "f", "wavecal_c1")
self.unpack((1, 8, 4), "f", "wavecal_c2")
self.unpack((1, 12, 4), "f", "wavecal_c3")
self.unpack((1, 16, 4), "f", "degCtoDAC_c0")
self.unpack((1, 20, 4), "f", "degCtoDAC_c1")
self.unpack((1, 24, 4), "f", "degCtoDAC_c2")
self.unpack((1, 28, 2), "h", "max_temp")
self.unpack((1, 30, 2), "h", "min_temp")
self.unpack((1, 32, 4), "f", "adcToDegC_c0")
self.unpack((1, 36, 4), "f", "adcToDegC_c1")
self.unpack((1, 40, 4), "f", "adcToDegC_c2")
self.unpack((1, 44, 2), "h", "r298")
self.unpack((1, 46, 2), "h", "beta")
self.unpack((1, 48, 12), "s", "cal_date")
self.unpack((1, 60, 3), "s", "cal_tech")
self.unpack((2, 0, 16), "s", "detector")
self.unpack((2, 16, 2), "H", "active_pixels_horizontal")
self.unpack((2, 18, 1), "B", "laser_warmup_sec")
self.unpack((2, 19, 2), "H", "active_pixels_vertical")
self.unpack((2, 21, 4), "f", "wavecal_c4")
self.unpack((2, 25, 2), "H", "actual_pixels_horizontal")
self.unpack((2, 27, 2), "H", "roi_horiz_start")
self.unpack((2, 29, 2), "H", "roi_horiz_end")
self.unpack((2, 31, 2), "H", "roi_vertical_region_1_start")
self.unpack((2, 33, 2), "H", "roi_vertical_region_1_end")
self.unpack((2, 35, 2), "H", "roi_vertical_region_2_start")
self.unpack((2, 37, 2), "H", "roi_vertical_region_2_end")
self.unpack((2, 39, 2), "H", "roi_vertical_region_3_start")
self.unpack((2, 41, 2), "H", "roi_vertical_region_3_end")
self.unpack((2, 43, 4), "f", "linearity_c0")
self.unpack((2, 47, 4), "f", "linearity_c1")
self.unpack((2, 51, 4), "f", "linearity_c2")
self.unpack((2, 55, 4), "f", "linearity_c3")
self.unpack((2, 59, 4), "f", "linearity_c4")
self.unpack((3, 12, 4), "f", "laser_power_c0")
self.unpack((3, 16, 4), "f", "laser_power_c1")
self.unpack((3, 20, 4), "f", "laser_power_c2")
self.unpack((3, 24, 4), "f", "laser_power_c3")
self.unpack((3, 28, 4), "f", "max_laser_mW")
self.unpack((3, 32, 4), "f", "min_laser_mW")
self.unpack((3, 36, 4), "f", "excitation_nm_float")
self.unpack((3, 40, 4), "I", "min_integ")
self.unpack((3, 44, 4), "I", "max_integ")
self.unpack((3, 48, 4), "f", "avg_resolution")
for field in self.field_names:
print("%30s %s" % (field, self.fields[field]))
############################################################################
# Utility Methods
############################################################################
def debug(self, msg):
if self.args.debug:
print("DEBUG: %s" % msg)
def send_cmd(self, cmd, value, index=0, buf=None):
if buf is None:
if self.pid == 0x4000:
buf = [0] * 8
else:
buf = ""
self.debug("ctrl_transfer(%02x, %02x, %04x, %04x) >> %s" % (HOST_TO_DEVICE, cmd, value, index, buf))
self.dev.ctrl_transfer(HOST_TO_DEVICE, cmd, value, index, buf, TIMEOUT_MS)
def get_cmd(self, cmd, value=0, index=0, length=64):
return self.dev.ctrl_transfer(DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS)
##
# Unpack a single field at a given buffer offset of the given datatype.
#
# @param address a tuple of the form (buf, offset, len)
# @param data_type see https://docs.python.org/2/library/struct.html#format-characters
# @param field where to store
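    # e.g. unpack((0, 16, 16), "s", "serial_number") reads 16 bytes starting
    # at offset 16 of page 0 and stores the decoded string under that field.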
def unpack(self, address, data_type, field):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
if page > len(self.eeprom_pages):
print("error unpacking EEPROM page %d, offset %d, len %d as %s: invalid page (field %s)" % (
page, start_byte, length, data_type, field))
return
buf = self.eeprom_pages[page]
if buf is None or end_byte > len(buf):
print("error unpacking EEPROM page %d, offset %d, len %d as %s: buf is %s (field %s)" % (
page, start_byte, length, data_type, buf, field))
return
if data_type == "s":
# This stops at the first NULL, so is not appropriate for binary data (user_data).
# OTOH, it doesn't currently enforce "printable" characters either (nor support Unicode).
unpack_result = ""
for c in buf[start_byte:end_byte]:
if c == 0:
break
unpack_result += chr(c)
else:
unpack_result = 0
try:
unpack_result = struct.unpack(data_type, buf[start_byte:end_byte])[0]
except:
print("error unpacking EEPROM page %d, offset %d, len %d as %s" % (page, start_byte, length, data_type))
return
if field is None:
self.debug("Unpacked [%s]: %s" % (data_type, unpack_result))
else:
self.debug("Unpacked [%s]: %s (%s)" % (data_type, unpack_result, field))
self.field_names.append(field)
self.fields[field] = unpack_result
return unpack_result
##
# Marshall or serialize a single field at a given buffer offset of the given datatype.
#
# @param address a tuple of the form (buf, offset, len)
# @param data_type see https://docs.python.org/2/library/struct.html#format-characters
# @param value value to serialize
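    # e.g. pack((0, 48, 4), "f", 1, "gain") serializes a 4-byte float at
    # page 0, offset 48 (the same call do_reprogram makes above).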
def pack(self, address, data_type, value, label=None):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
if page > len(self.eeprom_pages):
raise Exception("error packing EEPROM page %d, offset %d, len %d as %s: invalid page (label %s)" % (
page, start_byte, length, data_type, label))
# don't try to write negatives to unsigned types
if data_type in ["H", "I"] and value < 0:
self.debug("rounding negative to zero when writing to unsigned field (address %s, data_type %s, value %s)" % (address, data_type, value))
value = 0
buf = self.eeprom_pages[page]
if buf is None or end_byte > 64: # byte [63] for revision
raise Exception("error packing EEPROM page %d, offset %2d, len %2d as %s: buf is %s" % (
page, start_byte, length, data_type, buf))
if data_type == "s":
            # write the string bytes, zero-padding out to the field length
            for i in range(length):
if i < len(value):
buf[start_byte + i] = ord(value[i])
else:
buf[start_byte + i] = 0
else:
struct.pack_into(data_type, buf, start_byte, value)
# self.debug("Packed (%d, %2d, %2d) '%s' value %s -> %s" % (page, start_byte, length, data_type, value, buf[start_byte:end_byte]))
fixture = Fixture()
if fixture.dev:
fixture.run()
| null |
generic/eeprom-util.py
|
eeprom-util.py
|
py
| 18,429 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "usb.core.core.find",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "usb.core.core",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "usb.core",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "struct.pack_into",
"line_number": 425,
"usage_type": "call"
}
] |
209464419
|
# -*- coding: utf-8 -*-
#------------------------
# v2021-10-30 1. update test
#========================
from flask import Flask, request, abort, render_template, Response
from flask import json, jsonify, session, redirect, url_for
#from flask_cors import CORS, cross_origin # for cross domain problem
from flask import send_file
import requests
import csv
import folium
import geocoder
from apscheduler.schedulers.background import BackgroundScheduler
import os
from sqlalchemy import create_engine
import time
app = Flask(__name__, static_url_path='', static_folder='static')
@app.route("/", methods=['GET'])
def basic_url():
return 'hello'
@app.route("/hello", methods=['GET'])
def hello():
name = request.args.get('name')
return 'hello ' + name
@app.route("/map/kh-parking", methods=['GET'])
def map_kh_parking():
url = "https://data.kcg.gov.tw/dataset/449e45d9-dead-4873-95a9-cc34dabbb3af/resource/fe3f93da-9673-4f7b-859c-9017d793f798/download/108.6.21.csv"
r = requests.get(url)
print(r)
decoded_content = r.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
data_list = list(cr)
    # start building the map
location = geocoder.osm('高雄市').latlng
m = folium.Map(location=location, zoom_start=14)
for item in data_list[1:]:
try:
name = item[2]
total = item[7]
fee = item[10]
lat = item[5]
lng = item[4]
info = '%s<br>%s<br>停車格數:%s' %(name, fee, total)
folium.Marker([float(lat), float(lng)], tooltip=info,
icon=folium.Icon(color='green', prefix='fa', icon='fa-car')).add_to(m)
except Exception as e:
print(e.args)
m.save('./map_kh_parking.html')
return send_file('./map_kh_parking.html')
@app.route("/map/w01-6", methods=['GET'])
def map_w01_6():
return app.send_static_file('W01-6.html')
#####################
# Scheduler
#####################
def job_wakeup():
print('cron fun1: awake myself')
url = 'https://malo-cron2.herokuapp.com/'
r = requests.get(url)
print(r)
def send_line(msg, token='rpHUQIIMkArQh6EtQpqfjK6hjPN2jjNxh0zDbcFVoD2'):
url = "https://notify-api.line.me/api/notify" # --> 不支援http, 只能用https
headers = {"Authorization" : "Bearer "+ token}
title = '排程測試'
message = '[%s] %s' %(title, msg)
payload = {"message" : message}
r = requests.post(url ,headers = headers ,params=payload)
#- air-quality alert notification
def job_function2():
url = 'https://data.epa.gov.tw/api/v1/aqx_p_432?format=json&api_key=9be7b239-557b-4c10-9775-78cadfc555e9'
r = requests.get(url)
print(r)
data = r.json()
records = data['records']
for item in records:
if item['County']=='高雄市' and item['SiteName']=='鳳山':
send_line('%s>> AQI=%s' %(item['SiteName'], item['AQI']))
#- air-quality data collection
def job_function3():
mysql_db_url = 'mysql+pymysql://user1:[email protected]:32769/testdb'
my_db = create_engine(mysql_db_url)
# check and create table
resultProxy = my_db.execute("CREATE TABLE IF NOT EXISTS your_table (uuid text NOT NULL, time text NOT NULL, aqi text, pm25 text)")
# get data
url = 'https://data.epa.gov.tw/api/v1/aqx_p_432?format=json&api_key=9be7b239-557b-4c10-9775-78cadfc555e9'
r = requests.get(url)
data = r.json()
records = data['records']
uuid = ''
my_time = ''
aqi = ''
pm25 = ''
for item in records:
if item['County']=='高雄市':
uuid = item['SiteName']
my_time = item['PublishTime']
aqi = item['AQI']
pm25 = item['PM2.5']
# insert
resultProxy=my_db.execute("insert into your_table (uuid, time, aqi, pm25) values('%s', '%s', '%s', '%s')" %(uuid, my_time, aqi, pm25))
# get data from db
resultProxy=my_db.execute("select * from your_table")
data = resultProxy.fetchall()
print('-- data --')
print(data)
def start_scheduler():
scheduler = BackgroundScheduler()
    # run every 10 minutes
scheduler.add_job(job_wakeup, 'cron', minute='*/10')
    # run every day at 06:30
scheduler.add_job(job_function2, 'cron', hour='6', minute='30')
#scheduler.add_job(job_function2, 'cron', minute='*/1')
    # run at minute 20 of every hour
scheduler.add_job(job_function3, 'cron', minute='20')
# start the scheduler
scheduler.start()
def run_web():
os.system('gunicorn -w 2 app:app')
if __name__ == "__main__":
#app.run()
start_scheduler()
run_web()
| null |
W05/W04_補充/flask-cron-03/app.py
|
app.py
|
py
| 4,674 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "geocoder.osm",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "folium.Map",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "folium.Marker",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "folium.Icon",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "apscheduler.schedulers.background.BackgroundScheduler",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 149,
"usage_type": "call"
}
] |
410073298
|
# -*- coding: utf-8 -*-
import json
"""
--- Day 12: JSAbacusFramework.io ---
Santa's Accounting-Elves need help balancing the books after a recent order. Unfortunately, their accounting software uses a peculiar storage format. That's where you come in.
They have a JSON document which contains a variety of things: arrays ([1,2,3]), objects ({"a":1, "b":2}), numbers, and strings. Your first job is to simply find all of the numbers throughout the document and add them together.
For example:
[1,2,3] and {"a":2,"b":4} both have a sum of 6.
[[[3]]] and {"a":{"b":4},"c":-1} both have a sum of 3.
{"a":[-1,1]} and [-1,{"a":1}] both have a sum of 0.
[] and {} both have a sum of 0.
You will not encounter any strings containing numbers.
What is the sum of all numbers in the document?
"""
def advent_day_12(json_doc, red_matters=False):
def find_numbers_sum(json_object):
nsum = 0
        if isinstance(json_object, (int, float)):  # JSON numbers may parse as float
return json_object
elif isinstance(json_object, list):
for obj in json_object:
nsum += find_numbers_sum(obj)
elif isinstance(json_object, dict):
dict_sum = 0
for k, v in json_object.items():
if red_matters and v == 'red':
dict_sum = 0
break
dict_sum += find_numbers_sum(v)
nsum += dict_sum
return nsum
doc = json.loads(json_doc)
return find_numbers_sum(doc)
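
# Sanity checks drawn from the puzzle statement above; the red_matters case
# follows the part-two "ignore objects containing red" rule implemented here.
if __name__ == "__main__":
    assert advent_day_12('[1,2,3]') == 6
    assert advent_day_12('{"a":{"b":4},"c":-1}') == 3
    assert advent_day_12('[1,{"c":"red","b":2},3]', red_matters=True) == 4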
| null |
day12.py
|
day12.py
|
py
| 1,386 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.loads",
"line_number": 48,
"usage_type": "call"
}
] |
132469264
|
#!/usr/bin/python3
''' starts a Flask web application '''
from flask import Flask, render_template
from models import storage
app = Flask(__name__)
@app.route('/hbnb', strict_slashes=False)
def states_hbnb():
    ''' display the HBNB page '''
list_state = list(storage.all("State").values())
list_amenity = list(storage.all("Amenity").values())
list_place = list(storage.all("Place").values())
list_user = list(storage.all("User").values())
return render_template('100-hbnb.html',
list_state=list_state, list_amenity=list_amenity,
list_place=list_place, list_user=list_user)
@app.teardown_appcontext
def state_close(error):
''' close the session '''
storage.close()
if __name__ == '__main__':
app.run(host='0.0.0.0')
| null |
web_flask/100-hbnb.py
|
100-hbnb.py
|
py
| 813 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "models.storage.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.storage.close",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 23,
"usage_type": "name"
}
] |
123157719
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 17:37:51 2018
@author: 10257
"""
import urllib.request as r
import json
url = []
for i in range(100):
    # Taobao pages step by 44 items, so the offset parameter is i * 44
    url.append("https://s.taobao.com/search?q=%E8%A3%99%E5%AD%90&imgfile=&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.1&ie=utf8&initiative_id=tbindexz_20170306&bcoffset=3&ntoffset=3&p4ppushleft=1%2C48&s="+str(i*44)+"&ajax=true")
def PaQu():
f=open('淘宝数据.csv','w',encoding='gbk')
for i in range(100):
data = r.urlopen(url[i]).read().decode('utf-8','ignore')
data = json.loads(data)
        for j in range(40):  # use a separate index to avoid shadowing the page counter
            auction = data["mods"]["itemlist"]["data"]["auctions"][j]
            # file.write() accepts a single string, so join the fields first
            f.write(','.join([auction["view_price"],
                              auction["view_sales"],
                              auction["raw_title"]]) + '\n')
f.close()
print("爬取结束")
PaQu()
| null |
case7.py
|
case7.py
|
py
| 976 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
}
] |
174146171
|
#Flask RestFul
#Get
#Post
#Put
#Delete
from flask import Flask
from flask_restful import Api,Resource
from Controller.cerveja_controller import CervejaController
app=Flask(__name__)
api=Api(app)
api.add_resource(CervejaController,'/api/cerveja')
@app.route('/')
def inicio():
    return 'Welcome to the API'
if __name__ == '__main__':
    app.run(debug=True, port=80)
| null |
Aula36/aula36.py
|
aula36.py
|
py
| 335 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Controller.cerveja_controller.CervejaController",
"line_number": 12,
"usage_type": "argument"
}
] |
618230781
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import JobboleSpiderItem
from scrapy.loader import ItemLoader
class JobboleSpider(CrawlSpider):
name = 'jobbole'
allowed_domains = ['web.jobbole.com']
start_urls = ['http://web.jobbole.com/all-posts/page/1/']
rules = (
Rule(LinkExtractor(
allow=r'http:\/\/web.jobbole.com\/all-posts\/page\/\d+\/')),
Rule(LinkExtractor(allow=r'http:\/\/web.jobbole.com\/\d+\/$'),
callback='parse_item', follow=True),
)
def parse_item(self, response):
loader = ItemLoader(item=JobboleSpiderItem(), response=response)
loader.add_css('title', '.entry-header h1::text')
loader.add_css('tags', '.entry-meta-hide-on-mobile')
loader.add_css('create_time', '.entry-meta-hide-on-mobile')
loader.add_value('link', response.url)
yield loader.load_item()
| null |
jobbole_spider/spiders/jobbole.py
|
jobbole.py
|
py
| 987 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scrapy.loader.ItemLoader",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "items.JobboleSpiderItem",
"line_number": 22,
"usage_type": "call"
}
] |
67376494
|
import mysql.connector
import logging
import pandas as pd
import os
from pathlib import Path
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from util.json_util import *
class db_connector:
def __init__(self):
self.connectorCode = "DB"
conn_relative_path = Path('config')
try:
config = read_json(os.path.join(conn_relative_path, "db_param.json"))
self.config = config
except Exception:
logger.fatal("NO Database Connection Configuration")
raise Exception
def initial_connection_cursor(self):
conn = mysql.connector.connect(**self.config)
return conn ,conn.cursor(buffered=True)
def GET(self,table_name,column=None, condition=None):
if column:
column_str = ','.join(column)
else:
column_str = '*'
logger.info(condition)
if condition:
tmp = []
for item in condition.keys():
tmp.append(f"{item} = '{condition[item]}'")
tmp_str = ' AND '.join(tmp)
condition_str = f'WHERE {tmp_str}'
else:
condition_str = ''
query = f'SELECT {column_str} FROM {table_name} {condition_str}'
logger.info(query)
conn , cursor = self._execute(query)
ret = pd.DataFrame(cursor.fetchall())
        logger.info(ret)
        conn.close()
        return ret
def UPDATE(self,table_name,value:dict,condition:dict):
set_tmp = []
condition_tmp = []
for item in value.keys():
set_tmp.append(f"{item}='{value[item]}'")
for item in condition.keys():
condition_tmp.append(f"{item} = '{condition[item]}'")
set_str = ','.join(set_tmp)
tmp_str = ' AND '.join(condition_tmp)
condition_str = f'WHERE {tmp_str}'
query = f'UPDATE {table_name} SET {set_str} {condition_str}'
logger.info(query)
conn, cursor = self._execute(query)
conn.close()
def INSERT(self,table_name,data_dict):
#retrieve data
column_str,value_str = self.form_column(data_dict)
query = f'INSERT INTO {table_name} ' \
f'{column_str} VALUES {value_str}' #(example,example)
conn , cursor = self._execute(query,data_dict)
conn.close()
def INSERT_MANY(self,table_name,data_list):
#retrieve data
for item in data_list:
data_dict = item
column_str,value_str = self.form_column(data_dict)
query = f'INSERT INTO {table_name} ' \
f'{column_str} VALUES {value_str}' #(example,example)
conn , cursor = self._execute(query,data_dict)
conn.close()
def DELETE(self,table_name:str,condition:dict):
        if condition is None:
            # refuse to delete without a WHERE clause
            raise Exception("DELETE requires a condition")
tmp = []
for item in condition.keys():
tmp.append(f"{item} = '{condition[item]}'")
tmp_str = ' AND '.join(tmp)
condition_str = f'WHERE {tmp_str}'
query = f'DELETE FROM {table_name} {condition_str}'
conn , cursor = self._execute(query)
conn.close()
def form_column(self,data_dict:dict):
column_key = data_dict.keys()
column = ','.join(column_key)
ret_column = f'({column})'
tmp = []
for item in column_key:
tmp.append(f'%({item})s')
value = ','.join(tmp)
ret_value = f'({value})'
return ret_column , ret_value
def CUSTOM(self,query):
conn , cursor = self._execute(query)
ret = pd.DataFrame(cursor.fetchall())
conn.close()
return ret
def _execute(self,query,value_dict=()):
try:
conn , cursor = self.initial_connection_cursor()
cursor.execute(query,value_dict)
conn.commit()
return conn , cursor
#conn.close()
        except Exception as e:
            logger.error(e)
            raise  # re-raise so callers don't try to unpack None
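
# Hypothetical usage sketch (table and column names are illustrative only):
#   db = db_connector()
#   db.INSERT("your_table", {"uuid": "s1", "time": "2021-10-30 10:00",
#                            "aqi": "50", "pm25": "12"})
#   rows = db.GET("your_table", condition={"uuid": "s1"})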
| null |
transport/db_transport.py
|
db_transport.py
|
py
| 4,188 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 114,
"usage_type": "call"
}
] |
64076946
|
from django.shortcuts import render,redirect
from .models import Question
from django.http import HttpResponse,HttpResponseRedirect
from django.core.urlresolvers import reverse
import json
def Home_view(request):
msg='Hi'
return render(request,'QDemo/Home.html',{'msg':msg})
def RootQuestion_view(request):
question=Question.objects.all().order_by('id')[0]
return render(request,'QDemo/Questions.html',{'question':question})
def GetQuestion_view(request):
if request.method=='POST':
pk=int(request.POST['pkOfPrecedentQ'])
response=request.POST['responseToPrecedentQ']
nextQuestion=Question.objects.get(pk=pk).childID
if nextQuestion:
leaf=1
nextQtext=nextQuestion.text
nextQpk=nextQuestion.id
nextQResponses=nextQuestion.responses
nextQCategory=nextQuestion.category
datarespons=json.dumps({'text': nextQtext,'pk':nextQpk,'responses':nextQResponses,'category':nextQCategory,'IsAleaf':leaf})
            print(datarespons)
return HttpResponse(datarespons, content_type="application/json")
else:
leaf=0
url=reverse('Result')
datarespons=json.dumps({'url':url,'IsAleaf':leaf})
return HttpResponse(datarespons, content_type="application/json")
def Result_view (request):
cadeau='TaMere'
return render(request,'QDemo/result.html',{'cadeau':cadeau})
# Create your views here.
| null |
QDemo/views.py
|
views.py
|
py
| 1,313 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Question.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Question.objects.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 33,
"usage_type": "call"
}
] |
130556696
|
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import automation
from esphomeyaml.const import CONF_DEVICE_CLASS, CONF_ID, CONF_INTERNAL, CONF_INVERTED, \
CONF_MAX_LENGTH, CONF_MIN_LENGTH, CONF_MQTT_ID, CONF_ON_CLICK, CONF_ON_DOUBLE_CLICK, \
CONF_ON_PRESS, CONF_ON_RELEASE, CONF_TRIGGER_ID, CONF_FILTERS, CONF_INVERT, CONF_DELAYED_ON, \
CONF_DELAYED_OFF, CONF_LAMBDA, CONF_HEARTBEAT
from esphomeyaml.helpers import App, NoArg, Pvariable, add, add_job, esphomelib_ns, \
setup_mqtt_component, bool_, process_lambda, ArrayInitializer
DEVICE_CLASSES = [
'', 'battery', 'cold', 'connectivity', 'door', 'garage_door', 'gas',
'heat', 'light', 'lock', 'moisture', 'motion', 'moving', 'occupancy',
'opening', 'plug', 'power', 'presence', 'problem', 'safety', 'smoke',
'sound', 'vibration', 'window'
]
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
binary_sensor_ns = esphomelib_ns.namespace('binary_sensor')
PressTrigger = binary_sensor_ns.PressTrigger
ReleaseTrigger = binary_sensor_ns.ReleaseTrigger
ClickTrigger = binary_sensor_ns.ClickTrigger
DoubleClickTrigger = binary_sensor_ns.DoubleClickTrigger
BinarySensor = binary_sensor_ns.BinarySensor
InvertFilter = binary_sensor_ns.InvertFilter
LambdaFilter = binary_sensor_ns.LambdaFilter
DelayedOnFilter = binary_sensor_ns.DelayedOnFilter
DelayedOffFilter = binary_sensor_ns.DelayedOffFilter
HeartbeatFilter = binary_sensor_ns.HeartbeatFilter
MQTTBinarySensorComponent = binary_sensor_ns.MQTTBinarySensorComponent
FILTER_KEYS = [CONF_INVERT, CONF_DELAYED_ON, CONF_DELAYED_OFF, CONF_LAMBDA, CONF_HEARTBEAT]
FILTERS_SCHEMA = vol.All(cv.ensure_list, [vol.All({
vol.Optional(CONF_INVERT): None,
vol.Optional(CONF_DELAYED_ON): cv.positive_time_period_milliseconds,
vol.Optional(CONF_DELAYED_OFF): cv.positive_time_period_milliseconds,
vol.Optional(CONF_HEARTBEAT): cv.positive_time_period_milliseconds,
vol.Optional(CONF_LAMBDA): cv.lambda_,
}, cv.has_exactly_one_key(*FILTER_KEYS))])
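# A config matching FILTERS_SCHEMA is a list of single-key entries, e.g.
# (hedged sketch of the YAML shape this validates):
#   filters:
#     - invert:
#     - delayed_off: 100ms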
BINARY_SENSOR_SCHEMA = cv.MQTT_COMPONENT_SCHEMA.extend({
cv.GenerateID(CONF_MQTT_ID): cv.declare_variable_id(MQTTBinarySensorComponent),
cv.GenerateID(): cv.declare_variable_id(BinarySensor),
vol.Optional(CONF_DEVICE_CLASS): vol.All(vol.Lower, cv.one_of(*DEVICE_CLASSES)),
vol.Optional(CONF_FILTERS): FILTERS_SCHEMA,
vol.Optional(CONF_ON_PRESS): vol.All(cv.ensure_list, [automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(PressTrigger),
})]),
vol.Optional(CONF_ON_RELEASE): vol.All(cv.ensure_list, [automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(ReleaseTrigger),
})]),
vol.Optional(CONF_ON_CLICK): vol.All(cv.ensure_list, [automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(ClickTrigger),
vol.Optional(CONF_MIN_LENGTH, default='50ms'): cv.positive_time_period_milliseconds,
vol.Optional(CONF_MAX_LENGTH, default='350ms'): cv.positive_time_period_milliseconds,
})]),
vol.Optional(CONF_ON_DOUBLE_CLICK):
vol.All(cv.ensure_list, [automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(DoubleClickTrigger),
vol.Optional(CONF_MIN_LENGTH, default='50ms'): cv.positive_time_period_milliseconds,
vol.Optional(CONF_MAX_LENGTH, default='350ms'): cv.positive_time_period_milliseconds,
})]),
vol.Optional(CONF_INVERTED): cv.invalid(
"The inverted binary_sensor property has been replaced by the "
"new 'invert' binary sensor filter. Please see "
"https://esphomelib.com/esphomeyaml/components/binary_sensor/index.html."
),
})
BINARY_SENSOR_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(BINARY_SENSOR_SCHEMA.schema)
def setup_filter(config):
if CONF_INVERT in config:
yield InvertFilter.new()
elif CONF_DELAYED_OFF in config:
yield App.register_component(DelayedOffFilter.new(config[CONF_DELAYED_OFF]))
elif CONF_DELAYED_ON in config:
yield App.register_component(DelayedOnFilter.new(config[CONF_DELAYED_ON]))
elif CONF_HEARTBEAT in config:
yield App.register_component(HeartbeatFilter.new(config[CONF_HEARTBEAT]))
elif CONF_LAMBDA in config:
lambda_ = None
for lambda_ in process_lambda(config[CONF_LAMBDA], [(bool_, 'x')]):
yield None
yield LambdaFilter.new(lambda_)
def setup_filters(config):
filters = []
for conf in config:
filter = None
for filter in setup_filter(conf):
yield None
filters.append(filter)
yield ArrayInitializer(*filters)
def setup_binary_sensor_core_(binary_sensor_var, mqtt_var, config):
if CONF_INTERNAL in config:
        add(binary_sensor_var.set_internal(config[CONF_INTERNAL]))
if CONF_DEVICE_CLASS in config:
add(binary_sensor_var.set_device_class(config[CONF_DEVICE_CLASS]))
if CONF_INVERTED in config:
add(binary_sensor_var.set_inverted(config[CONF_INVERTED]))
if CONF_FILTERS in config:
filters = None
for filters in setup_filters(config[CONF_FILTERS]):
yield
add(binary_sensor_var.add_filters(filters))
for conf in config.get(CONF_ON_PRESS, []):
rhs = binary_sensor_var.make_press_trigger()
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
automation.build_automation(trigger, NoArg, conf)
for conf in config.get(CONF_ON_RELEASE, []):
rhs = binary_sensor_var.make_release_trigger()
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
automation.build_automation(trigger, NoArg, conf)
for conf in config.get(CONF_ON_CLICK, []):
rhs = binary_sensor_var.make_click_trigger(conf[CONF_MIN_LENGTH], conf[CONF_MAX_LENGTH])
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
automation.build_automation(trigger, NoArg, conf)
for conf in config.get(CONF_ON_DOUBLE_CLICK, []):
rhs = binary_sensor_var.make_double_click_trigger(conf[CONF_MIN_LENGTH],
conf[CONF_MAX_LENGTH])
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
automation.build_automation(trigger, NoArg, conf)
setup_mqtt_component(mqtt_var, config)
def setup_binary_sensor(binary_sensor_obj, mqtt_obj, config):
binary_sensor_var = Pvariable(config[CONF_ID], binary_sensor_obj,
has_side_effects=False)
mqtt_var = Pvariable(config[CONF_MQTT_ID], mqtt_obj,
has_side_effects=False)
add_job(setup_binary_sensor_core_, binary_sensor_var, mqtt_var, config)
def register_binary_sensor(var, config):
binary_sensor_var = Pvariable(config[CONF_ID], var, has_side_effects=True)
rhs = App.register_binary_sensor(binary_sensor_var)
mqtt_var = Pvariable(config[CONF_MQTT_ID], rhs, has_side_effects=True)
add_job(setup_binary_sensor_core_, binary_sensor_var, mqtt_var, config)
BUILD_FLAGS = '-DUSE_BINARY_SENSOR'
| null |
esphomeyaml/components/binary_sensor/__init__.py
|
__init__.py
|
py
| 7,072 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "esphomeyaml.config_validation.PLATFORM_SCHEMA.extend",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.PLATFORM_SCHEMA",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.esphomelib_ns.namespace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.esphomelib_ns",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_INVERT",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_ON",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_OFF",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_LAMBDA",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_HEARTBEAT",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "voluptuous.All",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.ensure_list",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "voluptuous.Optional",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_INVERT",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_ON",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_OFF",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_HEARTBEAT",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_LAMBDA",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.lambda_",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.has_exactly_one_key",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.MQTT_COMPONENT_SCHEMA.extend",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.MQTT_COMPONENT_SCHEMA",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MQTT_ID",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "voluptuous.Optional",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_DEVICE_CLASS",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_FILTERS",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ON_PRESS",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ON_RELEASE",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ON_CLICK",
"line_number": 58,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ON_DOUBLE_CLICK",
"line_number": 63,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_INVERTED",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "voluptuous.Lower",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation.one_of",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "voluptuous.All",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.ensure_list",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.validate_automation",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.ensure_list",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.validate_automation",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.ensure_list",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.validate_automation",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "voluptuous.Optional",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MIN_LENGTH",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MAX_LENGTH",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "voluptuous.All",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.ensure_list",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.validate_automation",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.GenerateID",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "voluptuous.Optional",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MIN_LENGTH",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MAX_LENGTH",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.config_validation.declare_variable_id",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.positive_time_period_milliseconds",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.config_validation.invalid",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.config_validation",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_INVERT",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_OFF",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.App.register_component",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.App",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_OFF",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_ON",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.App.register_component",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.App",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_DELAYED_ON",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_HEARTBEAT",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.App.register_component",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.App",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_HEARTBEAT",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_LAMBDA",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.process_lambda",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_LAMBDA",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.bool_",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.ArrayInitializer",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_INTERNAL",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_INTERNAL",
"line_number": 108,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.const.CONF_DEVICE_CLASS",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_DEVICE_CLASS",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_INVERTED",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_INVERTED",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_FILTERS",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_FILTERS",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ON_PRESS",
"line_number": 119,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.build_automation",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.NoArg",
"line_number": 122,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_ON_RELEASE",
"line_number": 124,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.build_automation",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.NoArg",
"line_number": 127,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_ON_CLICK",
"line_number": 129,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.const.CONF_MIN_LENGTH",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_MAX_LENGTH",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.build_automation",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.NoArg",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_ON_DOUBLE_CLICK",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.const.CONF_MIN_LENGTH",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.const.CONF_MAX_LENGTH",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_TRIGGER_ID",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.automation.build_automation",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.NoArg",
"line_number": 138,
"usage_type": "argument"
},
{
"api_name": "esphomeyaml.automation",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.setup_mqtt_component",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ID",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MQTT_ID",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add_job",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_ID",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.App.register_binary_sensor",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.helpers.App",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.Pvariable",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "esphomeyaml.const.CONF_MQTT_ID",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "esphomeyaml.helpers.add_job",
"line_number": 155,
"usage_type": "call"
}
] |
279953303
|
"""Table-of-contents magic
for IPython Notebook
Just do:
%load_ext nbtoc
%nbtoc
to get a floating table of contents
All the interesting code, c/o @magican and @nonamenix:
https://gist.github.com/magican/5574556
"""
import io
import os
from IPython.display import display_html, display_javascript
here = os.path.abspath(os.path.dirname(__file__))
if not os.path.isfile(os.path.join(here, 'nbtoc.js')) or not os.path.isfile(os.path.join(here, 'nbtoc.html')):
import urllib2
def download(url, fout):
""" Saves the url file to fout filename """
filein = urllib2.urlopen(url)
fileout = open(fout, "wb")
        while True:
            chunk = filein.read(1024)  # read in 1 KiB chunks
            if not chunk:
                break
            fileout.write(chunk)
filein.close()
fileout.close()
download('https://raw.github.com/minrk/ipython_extensions/master/nbtoc.js', os.path.join(here, 'nbtoc.js'))
download('https://raw.github.com/minrk/ipython_extensions/master/nbtoc.html', os.path.join(here, 'nbtoc.html'))
with io.open(os.path.join(here, 'nbtoc.js')) as f:
toc_js = f.read()
with io.open(os.path.join(here, 'nbtoc.html')) as f:
toc_html = f.read()
def nbtoc(line):
display_html(toc_html, raw=True)
display_javascript(toc_js, raw=True)
def load_ipython_extension(ip):
ip.magics_manager.register_function(nbtoc)
| null |
nbtoc.py
|
nbtoc.py
|
py
| 1,386 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "io.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "io.open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "IPython.display.display_html",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "IPython.display.display_javascript",
"line_number": 50,
"usage_type": "call"
}
] |
381817887
|
import paho.mqtt.client as mqtt
import time
import csv
import datetime
import pyradamsa
rad = pyradamsa.Radamsa()
topic = "dev/test"
m = "Sending message "
def on_connect(client, userdata, flags, rc):
print(f"Connected with result code {rc}")
client = mqtt.Client()
cases = []
with open("usernames-to-mutate.txt") as f:
lines = f.readlines()
for ele in lines:
        cases.append(ele.rstrip())  # strip the trailing newline
def write_to_file(testCase, topic, message):
with open("sent_log.csv", 'a', encoding="utf-8", newline="") as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow([datetime.datetime.now(), testCase, topic, message])
for ele in cases:
for i in range(10000):
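        # radamsa mutates the seed username; decode with errors ignored so the
        # mutated bytes always yield a str that paho-mqtt will accept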
case = rad.fuzz(ele.encode("UTF-8"))
decodedCase = case.decode("UTF-8", "ignore")
client.username_pw_set(username=decodedCase, password="password")#make sure allow anonymous is true and there is no linked password file
client.on_connect = on_connect
client.connect("192.168.0.25", 1883, 60)
message = m + str(i) + " with original topic " + ele
client.publish(topic, payload=message, qos=0, retain=False)
write_to_file(decodedCase, topic, message)
client.disconnect()
print("Done")
| null |
Simple fuzzer/username fuzz/mqtt username fuzz.py
|
mqtt username fuzz.py
|
py
| 1,291 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyradamsa.Radamsa",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client.Client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "attribute"
}
] |
18329281
|
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import warnings
warnings.filterwarnings("ignore")
from PIL import Image
import numpy as np
from colorizers import eccv16, siggraph17
from skimage import color
import torch
import torch.nn.functional as F
from IPython import embed
import argparse
import matplotlib.pyplot as plt
import cv2
import sys
image_path = sys.argv[1]
image_name = sys.argv[2]
def load_img(img_path):
out_np = np.asarray(Image.open(img_path))
if(out_np.ndim==2):
out_np = np.tile(out_np[:,:,None],3)
return out_np
def resize_img(img, HW=(256,256), resample=3):
return np.asarray(Image.fromarray(img).resize((HW[1],HW[0]), resample=resample))
def preprocess_img(img_rgb_orig, HW=(256,256), resample=3):
# return original size L and resized L as torch Tensors
img_rgb_rs = resize_img(img_rgb_orig, HW=HW, resample=resample)
img_lab_orig = color.rgb2lab(img_rgb_orig)
img_lab_rs = color.rgb2lab(img_rgb_rs)
img_l_orig = img_lab_orig[:,:,0]
img_l_rs = img_lab_rs[:,:,0]
tens_orig_l = torch.Tensor(img_l_orig)[None,None,:,:]
tens_rs_l = torch.Tensor(img_l_rs)[None,None,:,:]
return (tens_orig_l, tens_rs_l)
def postprocess_tens(tens_orig_l, out_ab, mode='bilinear'):
# tens_orig_l 1 x 1 x H_orig x W_orig
# out_ab 1 x 2 x H x W
HW_orig = tens_orig_l.shape[2:]
HW = out_ab.shape[2:]
# call resize function if needed
if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]):
out_ab_orig = F.interpolate(out_ab, size=HW_orig, mode='bilinear')
else:
out_ab_orig = out_ab
out_lab_orig = torch.cat((tens_orig_l, out_ab_orig), dim=1)
return color.lab2rgb(out_lab_orig.data.cpu().numpy()[0,...].transpose((1,2,0)))
# load colorizers
colorizer_eccv16 = eccv16(pretrained=True).eval()
colorizer_siggraph17 = siggraph17(pretrained=True).eval()
#load image
# image_path = 'SampleImage/2in.jpg'
# img = load_img(str(image_path))
img = cv2.imread(str(image_path))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; color.rgb2lab expects RGB
# default size to process images is 256x256
# grab L channel in both original ("orig") and resized ("rs") resolutions
(tens_l_orig, tens_l_rs) = preprocess_img(img, HW=(256,256))
# colorizer outputs 256x256 ab map
# resize and concatenate to original L channel
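# img_bw is the reconstruction with zero chrominance (ab = 0), i.e. the grayscale baseline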
img_bw = postprocess_tens(tens_l_orig, torch.cat((0*tens_l_orig,0*tens_l_orig),dim=1))
out_img_eccv16 = postprocess_tens(tens_l_orig, colorizer_eccv16(tens_l_rs).cpu())
out_img_siggraph17 = postprocess_tens(tens_l_orig, colorizer_siggraph17(tens_l_rs).cpu())
# plt.imsave('ouput_eccv16.png', out_img_eccv16)
# plt.imsave('output_siggraph17.png', out_img_siggraph17)
image_save_path = image_path.replace(image_name, "temp.png")
# cv2.imwrite(str(image_save_path), out_img_siggraph17)
plt.imsave(str(image_save_path), out_img_siggraph17)
print('media/temp.png')
| null |
image_colorization.py
|
image_colorization.py
|
py
| 2,727 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.tile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "skimage.color.rgb2lab",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "skimage.color",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "skimage.color.rgb2lab",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "skimage.color",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "skimage.color.lab2rgb",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "skimage.color",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "colorizers.eccv16",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "colorizers.siggraph17",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imsave",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
}
] |
301619966
|
import argparse
import numpy as np
import os
def scale(x):
x_min, x_max = np.min(x, axis=0), np.max(x, axis=0)
return (x - x_min)/(x_max-x_min)
def normalize(x):
x_mean, x_std = np.mean(x, axis=0), np.std(x, axis=0)
return (x - x_mean)/x_std
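# Example (hypothetical input): for x = np.array([[1., 4.], [3., 8.]]),
# scale(x) maps each column onto [0, 1] and normalize(scale(x)) then
# zero-centers it with unit variance, which is what preprocess() applies below.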
def preprocess(filepath):
input_data = np.loadtxt(filepath, delimiter=',')
x_data, y_data = input_data[:, :8], input_data[:, 8]
x_data = normalize(scale(x_data))
y_data = np.expand_dims(y_data, -1)
data = np.hstack((x_data, y_data))
np.random.shuffle(data)
train_split = int(data.shape[0]*0.7)
train_data = data[:train_split, :]
test_data = data[train_split:, :]
output_filepath = filepath[:filepath.rfind(".")]
np.save(output_filepath+"_train", train_data)
np.save(output_filepath+"_test", test_data)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--filepath', default=os.path.join("..", "dataset",
"cal_housing.data"), help="Path to the training data")
args = parser.parse_args()
preprocess(args.filepath)
if __name__ == '__main__':
main()
| null |
Assignment_1/utils/data_preprocessing_part_b.py
|
data_preprocessing_part_b.py
|
py
| 1,123 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.min",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
}
] |
379473309
|
import optparse
from django.core.management import base
class Command(base.BaseCommand):
"""
Base class for a declarative management command API.
"""
option_list = ()
option_groups = ()
option_names = ()
actions = ()
def usage(self, subcommand):
"""
Override the usage display.
"""
usage = "%prog {subcommand:s} {args:}".format(
subcommand=subcommand, args=self.args)
if self.help:
return "{usage:s}\n\n{help:s}".format(
usage=usage, help=self.help)
return usage
def create_parser(self, prog_name, subcommand):
"""
Customize the OptionParser.
"""
parser = super(Command, self).create_parser(prog_name, subcommand)
for name, description, option_list in self.option_groups:
            group = optparse.OptionGroup(parser, name, description)
            # explicit loop: map() is lazy on Python 3 and would add no options
            for option in option_list:
                group.add_option(option)
parser.add_option_group(group)
return parser
def parse_options(self):
for name in self.option_names:
parse = getattr(self, "parse_option_{name:s}".format(
name=name), None)
if parse is not None and callable(parse):
self.options[name] = parse()
def handle(self, *args, **options):
self.args = args
self.options = options
self.parse_options()
for name in self.actions:
validate = getattr(self, "validate_{name:s}".format(
name=name), None)
if validate is not None and callable(validate):
validate()
for name in self.actions:
handle = getattr(self, "handle_{name:s}".format(
name=name), None)
if handle is not None and callable(handle):
handle()
class BaseCommandMixin(object):
"""
Base Django management command options.
"""
option_list = base.BaseCommand.option_list
option_groups = (
("[standard options]",
"Standard Django management command options.",
option_list,
),
)
option_names = ("verbosity",)
actions = ()
def parse_option_verbosity(self):
try:
verbosity = int(self.options.get("verbosity", 1))
except (ValueError, TypeError):
verbosity = 1
return verbosity
class BaseCommand(BaseCommandMixin, Command):
"""
Base Django management command.
"""
pass
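# A minimal sketch of the declarative API above (hypothetical command; the hook
# names follow the parse_option_/validate_/handle_ conventions consumed by
# Command.parse_options and Command.handle):
#
#     class GreetCommand(BaseCommand):
#         actions = ("greet",)
#
#         def validate_greet(self):
#             if not self.args:
#                 raise base.CommandError("nothing to greet")
#
#         def handle_greet(self):
#             print("hello {0}".format(self.args[0]))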
| null |
grunt/management/base.py
|
base.py
|
py
| 2,568 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "optparse.OptionGroup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base",
"line_number": 69,
"usage_type": "name"
}
] |
392091621
|
# WAP to wish the user good morning / good afternoon / good evening (gm/ga/ge)
import datetime
dt = datetime.datetime.now()
hour = dt.hour
if 6 <= hour < 12:
print("gm")
elif 12 <= hour < 15:
print("ga")
else:
print("ge")
| null |
L11/P4.py
|
P4.py
|
py
| 201 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "attribute"
}
] |
524764222
|
from sklearn.naive_bayes import MultinomialNB
# [has luster?, spans many cubic meters?, is transparent?, is dense?]
rocha1 = [0,1,0,0]
rocha2 = [1,1,0,0]
rocha3 = [0,1,0,0]
mineral1 = [1,0,1,1]
mineral2 = [0,0,1,1]
mineral3 = [1,0,0,1]
dados = [rocha1, rocha2, rocha3, mineral1, mineral2, mineral3]
marcacoes = ['rocha', 'rocha', 'rocha', 'mineral', 'mineral', 'mineral']
modelo = MultinomialNB()
modelo.fit(dados, marcacoes)
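# Example: modelo.predict([[0, 1, 0, 0]]) should return ['rocha'], matching the training sample rocha1.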
# [has luster?, spans many cubic meters?, is transparent?, is dense?]
misterioso1 = [1,0,0,0]
misterioso2 = [1,1,0,0]
misterioso3 = [1,1,1,0]
misterioso4 = [1,1,1,1]
misterioso5 = [0,1,0,0]
misterioso6 = [0,1,1,0]
misterioso7 = [0,1,1,1]
misterioso8 = [0,0,0,0]  # misclassified
misterioso9 = [0,0,1,0]
misterioso10 = [0,0,1,1]
misterioso11 = [0,0,0,1]
misterioso12 = [1,0,1,0]
misterioso13 = [1,0,1,1]
misterioso14 = [1,0,0,1]
misterioso15 = [1,1,0,1]  # misclassified
misterioso16 = [0,1,0,1]
# 14 of 16 predicted correctly: accuracy of 87.5% (the two cases marked above are misclassified)
misteriosos = [misterioso1, misterioso2, misterioso3, misterioso4, misterioso5, misterioso6,misterioso7, misterioso8 ,
misterioso9, misterioso10, misterioso11, misterioso12, misterioso13, misterioso14, misterioso15,
misterioso16]
print(modelo.predict(misteriosos))
| null |
Questao02.py
|
Questao02.py
|
py
| 1,255 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_number": 13,
"usage_type": "call"
}
] |
570160576
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 14:01:30 2018
@author: bazzz
"""
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
from scipy.misc import face
import time
from imageio import imread, imsave
#Z = face()[100:500,400:1000,1]/255
#imgName = 'Face'
Z = imread('Rain.jpg')[:,:,0]/255
imgName = 'Rain'
#Z = imread('Bas.png')[:,:,0]/255
#imgName = 'Bas'
np.random.seed(3929)
def imgNoise(A):
Anew = A+ 0.1 * np.random.normal(size=A.shape)
return Anew
def anorm(x):
return np.sqrt((x*x).sum(-1))
def nabla(A):
h,w = A.shape
dA = np.zeros([h,w,2])
#dA_x = A(x+1,y) - A(x,y) (Border = 0)
dA[:-1,:,1] = A[1:,:] - A[:-1,:]
#dA_y = A(x,y+1) - A(x,y) (Border = 0)
dA[:,:-1,0] = A[:,1:] - A[:,:-1]
return dA
#TV-L_1 Norm
def g1(X, Orig, clambda):
TV = anorm(nabla(X)).sum()
Edata = (np.abs(X-Orig)).sum()
return TV + Edata*clambda
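# g1 is the (isotropic) TV-L1 energy E(Y) = sum_p ||(nabla Y)_p||_2 + clambda * sum_p |Y_p - X_p|;
# gradg_ij below differentiates a smoothed version of it (alph guards against division by zero).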
def gradg_ij(Y,i,j,X,clambda,alph = .001):
padY[1:-1,1:-1] = Y
return clambda*np.sign(Y[i,j] - X[i,j]) + (
(2*padY[i+1,j+1]-padY[i+2,j+1]-padY[i+1,j+2])/sqrt(alph+(padY[i+2,j+1]-padY[i+1,j+1])**2+(padY[i+1,j+2]-padY[i+1,j+1])**2)+
(padY[i+1,j+1]-padY[i,j+1])/sqrt(alph+(padY[i+1,j+1]-padY[i,j+1])**2 + (padY[i,j+2]-padY[i,j+1])**2)+
(padY[i+1,j+1]-padY[i+1,j])/sqrt(alph + (padY[i+2,j]-padY[i+1,j])**2 + (padY[i+1,j+1]-padY[i+1,j])**2))
def gradg(Y,X,clambda,alph = .0001):
for i in range(0,h):
for j in range(0,w):
gradY[i,j] = gradg_ij(Y,i,j,X,clambda,alph)
return gradY
Y = imgNoise(Z)
X = Y
padY = np.pad(Y,1,'constant')
h,w = Y.shape
gradY = np.zeros((h,w))
clock1 = time.perf_counter()  # time.clock() was removed in Python 3.8
diff = 1E-3
clambda = 1
rho = 0.5
c1 = 10**(-4)
gr = gradg(Y,X,clambda)
counter = 0
grOld = np.zeros((h,w))
while (abs(np.linalg.norm(gr)-np.linalg.norm(grOld))> diff and counter < 150):
grOld = np.copy(gr)
counter += 1
pk = -gr
alph = 1
while (g1(Y+alph*pk,X,clambda) > g1(Y,X,clambda) + c1*alph*np.reshape(pk,pk.size).dot(np.reshape(gr,gr.size))): #Backtracking Algo
alph *= rho
Y = Y + alph*pk
gr = gradg(Y,X,clambda)
print(f'{counter}, : , {np.linalg.norm(gr)}')
clock2 = time.perf_counter()
print(clock2-clock1)
plt.gray()
plt.imshow(Z)
imsave('./TVL1Steepest/Orig' + imgName + '.png', Z)
plt.show()
plt.gray()
plt.imshow(X)
imsave('./TVL1Steepest/Noisy' + imgName + '.png', X)
plt.show()
plt.gray()
plt.imshow(Y)
imsave('./TVL1Steepest/Denoi' + imgName + '.png', Y)
plt.show()
| null |
Total Variation Denoising - Fall 2018/TVL1 Steepest.py
|
TVL1 Steepest.py
|
py
| 2,495 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "imageio.imread",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "time.clock",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gray",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "imageio.imsave",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gray",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "imageio.imsave",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gray",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "imageio.imsave",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
}
] |
251476502
|
from __future__ import print_function
from time import time
import csv
import sys
import os
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import lda
import logging
logging.basicConfig(filename='lda_analyser.log', level=logging.DEBUG)
if not os.path.exists("results"):
os.makedirs("results")
for n_topics in [10, 20, 50, 100]:
n_features = 10000
n_top_words = int(sys.argv[1]) + 1
corpus = []
topics_write_file = csv.writer(open("results/lda_topics_{}topics_{}words.csv".format(n_topics, n_top_words - 1), "wb"), delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
write_file = csv.writer(open("results/lda_topics_{}topics_{}words_mapping.csv".format(n_topics, n_top_words - 1), "wb"), delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
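# topics file rows: [topic id, top words...]; mapping file rows: [entity, day, dominant topic id]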
def print_top_words(model, doc_topic, feature_names, n_top_words, dictionary):
for i, topic_dist in enumerate(model):
topic_words = np.array(feature_names)[np.argsort(topic_dist)][:-n_top_words:-1]
#write_file.write('Topic {}: {}\n'.format(i, ' '.join(topic_words)))
topic_row = [str(i)]
topic_row.extend(topic_words)
topics_write_file.writerow(topic_row)
for i in range(len(corpus)):
document_row = [dictionary[i][0], dictionary[i][1]]
document_row.append(doc_topic[i].argmax())
#document_row.append(corpus[i])
write_file.writerow(document_row)
entity_day_dict = dict()
# read all files and store their contents on a dictionary
for i in os.listdir(os.getcwd() + "/filtered_tweets"):
for filename in os.listdir(os.getcwd() + "/filtered_tweets" + "/" + i):
entity_day_dict[i+" "+filename] = open(os.getcwd() + "/filtered_tweets" + "/" + i + "/" + filename, 'r').read()
corpus = []
entity_day_key_index = dict()
i = 0
for key in entity_day_dict:
entity_day_key_index[i] = key.split(" ")
corpus.append(entity_day_dict[key])
i += 1
# Use tf (raw term count) features for LDA.
logging.info("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(corpus)
logging.info("done in %0.3fs." % (time() - t0))
logging.info("Fitting LDA models with tf")
model = lda.LDA(n_topics=n_topics, n_iter=1500, random_state=1)
#LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_method='online', #learning_offset=50., random_state=0)
t0 = time()
model.fit(tf)
logging.info("done in %0.3fs." % (time() - t0))
topic_word = model.topic_word_
doc_topic = model.doc_topic_
logging.info("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(topic_word, doc_topic, tf_feature_names, n_top_words, entity_day_key_index)
| null |
src/lda_without_tf_idf.py
|
lda_without_tf_idf.py
|
py
| 3,013 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "lda.LDA",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 82,
"usage_type": "call"
}
] |
146724018
|
from igraph import *
import igraph
import xml.etree.ElementTree as ET
from skimage import io
from skimage import transform
import cv2
import numpy as np
import argparse
def parserargs():
parser = argparse.ArgumentParser()
parser.add_argument('-image', action='store',
dest='image')
parser.add_argument('-target', action='store',
dest='target')
parser.add_argument('-source', action='store',
dest='source')
parser.add_argument('-plot', action='store',
dest='plot',
default=False)
results = parser.parse_args()
return results.image, results.source, results.target, results.plot
def imshow(name, image):
cv2.imshow(name, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def do_dictionary_label_list(image):
dictionary = {}
label_list = []
count = 0
for i, row in enumerate(image):
for j, pixel in enumerate(row):
label_list.append(str(count))
dictionary["({}, {})".format(i, j)] = count
count += 1
return dictionary, label_list
def remove_duplicate(vextex_list):
temp = []
for a,b in vextex_list :
if (a,b) not in temp and (b,a) not in temp: #to check for the duplicate tuples
temp.append((a,b))
    vextex_list = temp[:]  # shallow copy of temp
return vextex_list
if __name__ == '__main__':
image_path, source, target, plot = parserargs()
print("LOG:\nImage: {}\nSource: {}\nTarget: {}\nPlot: {}".format(image_path, source, target, plot))
image = cv2.imread(image_path, 0)
# image = np.array([
# [20, 220, 46],
# [55, 98, 33],
# [22, 11, 99],
# ]
# )
X = image.shape[0]-1
Y = image.shape[1]-1
neighbors = lambda x, y : [(x2, y2) for x2 in range(x-1, x+2)
for y2 in range(y-1, y+2)
if (-1 < x <= X and
-1 < y <= Y and
(x != x2 or y != y2) and
(0 <= x2 <= X) and
(0 <= y2 <= Y))]
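    # `neighbors` enumerates the 8-connected neighborhood of (x, y), clipped to
    # the image bounds, so every pixel is linked to each adjacent pixel.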
vextex_list = []
weight_list = []
dictionary = {}
label_list = []
weight_list = {}
dictionary, label_list = do_dictionary_label_list(image)
for i, row in enumerate(image):
for j, pixel in enumerate(row):
for n in neighbors(i, j):
vextex_list.append( (dictionary["({}, {})".format(i, j)], dictionary["({}, {})".format(n[0], n[1])]) )
weight_list[(dictionary["({}, {})".format(i, j)], dictionary["({}, {})".format(n[0], n[1])])] = abs(float(image[(i,j)]) - float(image[n]))
vextex_list = remove_duplicate(vextex_list)
g = Graph()
g.add_vertices(image.shape[0]*image.shape[1])
g.add_edges(vextex_list)
g.vs["name"] = label_list
g.vs["label"] = label_list
g.es["weight"] = 0
ks = list(weight_list)
while ks:
pair = ks.pop(0)
aux = str(pair).replace("(","").replace(")", "").replace(",","").split(" ")
first = aux[0]
second = aux[1]
g[first, second] = weight_list[pair]
path = g.shortest_paths_dijkstra(source=source, target=target, weights=g.es["weight"], mode=OUT)
print("****\nShortest_path: ", path[0][0])
if plot:
layout = g.layout("kk")
igraph.plot(g, layout = layout)
| null |
contextual_extractor.py
|
contextual_extractor.py
|
py
| 3,504 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "igraph.plot",
"line_number": 124,
"usage_type": "call"
}
] |
508767213
|
from loguru import logger
from util import util
import requests
import json
# Using math.js web service for expression eval
API_URL = "http://api.mathjs.org/v4/?expr="
def handle(update, context):
util.log_chat("calc", update)
query = update.message.text
query = query.split(" ")
try:
calc_str = " ".join(query[1:])
response = calc_engine(calc_str)
except Exception as e:
response = str(e)
finally:
logger.info("[calc] calc_str='{}' ; response='{}'", calc_str, response)
update.message.reply_text(response)
def calc_engine(calc_str):
query_url = API_URL + requests.utils.quote(calc_str)
response = requests.request("GET", query_url)
response = json.loads(response.text)
return str(response)
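# Example (hypothetical chat message): "/calc 2+2" gives calc_str "2+2", which is
# URL-quoted into http://api.mathjs.org/v4/?expr=2%2B2 and the service replies "4".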
| null |
src/handlers/calc.py
|
calc.py
|
py
| 780 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "util.util.log_chat",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "util.util",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "loguru.logger.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "requests.utils.quote",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.utils",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "requests.request",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
}
] |
326811944
|
# In[]
import sys, os
sys.path.append('../')
import numpy as np
from umap import UMAP
import time
import torch
import matplotlib.pyplot as plt
import pandas as pd
import scipy.sparse as sp
import scmomat.model as model
import scmomat.utils as utils
import scmomat.bmk as bmk
import scmomat.umap_batch as umap_batch
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
plt.rcParams["font.size"] = 10
import warnings
warnings.filterwarnings("ignore")
def lsi(counts):
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
tfidf = TfidfTransformer(norm='l2', sublinear_tf=True)
normed_count = tfidf.fit_transform(counts)
# perform SVD on the sparse matrix
lsi = TruncatedSVD(n_components=50, random_state=42)
lsi_r = lsi.fit_transform(normed_count)
    lsi.explained_variance_ratio_  # diagnostic attribute; this bare expression is a no-op
X_lsi = lsi_r[:, 1:]
return X_lsi
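# Note: the first LSI component is dropped (lsi_r[:, 1:]) because for scATAC-seq
# counts it typically tracks sequencing depth rather than biology.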
# # In[]
# # ------------------------------------------------------------------------------------------------------------------------------------------------------
# #
# # NOTE: 1. Subsampling the data batches to create imbalanced datasets
# #
# # ------------------------------------------------------------------------------------------------------------------------------------------------------
# # NOTE: read in dataset
# data_dir = "../data/simulated/6b16c_test_10/unequal/"
# # subsample batches 0, 3, 5 by 10: [::10]
# imbalanced_dir = "../data/simulated/6b16c_test_10/imbalanced/"
# if not os.path.exists(imbalanced_dir):
# os.makedirs(imbalanced_dir)
# n_batches = 6
# for batch in range(n_batches):
# label = pd.read_csv(os.path.join(data_dir, 'cell_label' + str(batch + 1) + '.txt'), index_col=0, sep = "\t")
# if batch in [0,3,5]:
# # subsample by 10
# label = label.iloc[::10,:]
# label.to_csv(os.path.join(imbalanced_dir, 'cell_label' + str(batch + 1) + '.txt'), sep = "\t")
# print("number of cells: {:d}".format(label.shape[0]))
# counts_atac = np.loadtxt(os.path.join(data_dir, 'RxC' + str(batch + 1) + ".txt"), delimiter = "\t")
# if batch in [0,3,5]:
# # subsample by 10
# counts_atac = counts_atac[:,::10]
# np.savetxt(os.path.join(imbalanced_dir, 'RxC' + str(batch + 1) + '.txt'), X = counts_atac, delimiter = "\t")
# counts_rna = np.loadtxt(os.path.join(data_dir, 'GxC' + str(batch + 1) + ".txt"), delimiter = "\t")
# if batch in [0,3,5]:
# # subsample by 10
# counts_rna = counts_rna[:,::10]
# np.savetxt(os.path.join(imbalanced_dir, 'GxC' + str(batch + 1) + '.txt'), X = counts_rna, delimiter = "\t")
# A = np.loadtxt(os.path.join(data_dir +'region2gene.txt'), delimiter = "\t")
# np.savetxt(os.path.join(imbalanced_dir, 'region2gene.txt'), X = A, delimiter = "\t")
# In[]
# ------------------------------------------------------------------------------------------------------------------------------------------------------
#
# NOTE: 1. Load dataset and running scmomat (without retraining, retraining see the third section)
#
# ------------------------------------------------------------------------------------------------------------------------------------------------------
# NOTE: read in dataset
data_dir = "../data/simulated/6b16c_test_1/imbalanced/"
result_dir = "simulated/6b16c_test_1/imbalanced"
scmomat_dir = result_dir + "/scmomat/"
if not os.path.exists(scmomat_dir):
os.makedirs(scmomat_dir)
n_batches = 6
counts_rnas = []
counts_atacs = []
labels = []
for batch in range(n_batches):
label = pd.read_csv(os.path.join(data_dir, 'cell_label' + str(batch + 1) + '.txt'), index_col=0, sep = "\t")["pop"].values.squeeze()
labels.append(label)
print("number of cells: {:d}".format(label.shape[0]))
try:
counts_atac = np.loadtxt(os.path.join(data_dir, 'RxC' + str(batch + 1) + ".txt"), delimiter = "\t").T
counts_atac = utils.preprocess(counts_atac, modality = "ATAC")
print("read atac for batch" + str(batch + 1))
except:
counts_atac = None
try:
counts_rna = np.loadtxt(os.path.join(data_dir, 'GxC' + str(batch + 1) + ".txt"), delimiter = "\t").T
print("read rna for batch" + str(batch + 1))
counts_rna = utils.preprocess(counts_rna, modality = "RNA", log = False)
except:
counts_rna = None
# preprocess the count matrix
counts_rnas.append(counts_rna)
counts_atacs.append(counts_atac)
counts = {"rna":counts_rnas, "atac": counts_atacs}
# NOTE: SCENARIO 1: diagonal integration
counts["rna"][0] = None
counts["rna"][1] = None
counts["rna"][2] = None
# counts["atac"][3] = None
counts["atac"][4] = None
counts["atac"][5] = None
# No need for pseudo-count matrix
A = np.loadtxt(os.path.join(data_dir +'region2gene.txt'), delimiter = "\t").T
# CALCULATE PSEUDO-SCRNA-SEQ
for idx in range(len(counts["atac"])):
if (counts["rna"][idx] is None) & (counts["atac"][idx] is not None):
counts["rna"][idx] = counts["atac"][idx] @ A.T
        # binarize: the cluster pattern is still visible (clearer, in fact), and the matrix is much denser than true scRNA-seq counts
counts["rna"][idx] = (counts["rna"][idx]!=0).astype(int)
# obtain the feature name
genes = np.array(["gene_" + str(x) for x in range(counts["rna"][-1].shape[1])])
regions = np.array(["region_" + str(x) for x in range(counts["atac"][0].shape[1])])
feats_name = {"rna": genes, "atac": regions}
counts["feats_name"] = feats_name
counts["nbatches"] = n_batches
# In[]
# NOTE: Running scmomat
# weight on regularization term
lamb = 0.001
batchsize = 0.1
# running seed
seed = 0
# number of latent dimensions
K = 20
interval = 1000
T = 4000
lr = 1e-2
# start_time = time.time()
# model1 = model.scmomat_model(counts = counts, K = K, batch_size = batchsize, interval = interval, lr = lr, lamb = lamb, seed = seed, device = device)
# losses1 = model1.train_func(T = T)
# end_time = time.time()
# print("running time: " + str(end_time - start_time))
# torch.save(model1, scmomat_dir + f'CFRM_{K}_{T}.pt')
model1 = torch.load(scmomat_dir + f'CFRM_{K}_{T}.pt')
# In[]
# NOTE: Plot the result before post-processing
plt.rcParams["font.size"] = 10
umap_op = UMAP(n_components = 2, n_neighbors = 30, min_dist = 0.2, random_state = 0)
zs = []
for batch in range(n_batches):
z = model1.softmax(model1.C_cells[str(batch)].cpu().detach()).numpy()
zs.append(z)
x_umap = umap_op.fit_transform(np.concatenate(zs, axis = 0))
# separate into batches
x_umaps = []
leiden_labels = []
for batch in range(n_batches):
if batch == 0:
start_pointer = 0
end_pointer = start_pointer + zs[batch].shape[0]
x_umaps.append(x_umap[start_pointer:end_pointer,:])
elif batch == (n_batches - 1):
start_pointer = start_pointer + zs[batch - 1].shape[0]
x_umaps.append(x_umap[start_pointer:,:])
else:
start_pointer = start_pointer + zs[batch - 1].shape[0]
end_pointer = start_pointer + zs[batch].shape[0]
x_umaps.append(x_umap[start_pointer:end_pointer,:])
utils.plot_latent_ext(x_umaps, annos = labels, mode = "separate", save = scmomat_dir + f'latent_separate_{K}_{T}.png', figsize = (15,30), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True)
utils.plot_latent_ext(x_umaps, annos = labels, mode = "modality", save = scmomat_dir + f'latent_batches_{K}_{T}.png', figsize = (15,10), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True)
# In[]
# NOTE: Post-processing, clustering, and plot the result after post-processing
n_neighbors = 30
r = 0.7
zs = []
for batch in range(n_batches):
z = model1.softmax(model1.C_cells[str(batch)].cpu().detach()).numpy()
zs.append(z)
s_pair_dist, knn_indices, knn_dists = utils.post_process(zs, n_neighbors, njobs = 8, r = r)
resolution = 0.5
labels_tmp = utils.leiden_cluster(X = None, knn_indices = knn_indices, knn_dists = knn_dists, resolution = resolution)
umap_op = umap_batch.UMAP(n_components = 2, n_neighbors = n_neighbors, min_dist = 0.30, random_state = 0,
metric='precomputed', knn_dists=knn_dists, knn_indices=knn_indices)
x_umap = umap_op.fit_transform(s_pair_dist)
# separate into batches
x_umaps = []
leiden_labels = []
for batch in range(n_batches):
if batch == 0:
start_pointer = 0
end_pointer = start_pointer + zs[batch].shape[0]
x_umaps.append(x_umap[start_pointer:end_pointer,:])
leiden_labels.append(labels_tmp[start_pointer:end_pointer])
elif batch == (n_batches - 1):
start_pointer = start_pointer + zs[batch - 1].shape[0]
x_umaps.append(x_umap[start_pointer:,:])
leiden_labels.append(labels_tmp[start_pointer:])
else:
start_pointer = start_pointer + zs[batch - 1].shape[0]
end_pointer = start_pointer + zs[batch].shape[0]
x_umaps.append(x_umap[start_pointer:end_pointer,:])
leiden_labels.append(labels_tmp[start_pointer:end_pointer])
utils.plot_latent_ext(x_umaps, annos = labels, mode = "separate", save = scmomat_dir + f'latent_separate_{K}_{T}_processed.png',
figsize = (7,20), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, alpha = 0.7, text_size = "x-large")
utils.plot_latent_ext(x_umaps, annos = labels, mode = "joint", save = scmomat_dir + f'latent_joint_{K}_{T}_processed.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, alpha = 0.7, text_size = "x-large")
utils.plot_latent_ext(x_umaps, annos = labels, mode = "modality", save = scmomat_dir + f'latent_batches_{K}_{T}_processed.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, alpha = 0.7)
utils.plot_latent_ext(x_umaps, annos = leiden_labels, mode = "joint", save = scmomat_dir + f'latent_leiden_clusters_{K}_{T}_{resolution}_processed.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, alpha = 0.7)
# In[]
# ------------------------------------------------------------------------------------------------------------------------------------------------------
#
# NOTE: 2. Benchmarking with baseline methods
#
# ------------------------------------------------------------------------------------------------------------------------------------------------------
# NOTE: Baseline methods
# 1. UINMF
uinmf_path = result_dir + "/uinmf/"
H1_uinmf = pd.read_csv(uinmf_path + "H1_norm.csv", index_col = 0).values
H2_uinmf = pd.read_csv(uinmf_path + "H2_norm.csv", index_col = 0).values
H3_uinmf = pd.read_csv(uinmf_path + "H3_norm.csv", index_col = 0).values
H4_uinmf = pd.read_csv(uinmf_path + "H4_norm.csv", index_col = 0).values
H5_uinmf = pd.read_csv(uinmf_path + "H5_norm.csv", index_col = 0).values
H6_uinmf = pd.read_csv(uinmf_path + "H6_norm.csv", index_col = 0).values
uinmf_umap = UMAP(n_components = 2, min_dist = 0.4, random_state = 0).fit_transform(np.concatenate((H1_uinmf, H2_uinmf, H3_uinmf, H4_uinmf, H5_uinmf, H6_uinmf), axis = 0))
uinmf_umaps = []
for batch in range(n_batches):
if batch == 0:
start_pointer = 0
end_pointer = start_pointer + zs[batch].shape[0]
uinmf_umaps.append(uinmf_umap[start_pointer:end_pointer,:])
elif batch == (n_batches - 1):
start_pointer = start_pointer + zs[batch - 1].shape[0]
uinmf_umaps.append(uinmf_umap[start_pointer:,:])
else:
start_pointer = start_pointer + zs[batch - 1].shape[0]
end_pointer = start_pointer + zs[batch].shape[0]
uinmf_umaps.append(uinmf_umap[start_pointer:end_pointer,:])
utils.plot_latent_ext(uinmf_umaps, annos = labels, mode = "separate", save = uinmf_path + f'latent_separate_uinmf.png',
figsize = (7,20), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, text_size = "large", alpha = 0.7)
utils.plot_latent_ext(uinmf_umaps, annos = labels, mode = "modality", save = uinmf_path + f'latent_batches_uinmf.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, text_size = "large", alpha = 0.7)
utils.plot_latent_ext(uinmf_umaps, annos = labels, mode = "joint", save = uinmf_path + f'latent_joint_uinmf.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, text_size = "large", alpha = 0.7)
# # 1. Liger
# liger_path = result_dir + "/liger/"
# H1_liger = pd.read_csv(liger_path + "H1_norm.csv", index_col = 0).values
# H2_liger = pd.read_csv(liger_path + "H2_norm.csv", index_col = 0).values
# H3_liger = pd.read_csv(liger_path + "H3_norm.csv", index_col = 0).values
# H4_liger = pd.read_csv(liger_path + "H4_norm.csv", index_col = 0).values
# H5_liger = pd.read_csv(liger_path + "H5_norm.csv", index_col = 0).values
# H6_liger = pd.read_csv(liger_path + "H6_norm.csv", index_col = 0).values
# liger_umap = UMAP(n_components = 2, min_dist = 0.4, random_state = 0).fit_transform(np.concatenate((H1_liger, H2_liger, H3_liger, H4_liger, H5_liger, H6_liger), axis = 0))
# liger_umaps = []
# for batch in range(n_batches):
# if batch == 0:
# start_pointer = 0
# end_pointer = start_pointer + zs[batch].shape[0]
# liger_umaps.append(liger_umap[start_pointer:end_pointer,:])
# elif batch == (n_batches - 1):
# start_pointer = start_pointer + zs[batch - 1].shape[0]
# liger_umaps.append(liger_umap[start_pointer:,:])
# else:
# start_pointer = start_pointer + zs[batch - 1].shape[0]
# end_pointer = start_pointer + zs[batch].shape[0]
# liger_umaps.append(liger_umap[start_pointer:end_pointer,:])
# utils.plot_latent_ext(liger_umaps, annos = labels, mode = "separate", save = liger_path + f'latent_separate_liger.png',
# figsize = (10,27), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, text_size = "large", colormap = "Paired", alpha = 0.7)
# utils.plot_latent_ext(liger_umaps, annos = labels, mode = "modality", save = liger_path + f'latent_batches_liger.png',
# figsize = (10,7), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, text_size = "large", colormap = "Paired", alpha = 0.7)
# utils.plot_latent_ext(liger_umaps, annos = labels, mode = "joint", save = liger_path + f'latent_joint_liger.png',
# figsize = (12,7), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, text_size = "large", colormap = "Paired", alpha = 0.7)
# Multimap
multimap_path = result_dir + "/multimap/"
batches = pd.read_csv(multimap_path + "batch_id.csv", index_col = 0)
X_multimap = np.load(multimap_path + "multimap.npy")
G_multimap = sp.load_npz(multimap_path + "multimap_graph.npz").toarray()
X_multimaps = []
for batch in ["C1", "C2", "C3", "C4", "C5", "C6"]:
X_multimaps.append(X_multimap[batches.values.squeeze() == batch, :])
utils.plot_latent_ext(X_multimaps, annos = labels, mode = "separate", save = multimap_path + f'latent_separate_multimap.png',
figsize = (7,20), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, alpha = 0.7, text_size = "x-large")
utils.plot_latent_ext(X_multimaps, annos = labels, mode = "modality", save = multimap_path + f'latent_batches_multimap.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, alpha = 0.7, text_size = "x-large")
utils.plot_latent_ext(X_multimaps, annos = labels, mode = "joint", save = multimap_path + f'latent_joint_multimap.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, alpha = 0.7)
# Stabmap
stabmap_path = result_dir + "/stabmap/"
stabmap_b1 = pd.read_csv(stabmap_path + "stab_b1.csv", index_col = 0).values
stabmap_b2 = pd.read_csv(stabmap_path + "stab_b2.csv", index_col = 0).values
stabmap_b3 = pd.read_csv(stabmap_path + "stab_b3.csv", index_col = 0).values
stabmap_b4 = pd.read_csv(stabmap_path + "stab_b4.csv", index_col = 0).values
stabmap_b5 = pd.read_csv(stabmap_path + "stab_b5.csv", index_col = 0).values
stabmap_b6 = pd.read_csv(stabmap_path + "stab_b6.csv", index_col = 0).values
stabmap_umap = UMAP(n_components = 2, min_dist = 0.4, random_state = 0).fit_transform(np.concatenate((stabmap_b1, stabmap_b2, stabmap_b3, stabmap_b4, stabmap_b5, stabmap_b6), axis = 0))
stabmap_umaps = []
for batch in range(n_batches):
if batch == 0:
start_pointer = 0
end_pointer = start_pointer + zs[batch].shape[0]
stabmap_umaps.append(stabmap_umap[start_pointer:end_pointer,:])
elif batch == (n_batches - 1):
start_pointer = start_pointer + zs[batch - 1].shape[0]
stabmap_umaps.append(stabmap_umap[start_pointer:,:])
else:
start_pointer = start_pointer + zs[batch - 1].shape[0]
end_pointer = start_pointer + zs[batch].shape[0]
stabmap_umaps.append(stabmap_umap[start_pointer:end_pointer,:])
utils.plot_latent_ext(stabmap_umaps, annos = labels, mode = "separate", save = stabmap_path + f'latent_separate_stabmap.png',
figsize = (7,20), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = True, text_size = "large", alpha = 0.7)
utils.plot_latent_ext(stabmap_umaps, annos = labels, mode = "modality", save = stabmap_path + f'latent_batches_stabmap.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, text_size = "large", alpha = 0.7)
utils.plot_latent_ext(stabmap_umaps, annos = labels, mode = "joint", save = stabmap_path + f'latent_joint_stabmap.png',
figsize = (7,5), axis_label = "UMAP", markerscale = 6, s = 5, label_inplace = False, text_size = "large", alpha = 0.7)
# In[]
n_neighbors = knn_indices.shape[1]
# graph connectivity score (gc) measures the batch effect removal per cell identity
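# (roughly the scIB definition: for each cell-identity group, the fraction of its cells that fall in the largest
# connected component of the kNN subgraph restricted to that group, averaged over groups)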
# 1. scMoMaT
knn_graph = np.zeros((knn_indices.shape[0], knn_indices.shape[0]))
knn_graph[np.arange(knn_indices.shape[0])[:, None], knn_indices] = 1
gc_scmomat = bmk.graph_connectivity(G = knn_graph, groups = np.concatenate(labels, axis = 0))
print('GC (scmomat): {:.3f}'.format(gc_scmomat))
# 2. UINMF
gc_uinmf = bmk.graph_connectivity(X = np.concatenate((H1_uinmf, H2_uinmf, H3_uinmf, H4_uinmf, H5_uinmf, H6_uinmf), axis = 0), groups = np.concatenate(labels, axis = 0), k = n_neighbors)
print('GC (UINMF): {:.3f}'.format(gc_uinmf))
# # 2. LIGER
# gc_liger = bmk.graph_connectivity(X = np.concatenate((H1_liger, H2_liger, H3_liger, H4_liger, H5_liger, H6_liger), axis = 0), groups = np.concatenate(labels, axis = 0), k = n_neighbors)
# print('GC (LIGER): {:.3f}'.format(gc_liger))
# 3. Multimap
# NOTE: G_multimap is an affinity graph; closer neighbors have larger values
# argsort from small to large, select the last n_neighbors
G_multimap = sp.load_npz(multimap_path + "multimap_graph.npz").toarray()
knn_indices_multimap = G_multimap.argsort(axis = 1)[:, -n_neighbors:]
knn_graph_multimap = np.zeros_like(G_multimap)
knn_graph_multimap[np.arange(knn_indices_multimap.shape[0])[:, None], knn_indices_multimap] = 1
gc_multimap = bmk.graph_connectivity(G = knn_graph_multimap, groups = np.concatenate(labels, axis = 0), k = n_neighbors)
gc_multimap2 = bmk.graph_connectivity(X = np.concatenate(X_multimaps, axis = 0), groups = np.concatenate(labels, axis = 0), k = n_neighbors)
print('GC (MultiMap Graph): {:.3f}'.format(gc_multimap))
print('GC (MultiMap): {:.3f}'.format(gc_multimap2))
# 4. Stabmap
gc_stabmap = bmk.graph_connectivity(X = np.concatenate((stabmap_b1, stabmap_b2, stabmap_b3, stabmap_b4, stabmap_b5, stabmap_b6), axis = 0), groups = np.concatenate(labels, axis = 0), k = n_neighbors)
print('GC (Stabmap): {:.3f}'.format(gc_stabmap))
# Conservation of biological identity
# NMI, ARI, and F1
# F1 score: rare cell type detection
gt_labels = np.concatenate(labels)
uniq_labels, label_counts = np.unique(gt_labels, return_counts = True)
rare_label = uniq_labels[np.argsort(label_counts)[0]]
gt_rare_labels = np.where(gt_labels == rare_label, 1, 0)
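# the rare type is the least frequent ground-truth label; below, the predicted rare cluster is the Leiden cluster that captures most of those cells, and F1 is computed between the two binary masks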
# 1. scMoMaT
nmi_scmomat = []
ari_scmomat = []
f1_scmomat = []
for resolution in np.arange(0.1, 10, 0.5):
# use the post-processed graph
leiden_labels_scmomat = utils.leiden_cluster(X = None, knn_indices = knn_indices, knn_dists = knn_dists, resolution = resolution)
nmi_scmomat.append(bmk.nmi(group1 = np.concatenate(labels), group2 = leiden_labels_scmomat))
ari_scmomat.append(bmk.ari(group1 = np.concatenate(labels), group2 = leiden_labels_scmomat))
# calculate F1 score
uniq_labels, label_counts = np.unique(leiden_labels_scmomat[np.where(gt_labels == rare_label)[0]], return_counts = True)
predict_rare_label = uniq_labels[np.argsort(label_counts)[-1]]
predict_rare_labels = np.where(leiden_labels_scmomat == predict_rare_label, 1, 0)
f1_scmomat.append(bmk.F1_score(gt_rare_labels, predict_rare_labels))
print('NMI (scMoMaT): {:.3f}'.format(max(nmi_scmomat)))
print('ARI (scMoMaT): {:.3f}'.format(max(ari_scmomat)))
print('F1 (scMoMaT): {:.3f}'.format(max(f1_scmomat)))
# 2. UINMF
nmi_uinmf = []
ari_uinmf = []
f1_uinmf = []
for resolution in np.arange(0.1, 10, 0.5):
leiden_labels_uinmf = utils.leiden_cluster(X = np.concatenate((H1_uinmf, H2_uinmf, H3_uinmf, H4_uinmf, H5_uinmf, H6_uinmf), axis = 0), knn_indices = None, knn_dists = None, resolution = resolution)
nmi_uinmf.append(bmk.nmi(group1 = np.concatenate(labels), group2 = leiden_labels_uinmf))
ari_uinmf.append(bmk.ari(group1 = np.concatenate(labels), group2 = leiden_labels_uinmf))
# calculate F1 score
uniq_labels, label_counts = np.unique(leiden_labels_uinmf[np.where(gt_labels == rare_label)[0]], return_counts = True)
predict_rare_label = uniq_labels[np.argsort(label_counts)[-1]]
predict_rare_labels = np.where(leiden_labels_uinmf == predict_rare_label, 1, 0)
f1_uinmf.append(bmk.F1_score(gt_rare_labels, predict_rare_labels))
print('NMI (UINMF): {:.3f}'.format(max(nmi_uinmf)))
print('ARI (UINMF): {:.3f}'.format(max(ari_uinmf)))
print('F1 (UINMF): {:.3f}'.format(max(f1_uinmf)))
# # 2. Liger
# nmi_liger = []
# ari_liger = []
# for resolution in np.arange(0.1, 10, 0.5):
# leiden_labels_liger = utils.leiden_cluster(X = np.concatenate((H1_liger, H2_liger, H3_liger, H4_liger, H5_liger, H6_liger), axis = 0), knn_indices = None, knn_dists = None, resolution = resolution)
# nmi_liger.append(bmk.nmi(group1 = np.concatenate(labels), group2 = leiden_labels_liger))
# ari_liger.append(bmk.ari(group1 = np.concatenate(labels), group2 = leiden_labels_liger))
# print('NMI (LIGER): {:.3f}'.format(max(nmi_liger)))
# print('ARI (LIGER): {:.3f}'.format(max(ari_liger)))
# 3. Multimap
G_multimap = sp.load_npz(multimap_path + "multimap_graph.npz").toarray()
nmi_multimap = []
ari_multimap = []
f1_multimap = []
for resolution in np.arange(0.1, 10, 0.5):
# leiden_labels_seurat = utils.leiden_cluster(X = np.concatenate(seurat_pcas, axis = 0), knn_indices = None, knn_dists = None, resolution = resolution)
    # MultiMap states that the graph should be used for clustering; the leiden clustering here follows the MultiMap tutorial [Checked]
leiden_labels_multimap = utils.leiden_cluster(affin = G_multimap, resolution = resolution)
nmi_multimap.append(bmk.nmi(group1 = np.concatenate(labels), group2 = leiden_labels_multimap))
ari_multimap.append(bmk.ari(group1 = np.concatenate(labels), group2 = leiden_labels_multimap))
# calculate F1 score
uniq_labels, label_counts = np.unique(leiden_labels_multimap[np.where(gt_labels == rare_label)[0]], return_counts = True)
predict_rare_label = uniq_labels[np.argsort(label_counts)[-1]]
predict_rare_labels = np.where(leiden_labels_multimap == predict_rare_label, 1, 0)
f1_multimap.append(bmk.F1_score(gt_rare_labels, predict_rare_labels))
print('NMI (MultiMap): {:.3f}'.format(max(nmi_multimap)))
print('ARI (MultiMap): {:.3f}'.format(max(ari_multimap)))
print('F1 (MultiMap): {:.3f}'.format(max(f1_multimap)))
# 4. Stabmap
nmi_stabmap = []
ari_stabmap = []
f1_stabmap = []
for resolution in np.arange(0.1, 10, 0.5):
leiden_labels_stabmap = utils.leiden_cluster(X = np.concatenate((stabmap_b1, stabmap_b2, stabmap_b3, stabmap_b4, stabmap_b5, stabmap_b6), axis = 0), knn_indices = None, knn_dists = None, resolution = resolution)
nmi_stabmap.append(bmk.nmi(group1 = np.concatenate(labels), group2 = leiden_labels_stabmap))
ari_stabmap.append(bmk.ari(group1 = np.concatenate(labels), group2 = leiden_labels_stabmap))
# calculate F1 score
uniq_labels, label_counts = np.unique(leiden_labels_stabmap[np.where(gt_labels == rare_label)[0]], return_counts = True)
predict_rare_label = uniq_labels[np.argsort(label_counts)[-1]]
predict_rare_labels = np.where(leiden_labels_stabmap == predict_rare_label, 1, 0)
f1_stabmap.append(bmk.F1_score(gt_rare_labels, predict_rare_labels))
print('NMI (Stabmap): {:.3f}'.format(max(nmi_stabmap)))
print('ARI (Stabmap): {:.3f}'.format(max(ari_stabmap)))
print('F1 (Stabmap): {:.3f}'.format(max(f1_stabmap)))
# Label transfer accuracy
# randomly select half of the cells as query cells
np.random.seed(0)
query_cell = np.array([False] * knn_indices.shape[0])
query_cell[np.random.choice(np.arange(knn_indices.shape[0]), size = int(0.5 * knn_indices.shape[0]), replace = False)] = True
training_cell = (1 - query_cell).astype(np.bool)
query_label = np.concatenate(labels)[query_cell]
training_label = np.concatenate(labels)[training_cell]
# NOTE: the KNN graph should be constructed between training and query cells: each query cell should have n_neighbors training cells around it, which then vote.
# However, the pre-computed kNN graphs for scMoMaT and MultiMap find n_neighbors among all cells (train+query), and it is hard to modify a pre-computed graph to match that requirement.
# We therefore use the pre-computed graphs directly and ignore the query cells when voting, so all methods still use the same n_neighbors.
# scmomat
knn_graph = np.zeros((knn_indices.shape[0], knn_indices.shape[0]))
knn_graph[np.arange(knn_indices.shape[0])[:, None], knn_indices] = 1
knn_graph = knn_graph[query_cell, :][:, training_cell]
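# rows of the sliced graph are query cells and columns are training cells, so each query cell votes using only its training-cell neighbors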
lta_scmomat = bmk.transfer_accuracy(query_label = query_label, train_label = training_label, knn_graph = knn_graph)
# UINMF
lta_uinmf = bmk.transfer_accuracy(query_label = query_label, train_label = training_label,
z_query = np.concatenate((H1_uinmf, H2_uinmf, H3_uinmf, H4_uinmf, H5_uinmf, H6_uinmf), axis = 0)[query_cell,:],
z_train = np.concatenate((H1_uinmf, H2_uinmf, H3_uinmf, H4_uinmf, H5_uinmf, H6_uinmf), axis = 0)[training_cell,:])
# MultiMap
G_multimap = sp.load_npz(multimap_path + "multimap_graph.npz").toarray()
knn_indices_multimap = G_multimap.argsort(axis = 1)[:, -n_neighbors:]
knn_graph_multimap = np.zeros_like(G_multimap)
knn_graph_multimap[np.arange(knn_indices_multimap.shape[0])[:, None], knn_indices_multimap] = 1
lta_multimap = bmk.transfer_accuracy(query_label = query_label, train_label = training_label, knn_graph = knn_graph_multimap[query_cell, :][:, training_cell])
lta_multimap2 = bmk.transfer_accuracy(query_label = query_label, train_label = training_label,
z_query = np.concatenate(X_multimaps, axis = 0)[query_cell,:],
z_train = np.concatenate(X_multimaps, axis = 0)[training_cell,:])
# stabmap
lta_stabmap = bmk.transfer_accuracy(query_label = query_label, train_label = training_label,
z_query = np.concatenate((stabmap_b1, stabmap_b2, stabmap_b3, stabmap_b4, stabmap_b5, stabmap_b6), axis = 0)[query_cell,:],
z_train = np.concatenate((stabmap_b1, stabmap_b2, stabmap_b3, stabmap_b4, stabmap_b5, stabmap_b6), axis = 0)[training_cell,:])
print("Label transfer accuracy (scMoMaT): {:.3f}".format(lta_scmomat))
print("Label transfer accuracy (UINMF): {:.3f}".format(lta_uinmf))
print("Label transfer accuracy (MultiMap Graph): {:.3f}".format(lta_multimap))
print("Label transfer accuracy (MultiMap): {:.3f}".format(lta_multimap2))
print("Label transfer accuracy (Stabmap): {:.3f}".format(lta_stabmap))
# scores = pd.DataFrame(columns = ["methods", "resolution", "NMI", "ARI", "GC"])
# scores["NMI"] = np.array(nmi_scmomat + nmi_uinmf + nmi_liger + nmi_multimap)
# scores["ARI"] = np.array(ari_scmomat + ari_uinmf + ari_liger + ari_multimap)
# scores["GC"] = np.array([gc_scmomat] * len(nmi_scmomat) + [gc_uinmf] * len(nmi_uinmf) + [gc_liger] * len(nmi_liger) +[gc_multimap] * len(ari_multimap))
# scores["resolution"] = np.array([x for x in np.arange(0.1, 10, 0.5)] * 4)
# scores["methods"] = np.array(["scMoMaT"] * len(nmi_scmomat) + ["UINMF"] * len(nmi_uinmf) + ["LIGER"] * len(nmi_liger) + ["MultiMap"] * len(ari_multimap))
# NO LIGER
scores = pd.DataFrame(columns = ["methods", "resolution", "NMI", "ARI", "GC", "LTA", "F1"])
scores["NMI"] = np.array(nmi_scmomat + nmi_uinmf + nmi_multimap + nmi_stabmap)
scores["ARI"] = np.array(ari_scmomat + ari_uinmf + ari_multimap + ari_stabmap)
scores["F1"] = np.array(f1_scmomat + f1_uinmf + f1_multimap + f1_stabmap)
scores["GC"] = np.array([gc_scmomat] * len(nmi_scmomat) + [gc_uinmf] * len(nmi_uinmf) + [gc_multimap] * len(ari_multimap) + [gc_stabmap] * len(ari_stabmap))
scores["LTA"] = np.array([lta_scmomat] * len(nmi_scmomat) + [lta_uinmf] * len(nmi_uinmf) + [lta_multimap] * len(ari_multimap) + [lta_stabmap] * len(ari_stabmap))
scores["resolution"] = np.array([x for x in np.arange(0.1, 10, 0.5)] * 4)
scores["methods"] = np.array(["scMoMaT"] * len(nmi_scmomat) + ["UINMF"] * len(nmi_uinmf) + ["MultiMap"] * len(ari_multimap) + ["Stabmap"] * len(ari_stabmap))
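# one row per (method, resolution) pair; GC and LTA do not depend on the clustering resolution, so they are repeated across the sweep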
scores.to_csv(result_dir + "/score.csv")
# In[]
if True:
nmi_scmomat = []
ari_scmomat = []
gc_scmomat = []
lta_scmomat = []
f1_scmomat = []
nmi_uinmf = []
ari_uinmf = []
gc_uinmf = []
lta_uinmf = []
f1_uinmf = []
nmi_liger = []
ari_liger = []
gc_liger = []
lta_liger = []
f1_liger = []
nmi_multimap = []
ari_multimap = []
gc_multimap = []
lta_multimap = []
f1_multimap = []
nmi_stabmap = []
ari_stabmap = []
gc_stabmap = []
lta_stabmap = []
f1_stabmap = []
# for seed in [1,2,3,4,9]:
# ARI higher: 2, 3, 9
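    # seed 8 is skipped; for every remaining seed, load the per-resolution score table and keep each method's best value of every metric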
for seed in [1,2,3,4,5,6,7,9]:
result_dir = f'simulated/6b16c_test_{seed}/imbalanced/'
scores = pd.read_csv(result_dir + "score.csv", index_col = 0)
scores_scmomat = scores[scores["methods"] == "scMoMaT"]
scores_uinmf = scores[scores["methods"] == "UINMF"]
# scores_liger = scores[scores["methods"] == "LIGER"]
scores_multimap = scores[scores["methods"] == "MultiMap"]
scores_stabmap = scores[scores["methods"] == "Stabmap"]
nmi_scmomat.append(np.max(scores_scmomat["NMI"].values))
ari_scmomat.append(np.max(scores_scmomat["ARI"].values))
gc_scmomat.append(np.max(scores_scmomat["GC"].values))
lta_scmomat.append(np.max(scores_scmomat["LTA"].values))
f1_scmomat.append(np.max(scores_scmomat["F1"].values))
nmi_uinmf.append(np.max(scores_uinmf["NMI"].values))
ari_uinmf.append(np.max(scores_uinmf["ARI"].values))
gc_uinmf.append(np.max(scores_uinmf["GC"].values))
lta_uinmf.append(np.max(scores_uinmf["LTA"].values))
f1_uinmf.append(np.max(scores_uinmf["F1"].values))
# nmi_liger.append(np.max(scores_liger["NMI"].values))
# ari_liger.append(np.max(scores_liger["ARI"].values))
# gc_liger.append(np.max(scores_liger["GC"].values))
# lta_liger.append(np.max(scores_liger["LTA"].values))
nmi_multimap.append(np.max(scores_multimap["NMI"].values))
ari_multimap.append(np.max(scores_multimap["ARI"].values))
gc_multimap.append(np.max(scores_multimap["GC"].values))
lta_multimap.append(np.max(scores_multimap["LTA"].values))
f1_multimap.append(np.max(scores_multimap["F1"].values))
nmi_stabmap.append(np.max(scores_stabmap["NMI"].values))
ari_stabmap.append(np.max(scores_stabmap["ARI"].values))
gc_stabmap.append(np.max(scores_stabmap["GC"].values))
lta_stabmap.append(np.max(scores_stabmap["LTA"].values))
f1_stabmap.append(np.max(scores_stabmap["F1"].values))
new_score = pd.DataFrame()
new_score["method"] = ["scMoMaT"] * len(ari_scmomat) + ["MultiMap"] * len(ari_multimap) + ["UINMF"] * len(ari_uinmf) + ["Stabmap"] * len(ari_stabmap)
new_score["ARI"] = ari_scmomat + ari_multimap + ari_uinmf + ari_stabmap
new_score["NMI"] = nmi_scmomat + nmi_multimap + nmi_uinmf + nmi_stabmap
new_score["GC"] = gc_scmomat + gc_multimap + gc_uinmf + gc_stabmap
new_score["LTA"] = lta_scmomat + lta_multimap + lta_uinmf + lta_stabmap
new_score["F1"] = f1_scmomat + f1_multimap + f1_uinmf + f1_stabmap
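    # long-format table: one row per (method, seed) pair, consumed directly by the seaborn plots below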
import seaborn as sns
plt.rcParams["font.size"] = 20
fig = plt.figure(figsize = (32, 5))
ax = fig.subplots(nrows = 1, ncols = 5)
sns.boxplot(data = new_score, x = "method", y = "GC", ax = ax[0])
sns.stripplot(data = new_score, x = "method", y = "GC", ax = ax[0], color = "black")
sns.boxplot(data = new_score, x = "method", y = "ARI", ax = ax[1])
sns.stripplot(data = new_score, x = "method", y = "ARI", ax = ax[1], color = "black")
sns.boxplot(data = new_score, x = "method", y = "NMI", ax = ax[2])
sns.stripplot(data = new_score, x = "method", y = "NMI", ax = ax[2], color = "black")
sns.boxplot(data = new_score, x = "method", y = "LTA", ax = ax[3])
sns.stripplot(data = new_score, x = "method", y = "LTA", ax = ax[3], color = "black")
sns.boxplot(data = new_score, x = "method", y = "F1", ax = ax[4])
sns.stripplot(data = new_score, x = "method", y = "F1", ax = ax[4], color = "black")
ax[0].set_title("Graph connectivity")
ax[1].set_title("ARI")
ax[2].set_title("NMI")
    ax[3].set_title("Label Transfer Accuracy")
ax[4].set_title("Rare cell type detection")
fig.tight_layout()
fig.savefig("simulated/scores_imbalanced.png", bbox_inches = "tight")
# %%
| null |
test/test_simulated_imbalanced.py
|
test_simulated_imbalanced.py
|
py
| 34,257 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.TruncatedSVD",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "scmomat.utils.preprocess",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "scmomat.utils.preprocess",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "umap.UMAP",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.post_process",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.leiden_cluster",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "scmomat.umap_batch.UMAP",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "scmomat.umap_batch",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "umap.UMAP",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.load_npz",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "umap.UMAP",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "scmomat.utils.plot_latent_ext",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.graph_connectivity",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.graph_connectivity",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.load_npz",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "numpy.zeros_like",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.graph_connectivity",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.graph_connectivity",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.graph_connectivity",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.leiden_cluster",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "scmomat.bmk.nmi",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 433,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.ari",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.F1_score",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.leiden_cluster",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.nmi",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.ari",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.F1_score",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.load_npz",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.leiden_cluster",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "scmomat.bmk.nmi",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.ari",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.F1_score",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "scmomat.utils.leiden_cluster",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "scmomat.utils",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.nmi",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.ari",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.F1_score",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 508,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 518,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "numpy.bool",
"line_number": 519,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.transfer_accuracy",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "scmomat.bmk.transfer_accuracy",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 533,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.load_npz",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "numpy.zeros_like",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.transfer_accuracy",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "scmomat.bmk.transfer_accuracy",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk.transfer_accuracy",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "scmomat.bmk",
"line_number": 548,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 611,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 618,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 638,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 641,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 657,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 657,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "seaborn.boxplot",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 665,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 668,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 669,
"usage_type": "call"
}
] |
571621997
|
from django import forms
from django.conf import settings
from .models import Event
from .exceptions import EventStartDateTimeException
import datetime
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = [
'title',
'place',
'description',
'type',
]
widgets = {
'place': forms.TextInput(attrs={"type": "hidden"}),
'type': forms.NumberInput(attrs={
'min': settings.TYPE_CLOSE_EVENT,
'max': settings.TYPE_OPEN_EVENT,
'default': settings.TYPE_CLOSE_EVENT}),
'title': forms.TextInput()
}
labels = {
'place': 'Search place in map',
'type': "Select type to event"
}
start_date_time = forms.DateTimeField(input_formats=[settings.DATETIME_FORMAT],
label='start',
widget=forms.DateTimeInput(attrs={'class': "form-control"})
)
    def is_valid(self):
        if not super(EventForm, self).is_valid():
            return False
        return self.is_valid_start_dtime()
def is_valid_start_dtime(self):
        dt_event = self.cleaned_data.get('start_date_time')
if not dt_event:
return False
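        # build a timezone-aware 'now' in the event's own timezone so the comparison never mixes naive and aware datetimes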
event_info = dt_event.astimezone().tzinfo
now = datetime.datetime.now(tz=event_info)
if now > dt_event:
raise EventStartDateTimeException("Event cannot be earlier than current date.")
return True
def save(self, commit=True):
event = Event()
event.create(
self.cleaned_data.get('title', ''),
self.cleaned_data.get('description', ''),
self.cleaned_data.get('place', ''),
self.cleaned_data.get('type', 0),
self.cleaned_data.get('start_date_time')
)
if commit:
event.save()
return event
| null |
events/forms.py
|
forms.py
|
py
| 2,018 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Event",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.forms.NumberInput",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.TYPE_CLOSE_EVENT",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.TYPE_OPEN_EVENT",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.TYPE_CLOSE_EVENT",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.forms.DateTimeField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATETIME_FORMAT",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.forms.DateTimeInput",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "exceptions.EventStartDateTimeException",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Event",
"line_number": 53,
"usage_type": "call"
}
] |
444671274
|
import re
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.html import strip_tags
from django import forms
from sidrun.models import Tag
class CustomSelectMultipleTags(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.name
class AddTaskForm(forms.ModelForm):
tags = CustomSelectMultipleTags(widget=forms.CheckboxSelectMultiple, queryset=Tag.objects.all())
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(AddTaskForm, self).__init__(*args, **kwargs)
def clean(self):
data = super(AddTaskForm, self).clean()
deadline = data.get('deadline')
time_to_complete_task = data.get('time_to_complete_task')
try:
hours_between_dates = (deadline - timezone.now()).total_seconds() / 3600
except TypeError:
return data
if time_to_complete_task > hours_between_dates:
raise ValidationError("Hours to complete task has to fit between now and deadline!")
return data
def clean_deadline(self):
deadline = self.cleaned_data.get("deadline")
if deadline and deadline < timezone.now():
raise ValidationError("Please enter a deadline that is not in the past!")
return deadline
def save(self, commit=True):
instance = super(AddTaskForm, self).save(commit=False)
if '_publish' in self.request.POST:
instance.start_date = timezone.now()
if commit:
instance.save()
return instance
class CustomForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(CustomForm, self).__init__(*args, **kwargs)
self.regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
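        # Django-style URL regex; every href pulled out of the rich-text fields below is validated against it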
def need_to_validate(self):
return '_preview' in self.request.POST
def clean_body(self):
body = self.data.get("body") or ''
if self.need_to_validate():
min_body_length = 280
body_length = len(strip_tags(body))
if body_length < min_body_length:
raise ValidationError(
"Body length needs to be at least %d characters. You have %d." % (min_body_length, body_length))
return body
def clean_summary_pitch(self):
summary_pitch = self.data.get("summary_pitch") or ''
if self.need_to_validate():
min_summary_length = 140
summary_length = len(strip_tags(summary_pitch))
if summary_length < min_summary_length:
raise ValidationError("Summary pitch length needs to be at least %d characters. You have %d." % (
min_summary_length, summary_length))
return summary_pitch
def clean_conclusion(self):
conclusion = self.data.get("conclusion") or ''
if self.need_to_validate():
min_conclusion_length = 140
conclusion_length = len(strip_tags(conclusion))
if conclusion_length < min_conclusion_length:
raise ValidationError("Conclusion length needs to be at least %d characters. You have %d." % (
min_conclusion_length, conclusion_length))
return conclusion
def clean_references(self):
        references = self.data.get("references") or ''
if self.need_to_validate() and self.instance.task.require_references:
references_prepared_for_validation = re.findall(r'href=[\'"]?([^\'" >]+)', references)
if not references_prepared_for_validation:
raise ValidationError("There needs to be at least one url address in references. Please use the link icon to add one!")
validation_errors = []
for reference in references_prepared_for_validation:
if not self.regex.search(force_text(reference)):
                    validation_errors.append(ValidationError("'%s' is not a valid url address." % reference))
if validation_errors:
raise ValidationError(validation_errors)
return references
def clean_videos(self):
        videos = self.data.get("videos") or ''
if self.need_to_validate() and self.instance.task.require_videos:
video_urls_prepared_for_validation = re.findall(r'href=[\'"]?([^\'" >]+)', videos)
if not video_urls_prepared_for_validation:
raise ValidationError("There needs to be at least one url address in videos. Please use the link icon to add one!")
validation_errors = []
for video in video_urls_prepared_for_validation:
if not self.regex.search(force_text(video)):
validation_errors.append(ValidationError("'%s' is not a valid url address." % video))
if validation_errors:
raise ValidationError(validation_errors)
return videos
| null |
sidrun/forms.py
|
forms.py
|
py
| 5,397 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.ModelMultipleChoiceField",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.forms.CheckboxSelectMultiple",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sidrun.models.Tag.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sidrun.models.Tag.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sidrun.models.Tag",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "django.utils.html.strip_tags",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.utils.html.strip_tags",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.utils.html.strip_tags",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.utils.encoding.force_text",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.utils.encoding.force_text",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 122,
"usage_type": "call"
}
] |
412240504
|
from time import strftime
from baobab.lims.config import VOLUME_UNITS
from bika.lims import PMF
from bika.lims import logger
from bika.lims.browser.bika_listing import WorkflowAction
from bika.lims.workflow import doActionFor
from Products.Archetypes.exceptions import ReferenceException
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFCore.utils import getToolByName
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTPServerDisconnected, SMTPRecipientsRefused
import plone
import transaction
class BiospecimenWorkflowAction(WorkflowAction):
def __call__(self):
form = self.request.form
plone.protect.CheckAuthenticator(form)
action, _ = WorkflowAction._get_form_workflow_action(self)
if type(action) in (list, tuple):
action = action[0]
# Call out to the workflow action method
method_name = 'workflow_action_' + action
method = getattr(self, method_name, False)
if method:
method()
else:
WorkflowAction.__call__(self)
def workflow_action_receive(self):
form = self.request.form
# print form
selected_biospecimens = WorkflowAction._get_selected_items(self)
biospecimens = []
for uid in selected_biospecimens.keys():
if not form['Volume'][0][uid] or \
not form['Unit'][0][uid]: # or \
# not form['SubjectID'][0][uid]:
continue
try:
obj = selected_biospecimens.get(uid, None)
if 'Volume' in form and form['Volume'][0][uid]:
obj.getField('Volume').set(obj, form['Volume'][0][uid])
if 'SubjectID' in form and form['SubjectID'][0][uid]:
obj.getField('SubjectID').set(obj, form['SubjectID'][0][uid])
if 'Urgent' in form and form['Urgent'][0][uid]:
obj.getField('Urgent').set(obj, form['Urgent'][0][uid])
if 'SamplingDate' in form and form['SamplingDate'][0][uid]:
obj.getField('SamplingDate').set(obj, form['SamplingDate'][0][uid])
# unit = 'ml'
# for u in VOLUME_UNITS:
# if u['ResultValue'] == form['Unit'][0][uid]:
# unit = u['ResultText']
if 'Unit' in form and form['Unit'][0][uid]:
obj.getField('Unit').set(obj, form['Unit'][0][uid])
if 'CountryOfOrigin' in form and form['CountryOfOrigin'][0][uid]:
obj.getField('CountryOfOrigin').set(obj, form['CountryOfOrigin'][0][uid])
if 'SiteCentreOfOrigin' in form and form['SiteCentreOfOrigin'][0][uid]:
obj.getField('SiteCentreOfOrigin').set(obj, form['SiteCentreOfOrigin'][0][uid])
location = obj.getStorageLocation()
if location:
doActionFor(location, 'occupy')
obj.reindexObject()
biospecimens.append(obj)
except ReferenceException:
continue
message = PMF("Changes saved.")
self.context.plone_utils.addPortalMessage(message, 'info')
if biospecimens:
samples_received = biospecimens[:]
for biospecimen in biospecimens:
biospecimen.from_grid = True
doActionFor(biospecimen, 'receive')
for partition in biospecimen.objectValues('SamplePartition'):
doActionFor(partition, 'receive')
self.send_sample_status_update_emails(samples_received, 'receive')
# raise Exception('samples received')
self.destination_url = self.context.absolute_url()
if form['portal_type'] == 'Kit' or \
form['portal_type'] == 'SampleBatch':
self.destination_url = form['view_url']
self.destination_url += '/biospecimens'
self.request.response.redirect(self.destination_url)
def send_sample_status_update_emails(self, biospecimens, samples_status):
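# group the biospecimens by their parent project and send one summary email per project to the client and every lab contact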
projects = {}
for biospecimen in biospecimens:
print(biospecimen)
project = biospecimen.aq_parent
if project.Title() in projects:
projects[project.Title()].append(biospecimen)
else:
projects[project.Title()] = [biospecimen]
for project, samples in projects.iteritems():
sample_parent = None
text_samples = []
for sample in samples:
if not sample_parent:
sample_parent = sample.aq_parent
barcode = sample.getField('Barcode').get(sample)
volume = sample.getField('Volume').get(sample) + ' ' + sample.getField('Unit').get(sample)
text_samples.append('Barcode: %s: - Volume: %s' % (barcode, volume))
if samples_status == 'receive':
subject = 'Samples received: '
elif samples_status == 'due':
subject = 'Samples due: '
subject += 'Total %s - Project %s' % (str(len(text_samples)), sample_parent.Title())
if samples_status == 'receive':
email_message = 'The following samples were received.\n\n'
elif samples_status == 'due':
email_message = 'The following samples are due.\n\n'
else:
raise Exception('Sample new status cannot be %s' %samples_status)
email_message += '\n'.join(text_samples)
client = sample_parent.getClient()
sender = client.EmailAddress
receiver = sender
self.send_email(sender, receiver, subject, email_message)
lab_contacts = sample_parent.getLabContacts()
for contact in lab_contacts:
self.send_email(sender, contact.EmailAddress, subject, email_message)
def send_email(self, sender, receiver, subject, email_message):
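# build a plain-text MIME message and send it through the portal MailHost; SMTP disconnects are logged, refused recipients raise a WorkflowException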
mime_msg = MIMEMultipart('related')
mime_msg['Subject'] = subject
mime_msg['From'] = sender
mime_msg['To'] = receiver
msg_txt = MIMEText(email_message, 'plain')
mime_msg.attach(msg_txt)
try:
host = getToolByName(self, 'MailHost')
host.send(mime_msg.as_string(), immediate=True)
except SMTPServerDisconnected as msg:
logger.warn("SMTPServerDisconnected: %s." % msg)
except SMTPRecipientsRefused as msg:
raise WorkflowException(str(msg))
except Exception as e:
logger.warn('Receive sample email exception: %s' %str(e))
def workflow_action_sample_due(self):
form = self.request.form
selected_biospecimens = WorkflowAction._get_selected_items(self)
biospecimens = []
for uid in selected_biospecimens.keys():
if not form['Barcode'][0][uid] or \
not form['Type'][0][uid]:
continue
try:
obj = selected_biospecimens.get(uid, None)
if 'SamplingDate' in form and form['SamplingDate'][0][uid]:
obj.getField('SamplingDate').set(obj, form['SamplingDate'][0][uid])
obj.getField('Barcode').set(obj, form['Barcode'][0][uid])
obj.getField('SampleType').set(obj, form['Type'][0][uid])
obj.setId(form['Barcode'][0][uid])
if 'CountryOfOrigin' in form and form['CountryOfOrigin'][0][uid]:
obj.getField('CountryOfOrigin').set(obj, form['CountryOfOrigin'][0][uid])
if 'SiteCentreOfOrigin' in form and form['SiteCentreOfOrigin'][0][uid]:
obj.getField('SiteCentreOfOrigin').set(obj, form['SiteCentreOfOrigin'][0][uid])
obj.edit(SampleID=obj.getId())
obj.reindexObject()
biospecimens.append(obj)
except ReferenceException:
continue
message = PMF("Changes saved.")
self.context.plone_utils.addPortalMessage(message, 'info')
if biospecimens:
samples_due = biospecimens[:]
for biospecimen in biospecimens:
doActionFor(biospecimen, 'sample_due')
for partition in biospecimen.objectValues('SamplePartition'):
doActionFor(partition, 'sample_due')
self.send_sample_status_update_emails(samples_due, 'due')
self.destination_url = self.context.absolute_url()
if form['portal_type'] == 'Kit' or \
form['portal_type'] == 'SampleBatch':
self.destination_url = form['view_url']
self.destination_url += '/biospecimens'
self.request.response.redirect(self.destination_url)
def workflow_action_dispose(self):
form = self.request.form
selected_biospecimens = WorkflowAction._get_selected_items(self)
biospecimens = []
for uid in selected_biospecimens.keys():
try:
obj = selected_biospecimens.get(uid, None)
if 'Rebleed' in form and form['Rebleed'][0][uid]:
obj.getField('Rebleed').set(obj, form['Rebleed'][0][uid])
else:
obj.getField('Rebleed').set(obj, 'No')
obj.edit(SampleID=obj.getId())
obj.reindexObject()
biospecimens.append(obj)
except ReferenceException:
continue
message = PMF("Changes saved.")
self.context.plone_utils.addPortalMessage(message, 'info')
for biospecimen in biospecimens:
doActionFor(biospecimen, 'dispose')
for partition in biospecimen.objectValues('SamplePartition'):
doActionFor(partition, 'dispose')
self.destination_url = self.context.absolute_url()
if form['portal_type'] == 'Kit' or \
form['portal_type'] == 'SampleBatch':
self.destination_url = form['view_url']
self.destination_url += '/biospecimens'
self.request.response.redirect(self.destination_url)
def workflow_action_return(self):
form = self.request.form
selected_biospecimens = WorkflowAction._get_selected_items(self)
biospecimens = []
for uid in selected_biospecimens.keys():
try:
obj = selected_biospecimens.get(uid, None)
if 'Rebleed' in form and form['Rebleed'][0][uid]:
obj.getField('Rebleed').set(obj, form['Rebleed'][0][uid])
else:
obj.getField('Rebleed').set(obj, 'No')
obj.edit(SampleID=obj.getId())
obj.reindexObject()
biospecimens.append(obj)
except ReferenceException:
continue
message = PMF("Changes saved.")
self.context.plone_utils.addPortalMessage(message, 'info')
for biospecimen in biospecimens:
doActionFor(biospecimen, 'return')
for partition in biospecimen.objectValues('SamplePartition'):
doActionFor(partition, 'return')
self.destination_url = self.context.absolute_url()
if form['portal_type'] == 'Kit' or \
form['portal_type'] == 'SampleBatch':
self.destination_url = form['view_url']
self.destination_url += '/biospecimens'
self.request.response.redirect(self.destination_url)
def workflow_action_delete(self):
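# hard-delete the selected samples from their parent folder, committing after each one so earlier deletions survive a later failure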
form = self.request.form
selected_biospecimens = WorkflowAction._get_selected_items(self)
for uid in selected_biospecimens.keys():
try:
# raise Exception('Test exception')
obj = selected_biospecimens.get(uid, None)
obj.aq_parent.manage_delObjects([obj.getId()])
transaction.commit()
self.context.plone_utils.addPortalMessage('Deleted sample: %s' %obj.Title(), 'info')
except Exception as e:
self.context.plone_utils.addPortalMessage(str(e), 'error')
continue
# message = PMF("Changes saved.")
# self.context.plone_utils.addPortalMessage(message, 'info')
self.destination_url = self.context.absolute_url()
if form['portal_type'] == 'Kit' or \
form['portal_type'] == 'SampleBatch':
self.destination_url = form['view_url']
self.destination_url += '/biospecimens/folder_view?list_review_state=returned_disposed&list_sort_on=sortable_title'
self.request.response.redirect(self.destination_url)
| null |
baobab/lims/browser/biospecimen/workflow.py
|
workflow.py
|
py
| 12,711 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "plone.protect.CheckAuthenticator",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "plone.protect",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_form_workflow_action",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction.__call__",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_selected_items",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "Products.Archetypes.exceptions.ReferenceException",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "bika.lims.PMF",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "Products.CMFCore.utils.getToolByName",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTPServerDisconnected",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "bika.lims.logger.warn",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "bika.lims.logger",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "smtplib.SMTPRecipientsRefused",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "Products.CMFCore.WorkflowCore.WorkflowException",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "bika.lims.logger.warn",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "bika.lims.logger",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_selected_items",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.exceptions.ReferenceException",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "bika.lims.PMF",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_selected_items",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.exceptions.ReferenceException",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "bika.lims.PMF",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_selected_items",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.exceptions.ReferenceException",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "bika.lims.PMF",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "bika.lims.workflow.doActionFor",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction._get_selected_items",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "bika.lims.browser.bika_listing.WorkflowAction",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "transaction.commit",
"line_number": 289,
"usage_type": "call"
}
] |
158398789
|
#!/usr/bin/env python
# encoding: utf-8
from random import choice
import string
import logging
from termcolor import colored
import os, sys
class ColorLogFiler(logging.StreamHandler):
""" Override logging class to enable terminal colors """
def emit(self, record):
try:
msg = self.format(record)
msg = msg.replace("[+]",colored("[+]", "green"))
msg = msg.replace("[-]",colored("[-]", "green"))
msg = msg.replace("[!]",colored("[!]", "red"))
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
def randomAlpha(length):
""" Returns a random alphabetic string of length 'length' """
key = ''
for i in range(length): # @UnusedVariable
key += choice(string.ascii_lowercase)
return key
def getRunningApp():
if getattr(sys, 'frozen', False):
return sys.executable
else:
return os.path.abspath(__file__)
class MSTypes():
XL="Excel"
XL97="Excel97"
WD="Word"
WD97="Word97"
PPT="PowerPoint"
PPT97="PowerPoint97"
PUB="Publisher"
VBA="VBA"
UNKNOWN = "Unknown"
@classmethod
def guessApplicationType(cls, documentPath):
""" Guess MS office application type based on extension """
result = ""
extension = os.path.splitext(documentPath)[1]
if ".xls" == extension:
result = cls.XL97
elif ".xlsx" == extension or extension == ".xlsm":
result = cls.XL
elif ".doc" == extension:
result = cls.WD97
elif ".docx" == extension or extension == ".docm":
result = cls.WD
elif ".ppt" == extension:
result = cls.PPT97
elif ".pptm" == extension or extension == ".pptx":
result = cls.PPT
elif ".pub" == extension:
result = cls.PUB
elif ".vba" == extension:
result = cls.VBA
else:
result = cls.UNKNOWN
return result
| null |
src/common/utils.py
|
utils.py
|
py
| 2,133 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.StreamHandler",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "termcolor.colored",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.executable",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
}
] |
45273461
|
import sys
import math
import random
import maya.OpenMaya as om
import maya.OpenMayaMPx as omMPx
import maya.mel as mel
kPluginNodeTypeName = "MASH_Delay"
mashDelayId = om.MTypeId(0x0011BE07)
class mashDelay( omMPx.MPxNode ):
inputs = om.MObject()
outputArray = om.MObject()
#AETemplate embedded in the .py file, get me.
mel.eval('''
global proc AEMASH_DelayTemplate( string $nodeName )
{
editorTemplate -callCustom "headerIcons_MASH" "headerIconsEdit_MASH" "ae_MASH_Delay" "Delay";
editorTemplate -beginScrollLayout;
editorTemplate -beginLayout "MASH Delay" -collapse 0;
editorTemplate -label "Time Step" -addControl "timeStep";
editorTemplate -label "Time Variance" -addControl "timeVariance";
editorTemplate -label "Time Offset" -addControl "offset";
editorTemplate -beginLayout "Inputs" -collapse 1;
editorTemplate -ccu "createInputs_MASH" "editInputs_MASH" "inputs" "Leader Object";
editorTemplate -callCustom "AEaddLeaderButtons" "AEaddLeaderButtonsEdit" "";
//editorTemplate -label "Inputs" -addControl "inputs";
editorTemplate -label "Time" -addControl "time";
editorTemplate -suppress "inIterations";
editorTemplate -endLayout;
editorTemplate -endLayout;
AEdependNodeTemplate $nodeName;
editorTemplate -addExtraControls;
editorTemplate -endScrollLayout;
}
global proc AEaddLeaderButtons ( string $attr )
{
string $nodeName[];
tokenize($attr, ".", $nodeName);
button -label "Connect Leader" -c ("delayButtonCMDS " + $nodeName[0] + " 1")
connLeadMASHButton;
separator -w 100 -h 5 -hr 1 -st "none";
}
global proc AEaddLeaderButtonsEdit ( string $attr )
{
string $nodeName[];
tokenize($attr, ".", $nodeName);
button -e -c ("distButtonCMDS " + $nodeName[0] + " 1")
connLeadMASHButton;
}
global proc delayButtonCMDS (string $nodeName, int $whichCMD)
{
if ($whichCMD == 1)
{
string $obj[] = `ls -sl -tr`;
if (size($obj) > 0) {
connectAttr -force ($obj[0]+".translate") ($nodeName+".inputs");
print "Translate connected.";
}
else {
warning "Please select a translate.";
}
}
evalDeferred("updateAE " + $nodeName);
}
''')
def __init__( self ):
omMPx.MPxNode.__init__( self )
def compute(self, plug, dataBlock):
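# for every index, evaluate this node's own input plugs at an earlier time (offset by step, random variance and the global offset) via MDGContext, yielding a per-point time-delayed copy of the leader translate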
if plug != mashDelay.outputArray:
return om.kUnknownParameter
outResultArray = om.MVectorArray()
outResultArray.clear()
#Get input array
inputArray_dataHandle = dataBlock.inputValue(mashDelay.inputArray)
#Get output array
outputArray_dataHandle = dataBlock.outputValue(mashDelay.outputArray)
numberOfCalcs = dataBlock.inputValue(mashDelay.inIterations).asInt()
timeStepValue = dataBlock.inputValue(mashDelay.timeStep).asFloat()
timeVarianceValue = dataBlock.inputValue(mashDelay.timeVar).asFloat()
timeOffsetValue = dataBlock.inputValue(mashDelay.timeOffset).asFloat()
inputValues = dataBlock.inputValue(mashDelay.inputs).asFloat3()
currentTime = dataBlock.inputValue(self.aTime).asTime()
time = currentTime.value()
#Organise the input array
inDArray = om.MVectorArray()
inDArray.clear()
inDArrData = dataBlock.inputValue(self.inputArray).data()
inDArrFn = om.MFnVectorArrayData(inDArrData)
inDArray = inDArrFn.array()
testRead = om.MVector(0.0,0.0,0.0)
#Set the outputArray
outDData = dataBlock.outputValue(mashDelay.outputArray)
nData = om.MFnVectorArrayData()
#Get the time
currentTime = dataBlock.inputValue(self.aTime).asTime()
#are there more items in the input array than calculations from the waiter? - This almost certainly means echo mode, so compensate for that.
if inDArray.length() > numberOfCalcs:
numberOfCalcs = inDArray.length()
for i in range(numberOfCalcs):
if (inDArray.length() > 0):
testRead = om.MVector(0.0,0.0,0.0)
testRead = inDArray[i]
# Get the element index
index = i
random.seed(i)
variation = random.uniform(0.0,timeVarianceValue)
stepValue = (timeStepValue/numberOfCalcs)*i
calcTime = (time-variation)-stepValue-timeOffsetValue
#get the results at the previous time
ctx = om.MDGContext(om.MTime(calcTime))
thisNode = self.thisMObject()
fnThisNode = om.MFnDependencyNode ( thisNode )
plugX = fnThisNode.findPlug( 'inputs0' )
plugY = fnThisNode.findPlug( 'inputs1' )
plugZ = fnThisNode.findPlug( 'inputs2' )
outX = plugX.asFloat(ctx)
outY = plugY.asFloat(ctx)
outZ = plugZ.asFloat(ctx)
inNormalArray = om.MFloatArray()
inNormalArray.clear()
#if there is an input array, add those values
if (inDArray.length() > 0):
resultX = testRead.x+outX
resultY = testRead.y+outY
resultZ = testRead.z+outZ
#
else:
resultX = outX
resultY = outY
resultZ = outZ
#add the results to the out array
thisResult = om.MVector(resultX,resultY,resultZ)
outResultArray.append(thisResult)
#set the output
nDataObj = nData.create(outResultArray)
outDData.setMObject(nDataObj)
dataBlock.setClean(plug)
def nodeCreator():
return omMPx.asMPxPtr( mashDelay() )
def nodeInitializer():
defaultVectorArray = om.MVectorArray()
dArrayDataFn = om.MFnVectorArrayData()
dArrayDataFn.create( defaultVectorArray )
tAttr = om.MFnTypedAttribute()
mashDelay.inputArray = tAttr.create("inputArray", "inArray", om.MFnData.kVectorArray, dArrayDataFn.object())
tAttr.setWritable(1)
tAttr.setStorable(0)
tAttr.setReadable(1)
tAttr.setKeyable(0)
tAttr.setHidden(1)
mashDelay.addAttribute( mashDelay.inputArray)
nAttr = om.MFnNumericAttribute()
mashDelay.inIterations = nAttr.create ( "inIterations", "cal", om.MFnNumericData.kInt, 0 )
nAttr.setStorable(0)
nAttr.setKeyable(0)
tAttr.setHidden(1)
mashDelay.addAttribute ( mashDelay.inIterations )
tAttr = om.MFnTypedAttribute()
mashDelay.outputArray = tAttr.create("outputArray", "outArray", om.MFnData.kVectorArray, dArrayDataFn.object())
tAttr.setWritable(1)
tAttr.setStorable(0)
tAttr.setKeyable(False)
tAttr.setReadable(True)
tAttr.setHidden(1)
tAttr.setUsesArrayDataBuilder(True)
mashDelay.addAttribute( mashDelay.outputArray )
nAttr = om.MFnNumericAttribute()
mashDelay.timeStep = nAttr.create ( "timeStep", "ts", om.MFnNumericData.kFloat, 10 )
nAttr.setHidden(0)
nAttr.setSoftMax(10)
nAttr.setMin(0)
nAttr.setStorable(1)
mashDelay.addAttribute ( mashDelay.timeStep )
nAttr = om.MFnNumericAttribute()
mashDelay.timeOffset = nAttr.create ( "offset", "off", om.MFnNumericData.kFloat, 0 )
nAttr.setHidden(0)
nAttr.setSoftMin(0)
nAttr.setSoftMax(10)
nAttr.setStorable(1)
mashDelay.addAttribute ( mashDelay.timeOffset )
nAttr = om.MFnNumericAttribute()
mashDelay.timeVar = nAttr.create ( "timeVariance", "tva", om.MFnNumericData.kFloat, 20 )
nAttr.setHidden(0)
nAttr.setSoftMax(50)
nAttr.setMin(0)
nAttr.setStorable(1)
mashDelay.addAttribute ( mashDelay.timeVar )
nAttr = om.MFnNumericAttribute()
mashDelay.inputs = nAttr.create ( "inputs", "in", om.MFnNumericData.k3Float, 0 )
nAttr.setHidden(0)
nAttr.setStorable(1)
mashDelay.addAttribute ( mashDelay.inputs )
uAttr = om.MFnUnitAttribute()
mashDelay.aTime = uAttr.create("time", "ti", om.MFnUnitAttribute.kTime, 0.0)
mashDelay.addAttribute(mashDelay.aTime)
mashDelay.attributeAffects(mashDelay.timeOffset, mashDelay.outputArray)
mashDelay.attributeAffects(mashDelay.aTime, mashDelay.outputArray)
mashDelay.attributeAffects( mashDelay.timeVar, mashDelay.outputArray)
mashDelay.attributeAffects( mashDelay.timeStep, mashDelay.outputArray)
mashDelay.attributeAffects( mashDelay.inputs, mashDelay.outputArray)
mashDelay.attributeAffects( mashDelay.inputArray, mashDelay.outputArray)
def initializePlugin(mobject):
mplugin = omMPx.MFnPlugin(mobject, "Ian_Waters", "1.0", "Any")
try:
mplugin.registerNode( kPluginNodeTypeName, mashDelayId, nodeCreator, nodeInitializer )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeTypeName )
raise
def uninitializePlugin(mobject):
mplugin = omMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( mashDelayId )
except:
sys.stderr.write( "Failed to deregister node: %s" % PluginNodeTypeName )
| null |
BDmaya/plugins/win/2013/MASH_Delay.py
|
MASH_Delay.py
|
py
| 9,638 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "maya.OpenMaya.MTypeId",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "maya.OpenMayaMPx.MPxNode",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMayaMPx",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MObject",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MObject",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "maya.mel.eval",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "maya.mel",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "maya.OpenMayaMPx.MPxNode.__init__",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "maya.OpenMayaMPx.MPxNode",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMayaMPx",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.kUnknownParameter",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVectorArray",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVectorArray",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnVectorArrayData",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVector",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnVectorArrayData",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVector",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "random.seed",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya.MDGContext",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MTime",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya.MFnDependencyNode",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFloatArray",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVector",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "maya.OpenMayaMPx.asMPxPtr",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "maya.OpenMayaMPx",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MVectorArray",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnVectorArrayData",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnTypedAttribute",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnData",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericAttribute",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericData",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnTypedAttribute",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnData",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericAttribute",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericData",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericAttribute",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericData",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericAttribute",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericData",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericAttribute",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnNumericData",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnUnitAttribute",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "maya.OpenMaya",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "maya.OpenMaya.MFnUnitAttribute",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMaya",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "maya.OpenMayaMPx.MFnPlugin",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "maya.OpenMayaMPx",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "maya.OpenMayaMPx.MFnPlugin",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "maya.OpenMayaMPx",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 286,
"usage_type": "attribute"
}
] |
457877611
|
from django.urls import path
from . import views
app_name = 'requirements'
urlpatterns = [
path("", views.view_all_groups, name="all_groups"),
path("<int:group_id>/", views.view_group, name="one_group"),
path("reqs/<int:requirement_id>/", views.view_requirement, name="one_requirement"),
]
| null |
requirements/urls.py
|
urls.py
|
py
| 306 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
}
] |
204269597
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db import Base, ToDoItem
engine = create_engine("sqlite:///tasks.db", echo=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
s = Session()
for desc in ("read a book", "learn django", "wash the dishes", "eat"):
t = ToDoItem(desc)
s.add(t)
s.commit()
| null |
init_db.py
|
init_db.py
|
py
| 414 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "db.Base.metadata.create_all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "db.Base.metadata",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "db.Base",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "db.ToDoItem",
"line_number": 13,
"usage_type": "call"
}
] |
142317483
|
import cfnresponse
import json
import boto3
import time
import sys
responseStr = {'Status' : {}}
def getRouteTableID(PrimarySubnetId,SecondarySubnetId,vpcId,AWSRegion):
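# look up the route table explicitly associated with each subnet, falling back to the VPC main route table; return the shared table id, or 0 when the two subnets use different tables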
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
response = ec2.describe_route_tables(
Filters=[{'Name': 'association.subnet-id','Values': [PrimarySubnetId]}]
)
if len(response['RouteTables']) == 0:
response = ec2.describe_route_tables(Filters=[{'Name': 'vpc-id', 'Values': [vpcId]},{'Name': 'association.main', 'Values': ['true',]}])
PrimaryRouteTableID=response['RouteTables'][0]['Associations'][0]['RouteTableId']
response = ec2.describe_route_tables(
Filters=[{'Name': 'association.subnet-id','Values': [SecondarySubnetId]}]
)
if len(response['RouteTables']) == 0:
response = ec2.describe_route_tables(Filters=[{'Name': 'vpc-id', 'Values': [vpcId]},{'Name': 'association.main', 'Values': ['true',]}])
SecondaryRouteTableID=response['RouteTables'][0]['Associations'][0]['RouteTableId']
if PrimaryRouteTableID == SecondaryRouteTableID :
return PrimaryRouteTableID
else:
return 0
def updateRouteTable(HANAPrimaryInstanceID,HANAVirtualIP,RTabId,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
response=ec2.create_route(
RouteTableId=RTabId,
DestinationCidrBlock=HANAVirtualIP+'/32',
InstanceId=HANAPrimaryInstanceID
)
return 1
def deleteVirtualIPRoute(HANAVirtualIP,RTabId,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
response=ec2.delete_route(
DestinationCidrBlock=HANAVirtualIP+'/32',
RouteTableId=RTabId
)
def executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion):
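# run the given shell commands on the target instances through SSM AWS-RunShellScript, polling every 3 seconds until the command leaves Pending/InProgress; returns 1 on Success, 0 otherwise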
session = boto3.Session()
ssmClient = session.client('ssm', region_name=AWSRegion)
ssmCommand = ssmClient.send_command(
InstanceIds=InstanceIDArray,
DocumentName='AWS-RunShellScript',
TimeoutSeconds=30,
Comment=CommentStr,
Parameters={
'commands': CommandArray
}
)
L_SSMCommandID = ssmCommand['Command']['CommandId']
status = 'Pending'
while status == 'Pending' or status == 'InProgress':
status = (ssmClient.list_commands(CommandId=L_SSMCommandID))['Commands'][0]['Status']
time.sleep(3)
if (status == "Success"):
#response = ssmClient.list_command_invocations(CommandId=L_SSMCommandID,InstanceId=InstanceIDArray[0],Details=True)
return 1
else:
return 0
def setupAWSConfigProfile(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
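# write /root/.aws/config on both nodes with the default region plus a 'cluster' profile, which the stonith and overlay-IP resource agents use for their AWS API calls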
CommandArray = []
CommandArray.append('mkdir /root/.aws')
CommandArray.append('echo "[default]" > /root/.aws/config')
CommandArray.append('echo "region = '+AWSRegion+'" >> /root/.aws/config')
CommandArray.append('echo "[profile cluster]" >> /root/.aws/config')
CommandArray.append('echo "region = '+AWSRegion+'" >> /root/.aws/config')
CommandArray.append('echo "output = text" >> /root/.aws/config')
CommandArray.append('chmod 600 /root/.aws/config')
CommentStr = 'AWS config file on Primary & Secondary'
InstanceIDArray =[HANAPrimaryInstanceID,HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def disableSourceDestinationCheck(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
ec2.modify_instance_attribute(SourceDestCheck={'Value': False}, InstanceId=HANAPrimaryInstanceID)
ec2.modify_instance_attribute(SourceDestCheck={'Value': False}, InstanceId=HANASecondaryInstanceID)
return verifySourceDestinationCheck(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
def verifySourceDestinationCheck(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
retPri=ec2.describe_instance_attribute(Attribute='sourceDestCheck', InstanceId=HANAPrimaryInstanceID)
if (retPri['SourceDestCheck']['Value'] == False):
retSec=ec2.describe_instance_attribute(Attribute='sourceDestCheck', InstanceId=HANASecondaryInstanceID)
if (retSec['SourceDestCheck']['Value'] == False):
return 1
else:
return 0
else:
return 0
def createPacemakerTag(HANAPrimaryInstanceID,HANASecondaryInstanceID,PaceMakerTag,HANAPrimaryHostname,HANASecondaryHostname,hanaSID,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
ec2.create_tags(Resources=[HANAPrimaryInstanceID],Tags=[{'Key': PaceMakerTag,'Value': HANAPrimaryHostname}])
ec2.create_tags(Resources=[HANAPrimaryInstanceID],Tags=[{'Key': 'Name','Value': 'HANA - ' + hanaSID +' - Primary'}])
ec2.create_tags(Resources=[HANASecondaryInstanceID],Tags=[{'Key': PaceMakerTag,'Value': HANASecondaryHostname}])
ec2.create_tags(Resources=[HANASecondaryInstanceID],Tags=[{'Key': 'Name','Value': 'HANA - ' + hanaSID +' - Secondary'}])
return verifyPackemakerTag(HANAPrimaryInstanceID,HANASecondaryInstanceID,PaceMakerTag,HANAPrimaryHostname,HANASecondaryHostname,hanaSID,AWSRegion)
def verifyPackemakerTag(HANAPrimaryInstanceID,HANASecondaryInstanceID,PaceMakerTag,HANAPrimaryHostname,HANASecondaryHostname,hanaSID,AWSRegion):
session = boto3.Session()
ec2 = session.client('ec2', region_name=AWSRegion)
instDetail = ec2.describe_tags(Filters=[{'Name': 'tag:'+PaceMakerTag,'Values': [HANAPrimaryHostname,HANASecondaryHostname]}])
count = 0
for idx, tag in enumerate(instDetail['Tags']):
if (tag['ResourceId'] == HANAPrimaryInstanceID or tag['ResourceId'] == HANASecondaryInstanceID):
count = count + 1
if (count == 2):
return 1
else:
return 0
def installRsyslog(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
CommandArray = []
# SLES12 SP4 & SLES 15 do not have aws-vpc-move-ip installed by default
CommandArray.append('zypper install -y aws-vpc-move-ip')
CommandArray.append('zypper install -y rsyslog')
CommentStr = 'Install rsyslog'
InstanceIDArray =[HANAPrimaryInstanceID,HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def copySSFSFilesFromPrimaryToS3(HANAPrimaryInstanceID,TempS3Bucket,hanaSID,AWSRegion):
CommandArray = []
CommandArray.append('aws s3 cp /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/data/SSFS_'+hanaSID+'.DAT '+TempS3Bucket)
CommandArray.append('aws s3 cp /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/key/SSFS_'+hanaSID+'.KEY '+TempS3Bucket)
CommentStr = 'Copy SSFS from Primary to TempBucket'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def copySSFSFilesFromS3ToSecondary(HANASecondaryInstanceID,TempS3Bucket,hanaSID,AWSRegion):
CommandArray = []
CommandArray.append('su - '+hanaSID.lower()+'adm -c "HDB stop"')
CommandArray.append('mv /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/data/SSFS_'+hanaSID+'.DAT /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/data/SSFS_'+hanaSID+'.DAT.BAK')
CommandArray.append('mv /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/key/SSFS_'+hanaSID+'.KEY /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/key/SSFS_'+hanaSID+'.KEY.BAK')
CommandArray.append('aws s3 cp '+TempS3Bucket+'SSFS_'+hanaSID+'.DAT /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/data/SSFS_'+hanaSID+'.DAT')
CommandArray.append('aws s3 cp '+TempS3Bucket+'SSFS_'+hanaSID+'.KEY /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/key/SSFS_'+hanaSID+'.KEY')
CommandArray.append('chown '+hanaSID.lower()+'adm:sapsys /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/data/SSFS_'+hanaSID+'.DAT')
CommandArray.append('chown '+hanaSID.lower()+'adm:sapsys /usr/sap/'+hanaSID+'/SYS/global/security/rsecssfs/key/SSFS_'+hanaSID+'.KEY')
CommentStr = 'Copy SSFS from TempBucket to Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def disableHANAAutoStartSecondary(HANASecondaryInstanceID,HANASecondaryHostname,hanaSID,hanaInstanceNo,AWSRegion):
CommandArray = []
CommandArray.append("sed -i 's,^\(Autostart[ ]*=\).*,\1'Autostart=0',g' /usr/sap/"+hanaSID.upper()+"/SYS/profile/"+hanaSID.upper()+"_HDB"+hanaInstanceNo+"_"+HANASecondaryHostname)
CommentStr = 'Disable HANA AutoStart on Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def disableHANAAutoStartPrimary(HANAPrimaryInstanceID,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion):
CommandArray = []
CommandArray.append("sed -i 's,^\(Autostart[ ]*=\).*,\1'Autostart=0',g' /usr/sap/"+hanaSID.upper()+"/SYS/profile/"+hanaSID.upper()+"_HDB"+hanaInstanceNo+"_"+HANAPrimaryHostname)
CommentStr = 'Disable HANA AutoStart on Primary'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def updateHostFileSecondary(HANASecondaryInstanceID,HANAPrimaryHostname,HANAPrimaryIPAddress,domainName,AWSRegion):
CommandArray = []
CommandArray.append('echo "'+HANAPrimaryIPAddress+' '+HANAPrimaryHostname+'.'+domainName+' '+HANAPrimaryHostname+'" >> /etc/hosts')
CommentStr = 'Update Host File on Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def updateHostFilePrimary(HANAPrimaryInstanceID,HANASecondaryHostname,HANASecondaryIPAddress,domainName,AWSRegion):
CommandArray = []
CommandArray.append('echo "'+HANASecondaryIPAddress+' '+HANASecondaryHostname+'.'+domainName+' '+HANASecondaryHostname+'" >> /etc/hosts')
CommentStr = 'Update Host File on Primary'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def updatePreserveHostName(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
CommandArray = []
CommandArray.append("sed -i 's,^\(preserve_hostname[ ]*:\).*,\1'preserve_hostname:\ true',g' /etc/cloud/cloud.cfg")
CommentStr = 'Update Preserve Hostname in cloud.cfg on Primary & Secondary'
InstanceIDArray =[HANAPrimaryInstanceID,HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def updateDefaultTasksMax(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion):
#https://www.novell.com/support/kb/doc.php?id=7018594
CommandArray = []
CommandArray.append('sed -i".bak" "/\bDefaultTasksMax\b/d" /etc/systemd/system.conf')
CommandArray.append('echo -e "DefaultTasksMax=8192">> /etc/systemd/system.conf')
CommandArray.append('systemctl daemon-reload')
CommentStr = 'Update DefaultTasksMax on Primary & Secondary'
InstanceIDArray =[HANAPrimaryInstanceID,HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def CompleteCoroSyncSetup(HANAPrimaryInstanceID,RTabId,HANAVirtualIP,hanaSID,hanaInstanceNo,PaceMakerTag,AWSRegion):
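# write crm configuration snippets under /root/ClusterSetup (EC2 STONITH, overlay IP via aws-vpc-move-ip, SAPHanaTopology clone, SAPHana master/slave, colocation and ordering constraints) and load each one with 'crm configure load update'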
CommandArray = []
CommandArray.append('mkdir /root/ClusterSetup')
CommandArray.append('echo "primitive res_AWS_STONITH stonith:external/ec2 \\\\" > /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "op start interval=0 timeout=180 \\\\" >> /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "op stop interval=0 timeout=180 \\\\" >> /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "op monitor interval=120 timeout=60 \\\\" >> /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "meta target-role=Started \\\\" >> /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "params tag='+PaceMakerTag+' profile=cluster" >> /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/aws-stonith.txt')
CommandArray.append('echo "primitive res_AWS_IP ocf:suse:aws-vpc-move-ip \\\\" > /root/ClusterSetup/aws-ip-move.txt')
#changed address to ip as address has been deprecated in lastest version (also added zypper install aws-vpc-move-ip so that latest version of agents is installed)
CommandArray.append('echo "params ip='+HANAVirtualIP+' routing_table='+RTabId+' interface=eth0 profile=cluster \\\\" >> /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('echo "op start interval=0 timeout=180 \\\\" >> /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('echo "op stop interval=0 timeout=180 \\\\" >> /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('echo "op monitor interval=60 timeout=60 \\\\" >> /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('echo "meta target-role=Started" >> /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/aws-ip-move.txt')
CommandArray.append('echo "property \$id=cib-bootstrap-options \\\\" > /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo " stonith-enabled=true \\\\" >> /root/ClusterSetup/crm-bs.txt')
#Changed poweroff to off as poweroff has been deprecated
CommandArray.append('echo " stonith-action=off \\\\" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "stonith-timeout=150s" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "rsc_defaults \$id=rsc-options \\\\" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "resource-stickiness=1000 \\\\" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "migration-threshold=5000" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "op_defaults \$id=op-options \\\\" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "timeout=600" >> /root/ClusterSetup/crm-bs.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/crm-bs.txt')
CommandArray.append('echo "primitive rsc_SAPHanaTopology_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' ocf:suse:SAPHanaTopology \\\\" > /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "operations \$id=rsc_sap2_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+'-operations \\\\" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "op monitor interval=10 timeout=300 \\\\" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "op start interval=0 timeout=300 \\\\" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "op stop interval=0 timeout=300 \\\\" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "params SID='+hanaSID.upper()+' InstanceNumber='+hanaInstanceNo+'" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "clone cln_SAPHanaTopology_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' rsc_SAPHanaTopology_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' \\\\" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "meta clone-node-max=1 interleave=true" >> /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/crm-hana-topology.txt')
CommandArray.append('echo "primitive rsc_SAPHana_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' ocf:suse:SAPHana \\\\" > /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "operations \$id=rsc_sap_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+'-operations \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "op start interval=0 timeout=3600 \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "op stop interval=0 timeout=3600 \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "op promote interval=0 timeout=3600 \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "op monitor interval=60 role=Master timeout=700 \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "op monitor interval=61 role=Slave timeout=700 \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "params SID='+hanaSID.upper()+' InstanceNumber='+hanaInstanceNo+' PREFER_SITE_TAKEOVER=true \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "ms msl_SAPHana_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' rsc_SAPHana_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' \\\\" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "meta clone-max=2 clone-node-max=1 interleave=true" >> /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/crm-saphana.txt')
CommandArray.append('echo "colocation col_IP_Primary 2000: res_AWS_IP:Started msl_SAPHana_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+':Master" > /root/ClusterSetup/aws-constraint.txt')
CommandArray.append('echo "order ord_SAPHana 2000: cln_SAPHanaTopology_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+' msl_SAPHana_'+hanaSID.upper()+'_HDB'+hanaInstanceNo+'" >> /root/ClusterSetup/aws-constraint.txt')
CommandArray.append('crm configure load update /root/ClusterSetup/aws-constraint.txt')
CommentStr = 'corosync setup for SAP HANA'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def StartPaceMaker(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAMasterPass,AWSRegion):
CommandArray=[]
CommandArray.append('systemctl start pacemaker')
CommandArray.append('chkconfig pacemaker on')
CommandArray.append('systemctl start hawk')
CommandArray.append('chkconfig hawk on')
CommandArray.append('echo "hacluster:'+HANAMasterPass+'" | chpasswd')
CommentStr = 'Start Pacemaker on Primary and configure for autostart with OS'
InstanceIDArray =[HANAPrimaryInstanceID]
if ( executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion) == 1 ):
CommentStr = 'Start Pacemaker on Secondary and configure for autostart with OS'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
else:
return 0
def createCoroSyncConfig(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANASecondaryIPAddress,HANAPrimaryIPAddress,HANAPrimaryCorosync2ndIP,HANASecondaryCorosync2ndIP,AWSRegion):
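# write a two-ring udpu corosync.conf on the primary, then swap the bindnetaddr line and push the same file to the secondary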
CommandArray = []
CommandArray.append('echo "# Please read the corosync.conf.5 manual page" > /etc/corosync/corosync.conf')
CommandArray.append('echo "totem {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " version: 2" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " token: 30000" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " consensus: 36000" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " token_retransmits_before_loss_const: 6" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " crypto_cipher: none" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " crypto_hash: none" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " clear_node_high_bit: yes" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " rrp_mode: passive" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " " >> /etc/corosync/corosync.conf')
CommandArray.append('echo " interface {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ringnumber: 0" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " bindnetaddr: '+HANAPrimaryIPAddress+'" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " mcastport: 5405" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ttl: 1" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " }" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " transport: udpu" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "}" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "logging {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " fileline: off" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " to_logfile: yes" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " to_syslog: yes" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " logfile: /var/log/cluster/corosync.log" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " debug: off" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " timestamp: on" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " logger_subsys {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " subsys: QUORUM" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " debug: off" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " }" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "}" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "nodelist {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " node {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ring0_addr: '+HANAPrimaryIPAddress+'" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ring1_addr: '+HANAPrimaryCorosync2ndIP+'" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " nodeid: 1" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " }" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " node {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ring0_addr: '+HANASecondaryIPAddress+'" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " ring1_addr: '+HANASecondaryCorosync2ndIP+'" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " nodeid: 2" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " }" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "}" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " " >> /etc/corosync/corosync.conf')
CommandArray.append('echo " quorum {" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " # Enable and configure quorum subsystem (default: off)" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " # see also corosync.conf.5 and votequorum.5" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " provider: corosync_votequorum" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " expected_votes: 2" >> /etc/corosync/corosync.conf')
CommandArray.append('echo " two_node: 1" >> /etc/corosync/corosync.conf')
CommandArray.append('echo "}" >> /etc/corosync/corosync.conf')
CommandArray.append('chown root:root /etc/corosync/corosync.conf')
CommandArray.append('chmod 400 /etc/corosync/corosync.conf')
CommentStr = 'CoroSync config file on Primary'
InstanceIDArray =[HANAPrimaryInstanceID]
if ( executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion) == 1 ):
CommandArray[13]='echo " bindnetaddr: '+HANASecondaryIPAddress+'" >> /etc/corosync/corosync.conf'
CommentStr = 'CoroSync config file on Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
else:
return 0
def setupCoroSyncKeyPrimary(HANAPrimaryInstanceID,HANASecondaryInstanceID,TempS3Bucket,AWSRegion):
CommandArray = []
CommandArray.append('corosync-keygen')
CommandArray.append('aws s3 cp /etc/corosync/authkey '+TempS3Bucket+'authkey')
CommentStr = 'CoroSync Key Generate On Primary'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def copyCoroSyncKeyToSecondary(HANAPrimaryInstanceID,HANASecondaryInstanceID,TempS3Bucket,AWSRegion):
CommandArray = []
CommandArray.append('aws s3 cp '+TempS3Bucket+'authkey '+'/etc/corosync/authkey')
CommandArray.append('chown root:root /etc/corosync/authkey')
CommandArray.append('chmod 400 /etc/corosync/authkey')
CommentStr = 'CoroSync Key Copy On Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def setupHSRPrimary(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimarySite,HANASecondarySite,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion):
CommandArray = []
CommandArray.append('su - '+hanaSID.lower()+'adm -c "hdbnsutil -sr_enable --name='+HANAPrimarySite+'"')
CommentStr = 'Enable HSR on Primary'
InstanceIDArray =[HANAPrimaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def setupHSRSecondary(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimarySite,HANASecondarySite,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion):
CommandArray = []
CommandArray.append('su - '+hanaSID.lower()+'adm -c "HDB stop"')
CommandArray.append('su - '+hanaSID.lower()+'adm -c "hdbnsutil -sr_register --name='+HANASecondarySite+' --remoteHost='+HANAPrimaryHostname+' --remoteInstance='+hanaInstanceNo+' --replicationMode=sync --operationMode=logreplay"')
CommentStr = 'Enable HSR on Secondary'
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def manageRetValue(retValue,FuncName,input, context):
global responseStr
if (retValue == 1):
responseStr['Status'][FuncName] = "Success"
else:
responseStr['Status'][FuncName] = "Failed"
cfnresponse.send(input, context, cfnresponse.FAILED, {'Status':json.dumps(responseStr)})
sys.exit(0)
def setupSUSESAPHanaHook(HANAPrimaryInstanceID,HANASecondaryInstanceID,hanaSID,sidadm,AWSRegion):
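# register the SAPHanaSR HA/DR provider hook in global.ini on both nodes and grant <sid>adm passwordless sudo for the crm_attribute call the hook makes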
CommandArray = []
CommandArray.append('echo " " >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "[ha_dr_provider_SAPHanaSR]" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "provider = SAPHanaSR" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "path = /usr/share/SAPHanaSR" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "execution_order = 1" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo " " >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "[trace]" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "ha_dr_saphanasr = info" >> /hana/shared/'+hanaSID.upper()+'/global/hdb/custom/config/global.ini')
CommandArray.append('echo "'+sidadm+' ALL=(ALL) NOPASSWD: /usr/sbin/crm_attribute -n hana_'+hanaSID.lower()+'_site_srHook_*" >> /etc/sudoers')
CommentStr = 'Enable SAP HANA Hook'
InstanceIDArray =[HANAPrimaryInstanceID]
if ( executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion) == 1 ):
InstanceIDArray =[HANASecondaryInstanceID]
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
else:
return 0
def RHELStartPCSService(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAMasterPass,AWSRegion):
CommandArray = []
CommandArray.append('[ ! -e /usr/bin/aws ] && ln -s /usr/local/bin/aws /usr/bin/aws')
CommandArray.append('yum install -y pcs pacemaker fence-agents-aws aws-vpc-move-ip')
CommandArray.append('yum install -y resource-agents-sap-hana resource-agents')
CommandArray.append('mkdir -p /var/log/pcsd')
CommandArray.append('mkdir -p /var/log/cluster')
CommandArray.append('mkdir -p /var/log/sa')
CommandArray.append('systemctl start pcsd.service')
CommandArray.append('systemctl enable pcsd.service')
CommandArray.append('echo "hacluster:'+HANAMasterPass+'" | chpasswd')
InstanceIDArray =[HANAPrimaryInstanceID,HANASecondaryInstanceID]
CommentStr = 'Setup user hacluster and PCSD Service'
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def RHELSetupHANACluster(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimaryHostname,HANASecondaryHostname,HANAMasterPass,AWSRegion,hanaSID,hanaInstanceNo,HANAVirtualIP,RTabId):
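# Builds the RHEL pacemaker cluster: node auth, cluster create/start/enable,
# fence_aws STONITH, SAPHanaTopology/SAPHana resources, the overlay IP resource,
# and the ordering/colocation constraints between them.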
CommandArray = []
CommandArray.append('pcs cluster auth '+HANAPrimaryHostname+' '+HANASecondaryHostname+' -u hacluster -p '+HANAMasterPass)
CommandArray.append('pcs cluster setup --name hanacluster '+HANAPrimaryHostname+' '+HANASecondaryHostname)
CommandArray.append('pcs cluster enable --all')
CommandArray.append('pcs cluster start --all')
CommandArray.append('pcs stonith create clusterfence fence_aws region='+AWSRegion+' pcmk_host_map="'+HANAPrimaryHostname+':'+HANAPrimaryInstanceID+';'+HANASecondaryHostname+':'+HANASecondaryInstanceID+'" power_timeout=240 pcmk_reboot_timeout=480 pcmk_reboot_retries=4')
#Removed resource-stickiness & migration-threshold based on recommendations from Red Hat
#CommandArray.append('pcs resource defaults resource-stickiness=1000')
#CommandArray.append('pcs resource defaults migration-threshold=5000')
CommandArray.append('pcs resource create SAPHanaTopology_'+hanaSID+'_'+hanaInstanceNo+' SAPHanaTopology SID='+hanaSID+' InstanceNumber='+hanaInstanceNo+' op start timeout=600 op stop timeout=300 op monitor interval=10 timeout=600 --clone clone-max=2 clone-node-max=1 interleave=true')
CommandArray.append('pcs resource create SAPHana_'+hanaSID+'_'+hanaInstanceNo+' SAPHana SID='+hanaSID+' InstanceNumber='+hanaInstanceNo+' PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true op start timeout=3600 op stop timeout=3600 op monitor interval=61 role="Slave" timeout=700 op monitor interval=59 role="Master" timeout=700 op promote timeout=3600 op demote timeout=3600 master meta notify=true clone-max=2 clone-node-max=1 interleave=true')
CommandArray.append('pcs resource create SAPHana_'+hanaSID+'_OIP aws-vpc-move-ip ip='+HANAVirtualIP+' interface=eth0 routing_table='+RTabId)
CommandArray.append('pcs constraint order SAPHanaTopology_'+hanaSID+'_'+hanaInstanceNo+'-clone then SAPHana_'+hanaSID+'_'+hanaInstanceNo+'-master symmetrical=false')
CommandArray.append('pcs constraint colocation add SAPHana_'+hanaSID+'_OIP with master SAPHana_'+hanaSID+'_'+hanaInstanceNo+'-master 2000')
InstanceIDArray =[HANAPrimaryInstanceID]
CommentStr = 'Setup HANA Cluster Config'
return executeSSMCommands(CommandArray,InstanceIDArray,CommentStr,AWSRegion)
def lambda_handler(input, context):
global responseStr
try:
if (input['RequestType'] == "Update") or (input['RequestType'] == "Create"):
HANAPrimaryInstanceID = input['ResourceProperties']['PrimaryInstanceId']
HANASecondaryInstanceID = input['ResourceProperties']['SecondaryInstanceId']
HANAPrimaryHostname = input['ResourceProperties']['PrimaryHostName']
HANASecondaryHostname = input['ResourceProperties']['SecondaryHostName']
PaceMakerTag = input['ResourceProperties']['PaceMakerTag']
AWSRegion = input['ResourceProperties']['AWSRegion']
HANAVirtualIP = input['ResourceProperties']['VirtualIP']
PrimarySubnetId = input['ResourceProperties']['PrimarySubnetId']
SecondarySubnetId = input['ResourceProperties']['SecondarySubnetId']
hanaSID = input['ResourceProperties']['SID']
hanaInstanceNo = input['ResourceProperties']['InstanceNo']
HANAMasterPass = input['ResourceProperties']['HANAMasterPass']
TempS3Bucket = input['ResourceProperties']['TempS3Bucket']
HANAPrimaryIPAddress = input['ResourceProperties']['HANAPrimaryIPAddress']
HANASecondaryIPAddress = input['ResourceProperties']['HANASecondaryIPAddress']
domainName = input['ResourceProperties']['domainName']
HANAPrimarySite = input['ResourceProperties']['PrimaryHANASite']
HANASecondarySite = input['ResourceProperties']['SecondaryHANASite']
VPCID=input['ResourceProperties']['VPCID']
MyOS = input['ResourceProperties']['MyOS']
MyOS = MyOS.upper()
HANAPrimaryCorosync2ndIP = input['ResourceProperties']['HANAPrimaryCorosync2ndIP']
HANASecondaryCorosync2ndIP = input['ResourceProperties']['HANASecondaryCorosync2ndIP']
sidadm = hanaSID.lower()+"adm"
retValue = setupAWSConfigProfile(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
manageRetValue(retValue,"setupAWSConfigProfile",input, context)
retValue = createPacemakerTag(HANAPrimaryInstanceID,HANASecondaryInstanceID,PaceMakerTag,HANAPrimaryHostname,HANASecondaryHostname,hanaSID,AWSRegion)
manageRetValue(retValue,"createPacemakerTag",input, context)
retValue = disableSourceDestinationCheck(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
manageRetValue(retValue,"disableSourceDestinationCheck",input, context)
RTabId = getRouteTableID(PrimarySubnetId,SecondarySubnetId,VPCID,AWSRegion)
# report the route-table update itself (assuming updateRouteTable returns a
# status flag like the other helpers; the previous code re-checked a stale retValue)
retValue = updateRouteTable(HANAPrimaryInstanceID,HANAVirtualIP,RTabId,AWSRegion)
manageRetValue(retValue,"updateRouteTable",input, context)
if 'SUSE' in MyOS :
retValue = installRsyslog(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
responseStr["Status"]["installRsyslog"] = "Success"
retValue = copySSFSFilesFromPrimaryToS3(HANAPrimaryInstanceID,TempS3Bucket,hanaSID,AWSRegion)
manageRetValue(retValue,"copySSFSFilesFromPrimaryToS3",input, context)
retValue = copySSFSFilesFromS3ToSecondary(HANASecondaryInstanceID,TempS3Bucket,hanaSID,AWSRegion)
manageRetValue(retValue,"copySSFSFilesFromS3ToSecondary",input, context)
retValue = disableHANAAutoStartSecondary(HANASecondaryInstanceID,HANASecondaryHostname,hanaSID,hanaInstanceNo,AWSRegion)
manageRetValue(retValue,"disableHANAAutoStartSecondary",input, context)
retValue = disableHANAAutoStartPrimary(HANAPrimaryInstanceID,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion)
manageRetValue(retValue,"disableHANAAutoStartPrimary",input, context)
retValue = updateHostFileSecondary(HANASecondaryInstanceID,HANAPrimaryHostname,HANAPrimaryIPAddress,domainName,AWSRegion)
manageRetValue(retValue,"updateHostFileSecondary",input, context)
retValue = updateHostFilePrimary(HANAPrimaryInstanceID,HANASecondaryHostname,HANASecondaryIPAddress,domainName,AWSRegion)
manageRetValue(retValue,"updateHostFilePrimary",input, context)
retValue = updatePreserveHostName(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
manageRetValue(retValue,"updatePreserveHostName",input, context)
if 'SUSE' in MyOS :
retValue = updateDefaultTasksMax(HANAPrimaryInstanceID,HANASecondaryInstanceID,AWSRegion)
manageRetValue(retValue,"updateDefaultTasksMax",input, context)
retValue = setupHSRPrimary(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimarySite,HANASecondarySite,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion)
manageRetValue(retValue,"setupHSRPrimary",input, context)
retValue = setupHSRSecondary(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimarySite,HANASecondarySite,HANAPrimaryHostname,hanaSID,hanaInstanceNo,AWSRegion)
manageRetValue(retValue,"setupHSRSecondary",input, context)
if 'SUSE' in MyOS :
retValue = setupCoroSyncKeyPrimary(HANAPrimaryInstanceID,HANASecondaryInstanceID,TempS3Bucket,AWSRegion)
manageRetValue(retValue,"setupCoroSyncKeyPrimary",input, context)
retValue = copyCoroSyncKeyToSecondary(HANAPrimaryInstanceID,HANASecondaryInstanceID,TempS3Bucket,AWSRegion)
manageRetValue(retValue,"copyCoroSyncKeyToSecondary",input, context)
retValue = createCoroSyncConfig(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANASecondaryIPAddress,HANAPrimaryIPAddress,HANAPrimaryCorosync2ndIP,HANASecondaryCorosync2ndIP,AWSRegion)
manageRetValue(retValue,"createCoroSyncConfig",input, context)
retValue = setupSUSESAPHanaHook(HANAPrimaryInstanceID,HANASecondaryInstanceID,hanaSID,sidadm,AWSRegion)
manageRetValue(retValue,"setupSUSESAPHanaHook",input, context)
retValue = StartPaceMaker(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAMasterPass,AWSRegion)
manageRetValue(retValue,"StartPaceMaker",input, context)
retValue = CompleteCoroSyncSetup(HANAPrimaryInstanceID,RTabId,HANAVirtualIP,hanaSID,hanaInstanceNo,PaceMakerTag,AWSRegion)
manageRetValue(retValue,"CompleteCoroSyncSetup",input, context)
else:
retValue = RHELStartPCSService(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAMasterPass,AWSRegion)
manageRetValue(retValue,"CompletePCSDServiceRHEL",input, context)
retValue = RHELSetupHANACluster(HANAPrimaryInstanceID,HANASecondaryInstanceID,HANAPrimaryHostname,HANASecondaryHostname,HANAMasterPass,AWSRegion,hanaSID,hanaInstanceNo,HANAVirtualIP,RTabId)
manageRetValue(retValue,"HANAClusterConfigRHEL",input, context)
cfnresponse.send(input, context, cfnresponse.SUCCESS, {'Status':json.dumps(responseStr)})
elif (input['RequestType'] == "Delete"):
AWSRegion = input['ResourceProperties']['AWSRegion']
HANAVirtualIP = input['ResourceProperties']['VirtualIP']
PrimarySubnetId = input['ResourceProperties']['PrimarySubnetId']
SecondarySubnetId = input['ResourceProperties']['SecondarySubnetId']
VPCID=input['ResourceProperties']['VPCID']
RTabId = getRouteTableID(PrimarySubnetId,SecondarySubnetId,VPCID,AWSRegion)
deleteVirtualIPRoute(HANAVirtualIP,RTabId,AWSRegion)
responseStr['Status'] = 'Virtual IP ' + HANAVirtualIP + ' Removed From Route Table: ' + RTabId
cfnresponse.send(input, context, cfnresponse.SUCCESS, {'Status':json.dumps(responseStr)})
else:
responseStr['Status'] = 'Nothing to do as Request Type is : ' + input['RequestType']
cfnresponse.send(input, context, cfnresponse.SUCCESS, {'Status':json.dumps(responseStr)})
except Exception as e:
responseStr['Status'] = str(e)
cfnresponse.send(input, context, cfnresponse.FAILED, {'Status':json.dumps(responseStr)})
| null |
scripts/HAConfig/HAConfig.py
|
HAConfig.py
|
py
| 39,780 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.Session",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cfnresponse.send",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "cfnresponse.FAILED",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "cfnresponse.send",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "cfnresponse.SUCCESS",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "cfnresponse.send",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "cfnresponse.SUCCESS",
"line_number": 558,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "cfnresponse.send",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "cfnresponse.SUCCESS",
"line_number": 561,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "cfnresponse.send",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "cfnresponse.FAILED",
"line_number": 564,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 564,
"usage_type": "call"
}
] |
131677521
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: csvmixin
# Purpose: csv mixin for dyndmod
#
# Author: jojosati
#
# Created: 05/01/2012
# Copyright: (c) jojosati 2012
# Licence: MIT
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import datetime
from dyndmod import sqla, re
from dyndmod import _unicode, attr_name_dimension
class _CSV_implement(object) :
# _Model mixin class to support CSV
encoding = 'utf8' # 'cp874'
dateformat = '%Y-%m-%d'
timeformat = '%H:%M:%S'
datetimeformat = dateformat+' '+timeformat
numseperator = ','
typecasters = {
sqla.Integer :
lambda cls,v: int(float(v.replace(cls.numseperator,''))),
sqla.Float :
lambda cls,v: float(v.replace(cls.numseperator,'')),
sqla.Date :
lambda cls,v: datetime.datetime.strptime(v,cls.dateformat).date(),
sqla.DateTime :
lambda cls,v: datetime.datetime.strptime(v,cls.datetimeformat),
sqla.Time :
lambda cls,v: datetime.datetime.strptime(v,cls.timeformat).time(),
}
colmappings = {}
class _CSV_mixin(object):
import random
def csv_importer (self,csvfile,**kwargs) :
return self.csv_importer_imp(csvfile,**kwargs)
def csv_importer_imp (self,csvfile,**kwargs) :
'''
Implementation of csv_importer with a built-in transformer function.
Uses the _csv_implement_class_ attribute (or this class's own attributes)
to control how CSV data is transformed.
'''
model = kwargs.get('model') or self
#csvimp = csvimp or self
csvimp = getattr(self,'_csv_implement_class_',None) or self
encoding = kwargs.pop('encoding',csvimp.encoding) # default encoding
rawmode = 0
if ',' in csvfile :
files = csvfile.split(',',1)
csvfile = files.pop(0)
if 'raw' in files :
rawmode = 1
files.remove('raw')
if 'rawonly' in files :
rawmode = 2
files.remove('rawonly')
if files :
encoding = files.pop(0)
fake = 0
while csvfile[0]=='!':
fake += 1
csvfile = csvfile[1:]
if fake :
import random
colnamed = kwargs.get('colnamed')
mappings = csvimp.colmappings
def setdata(data,k,c,v=None) :
# k use as referrence to csv column header, for error reporting
if isinstance(c,tuple) :
# if c is tuple, ignore v
for cv in c :
setdata(data,k,*cv)
return
if isinstance(c,dict):
# if c is dict, ignore v
for cv in c.iteritems() :
setdata(data,k,*cv)
return
if not v :
return
if not c : # support raw mode
if not rawmode :
return
c = k
if callable(colnamed) :
c = colnamed(c,'import')
if not c : # support raw mode
if not rawmode :
return
c = k
n,dim = attr_name_dimension(c)
t = model._fld_dbtype(n,*dim)
if rawmode and t is None :
return
cc = c
fn = csvimp.typecasters.get(t)
if fn :
try:
v = fn(csvimp,v)
except Exception as e :
errmsg = _unicode(_unicode("{0} ({1}/{2})={3!r}").format(str(e),k,c,v))
if model.debug and not rawmode :
raise type(e)(errmsg)
else:
try :
v = model.cast_result_value(t,v)
except :
model.echo_(errmsg)
if model.debug :
raise
model.echo_('suppress to None.')
v = None
if fake and isinstance(v,float):
v = ((v+5000)*2)*(random.random()+0.3)
if v is not None :
data[cc] = v
def transformer(csvdata) :
# if fake skip enable
if fake>=2 and (10 * random.random()) < 0.1 :
return
data = {}
for k,v in csvdata.iteritems() :
if rawmode>=2 :
c = None
else :
c = mappings.get(k)
if callable(c) :
c = c(k,csvdata,data)
setdata(data,k,c,v)
return data
kwargs['encoding'] = encoding
return self.csv_importer_base(csvfile,transformer=transformer,**kwargs)
def csv_importer_base(self,csvfile,transformer=None,**kwargs) :
import csv
model = kwargs.get('model') or self
newtable = kwargs.pop("newtable",False)
encoding = kwargs.pop('encoding','utf8')
limit = kwargs.pop('limit',0)
start = kwargs.pop('start',0)
encoding = encoding or 'utf8'
echo_ = model.echo_
echo_('CSV file: {0} encoding: {1}',csvfile,encoding)
if newtable :
echo_('drop all tables, before create new.')
model.metadata.drop_all()
model.create_all()
session = model.session
csvcnt = 0
rowcnt = 0
errcnt = 0
with open(csvfile) as f:
cf = csv.DictReader(f, delimiter=',')
for data in cf:
csvcnt += 1
if csvcnt < start :
continue
csvdata = {}
for k,v in data.iteritems():
k,v = _unicode(k,encoding).rstrip(),_unicode(v,encoding).rstrip()
csvdata[k] = v
data = csvdata
try :
try:
if transformer :
data = transformer(data)
if isinstance(data,dict) :
data = model.mainbase(data)
if isinstance(data,model.ormbase) :
session.add(data)
if not session.new :
continue
except:
session.expunge_all()
raise
else:
try : session.commit()
except :
session.rollback()
raise
except Exception as e:
errcnt += 1
#errmsg = _unicode("{0} csvrow #{1}".format(str(e),csvcnt))
if model.debug :
for k,v in csvdata.iteritems() :
model.debug_("{0}={1}",k,v)
raise #type(e)(errmsg)
else:
echo_('{0}',e)
continue
rowcnt += 1
if rowcnt % 100 == 0 :
echo_('reading {0}, writing {1} so far...',csvcnt,rowcnt)
if rowcnt == limit : break
echo_('--- end of csv data ---')
echo_('Total read {0}, write {1}, error {2}',csvcnt,rowcnt,errcnt)
return [csvcnt,rowcnt,errcnt]
if __name__ == '__main__':
pass
| null |
csvmixin.py
|
csvmixin.py
|
py
| 7,647 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dyndmod.sqla.Integer",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.sqla",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "dyndmod.sqla.Float",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.sqla",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "dyndmod.sqla.Date",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.sqla",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "dyndmod.sqla.DateTime",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.sqla",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "dyndmod.sqla.Time",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.sqla",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "dyndmod.attr_name_dimension",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "dyndmod._unicode",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "dyndmod._unicode",
"line_number": 176,
"usage_type": "call"
}
] |
49330128
|
from django import forms
from django.core import validators
class Empform(forms.Form):
name = forms.CharField()
salary = forms.IntegerField()
opinion = forms.CharField(widget=forms.Textarea, validators=[validators.MaxLengthValidator(40), validators.MinLengthValidator(10)])
def clean(self):
print("Total form validation")
total_cleaned_data = super().clean()
inputname = total_cleaned_data['name']
if len(inputname) < 10:
raise forms.ValidationError("Name must be at least 10 characters")
inputsal = total_cleaned_data['salary']
if inputsal <= 0:
raise forms.ValidationError("Salary must be greater than 0")
return total_cleaned_data
| null |
corevalidator2/webapp/forms.py
|
forms.py
|
py
| 673 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.Form",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.Textarea",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.core.validators.MaxLengthValidator",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.core.validators",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.core.validators.MinLengthValidator",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 18,
"usage_type": "name"
}
] |
342655923
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prs_project.settings")
import django
django.setup()
import decimal
import pandas as pd
from recommender.models import Similarity
from analytics.models import Rating
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
from datetime import datetime
class ItemSimilarityMatrixBuilder(object):
def __init__(self, min_overlap=15, min_sim=0.2):
self.min_overlap = min_overlap
self.min_sim = min_sim
def save_sparse_matrix(self, sm, index, created=None):
# avoid a default argument evaluated once at import time; resolve "now" per call
created = created or datetime.now()
start_time = datetime.now()
Similarity.objects.all().delete()
sims = []
no_saved = 0
for i in sm.itertuples():
for j in range(1, len(i)):
row = i[0]
col = sm.columns[j - 1]
sim = i[j]
if sim > self.min_sim:
if len(sims) == 1000:
Similarity.objects.bulk_create(sims)
sims = []
if row != col:
new_similarity = Similarity(
created=created,
source=row,
target=col,
similarity=decimal.Decimal(str(sim))
)
no_saved +=1
sims.append(new_similarity)
Similarity.objects.bulk_create(sims)
print('{} Similarity items saved, done in {} seconds'.format(no_saved, datetime.now() - start_time))
def build(self, ratings, save=True):
print("Calculating similarities ... using {} ratings".format(len(ratings)))
start_time = datetime.now()
ratings['avg'] = ratings.groupby('user_id')['rating'].transform(lambda x: normalize(x))
ratings['avg'] = ratings['avg'].astype(float)
print("normalized ratings.")
rp = ratings.pivot_table(index=['movie_id'], columns=['user_id'], values='avg', fill_value=0)
rp = rp.transpose()
items_to_keep = rp.astype(bool).sum(axis=0) > self.min_overlap
# keep only items rated by more than min_overlap users
rp = rp.loc[:, items_to_keep]
print(
f"rating matrix (size {rp.shape[0]}x{rp.shape[1]}) finished, done in {datetime.now() - start_time} seconds")
sparsity_level = 1-(ratings.shape[0] / (rp.shape[0] * rp.shape[1]))
print("sparsity level is ", sparsity_level)
start_time = datetime.now()
#cor = cosine_similarity(sparse.csr_matrix(rp.transpose()), dense_output=False)
cor = rp.corr(method='pearson', min_periods=self.min_overlap)
print('correlation is finished, done in {} seconds'.format(datetime.now() - start_time))
if save:
self.save_sparse_matrix(cor, rp.transpose().index)
return cor
def normalize(x):
x = x.astype(float)
if x.std() == 0:
return 0.0
return (x - x.mean()) / (x.max() - x.min())
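# Note: this centers by the mean but scales by the rating range, not the std
# (despite the x.std() guard); constant raters normalize to a scalar 0.0.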
def split_ratings2(min_rank=3):
print('loading ratings')
ratings = Rating.objects.all()
print('ranking ratings')
df = pd.DataFrame.from_records(ratings.values())
print(df.head())
df['rank'] = df.groupby('user_id')['rating_timestamp'].rank(ascending=False)
return df[df['rank'] <= min_rank]
def load_all_ratings():
columns = ['user_id', 'movie_id', 'rating', 'type']
ratings_data = Rating.objects.all().values(*columns)
ratings = pd.SparseDataFrame.from_records(ratings_data, columns=columns)
ratings['rating'] = ratings['rating'].astype(float)
return ratings
if __name__ == '__main__':
TEST = True
if TEST:
ratings = pd.DataFrame(
[[1, '0011', 5, '2013-10-12 23:20:27+00:00'],
[1, '12', 3, '2014-10-12 23:20:27+00:00'],
[1, '14', 2, '2015-10-12 23:20:27+00:00'],
[2, '0011', 4, '2013-10-12 23:20:27+00:00'],
[2, '12', 3, '2014-10-12 23:20:27+00:00'],
[2, '13', 4, '2015-10-12 23:20:27+00:00'],
[3, '0011', 5, '2013-10-12 23:20:27+00:00'],
[3, '12', 2, '2014-10-12 23:20:27+00:00'],
[3, '13', 5, '2015-10-12 23:20:27+00:00'],
[3, '14', 2, '2016-10-12 23:20:27+00:00'],
[4, '0011', 3, '2013-10-12 23:20:27+00:00'],
[4, '12', 5, '2014-10-12 23:20:27+00:00'],
[4, '13', 3, '2015-10-12 23:20:27+00:00'],
[5, '0011', 3, '2013-10-12 23:20:27+00:00'],
[5, '12', 3, '2014-10-12 23:20:27+00:00'],
[5, '13', 3, '2015-10-12 23:20:27+00:00'],
[5, '14', 2, '2016-10-12 23:20:27+00:00'],
[6, '0011', 2, '2013-10-12 23:20:27+00:00'],
[6, '12', 3, '2014-10-12 23:20:27+00:00'],
[6, '13', 2, '2015-10-12 23:20:27+00:00'],
[6, '14', 3, '2016-10-12 23:20:27+00:00'],
], columns=['user_id', 'movie_id', 'rating', 'rating_timestamp'])
result = ItemSimilarityMatrixBuilder(2).build(ratings)
print(result)
else:
ItemSimilarityMatrixBuilder().build(load_all_ratings())
| null |
builder/item_similarity_calculator.py
|
item_similarity_calculator.py
|
py
| 5,203 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ.setdefault",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "django.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "recommender.models.Similarity.objects.all",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "recommender.models.Similarity.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "recommender.models.Similarity",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "recommender.models.Similarity.objects.bulk_create",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "recommender.models.Similarity.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "recommender.models.Similarity",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "recommender.models.Similarity",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "recommender.models.Similarity.objects.bulk_create",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "recommender.models.Similarity.objects",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "recommender.models.Similarity",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "analytics.models.Rating.objects.all",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "analytics.models.Rating.objects",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "analytics.models.Rating",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "analytics.models.Rating.objects.all",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "analytics.models.Rating.objects",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "analytics.models.Rating",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "pandas.SparseDataFrame.from_records",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pandas.SparseDataFrame",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 125,
"usage_type": "call"
}
] |
52787123
|
#imports
import numpy as np
import string
import re
import csv
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.text import Text
from nltk.stem import WordNetLemmatizer
import copy
#imports end
##train test load
train = np.load("./data/data_train.pkl",allow_pickle=True)
test=np.load("./data/data_test.pkl",allow_pickle=True)
x_train=train[0][:60000]
y_train=train[1][:60000]
x_valid=train[0][60000:]
y_valid=train[1][60000:]
##train test load
##required classes
stop_words = set(stopwords.words('english'))
tokenizer = RegexpTokenizer(r'\w+')
wordnet_lemmatizer = WordNetLemmatizer()
SnowballStemmer = SnowballStemmer("english")
##required class end
class navieBayes():
def __init__(self,x_train,y_train):
self.x_train=x_train
self.y_train=y_train
def TrainNavie(self):
print("Started Traning")
for index,temp in enumerate(self.x_train):
self.x_train[index]=self.preProcess(temp)
self.log_prior=self.calculate_prior(self.y_train)
self.class_frequencies=self.bag_of_word(self.x_train,self.y_train)
#self.class_frequencies=self.curdownFreq(self.class_frequencies)
#print(log_prior)
#print(len(class_frequencies))
#print(class_frequencies[0].keys())
self.class_vocab,self.total_vocab=self.calculate_class_vocab(self.class_frequencies)
print(self.class_vocab)
print(self.total_vocab)
print("Train completed ")
def preProcess(self,content):
content=content.lower() #to lower case
content=re.sub(r'\d+', '', content) #remove digits
content=content.translate(str.maketrans('', '', string.punctuation))#remove punctuation
content=content.strip()#remove extra space
return content
def Tokenize(self,content):
tokens = tokenizer.tokenize(content)## remove if nltk is restricted and develop new method
tokens = [w for w in tokens if not w in stop_words] #remove stop words
#tokens = [wordnet_lemmatizer.lemmatize(w) for w in tokens] #lemmatization
#tokens = [SnowballStemmer.stem(w) for w in tokens]
NLTKText = Text(tokens)## remove if nltk is restricted and develop new method
return NLTKText.vocab()
def calculate_prior(self,y_train):
classes=np.unique(y_train,return_counts=True)
self.unique_class_Names=classes[0]
self.class_counts=classes[1]
log_prior=[]
for i in range (len(classes[0])):
# prior = class count / total number of training documents
log_prior.append(np.log(self.class_counts[i]/len(y_train)))
return log_prior
def curdownFreq(self,class_frequencies):
filtered_table=[]
for classtable in class_frequencies:
dummyclassfreq=copy.deepcopy(classtable)
for j in classtable.items():
word=j[0]
#print(j)
wordcount=j[1]
if(wordcount<2):
del dummyclassfreq[word]
filtered_table.append(dummyclassfreq)
return filtered_table
def bag_of_word(self,x_train,y_train):
class_frequencies=[]
for label in self.unique_class_Names:
label_list=np.where(np.array(y_train)==label)[0]
text=""
for i in label_list:
#x_train[i]=x_train[i]
text+=x_train[i]+"\n"
classwordsfrequencies=(self.Tokenize(text))
class_frequencies.append(classwordsfrequencies)
return class_frequencies
def calculate_class_vocab(self,class_frequencies):
vocab=set()
class_vocab=[]
#cl_fre=[]
#class_vocab_names=[]
for rowIndex,data in enumerate(class_frequencies):
class_vocab.append(sum(data.values()))
# class_vocab_names.append(set(data.keys()))
vocab=vocab.union(data.keys())
#for i in len(class_vocab_names):
# cl_fre[i]=vocab-class_vocab_names[i]
return class_vocab,len(vocab)
def predict(self,test_data):
test_data = self.preProcess(test_data)
fre=self.Tokenize(test_data)
label_score=[]
#print(self.class_frequencies[0]['enjoy'])
for i in range(len(self.unique_class_Names)):
word_label_score=[]
class_word_freq=self.class_frequencies[i]
for j in fre.items():
word=j[0]
wordcount=j[1]
class_word_occurence=0
if word in class_word_freq.keys():
class_word_occurence=class_word_freq[word]
p_i=(class_word_occurence+0.25)/(self.class_vocab[i]+(self.total_vocab*0.25))
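# additive (Lidstone) smoothing with alpha = 0.25 over the class vocabulary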
word_score=wordcount*np.log(p_i)
word_label_score.append(word_score)
label_score.append(sum(word_label_score)+self.log_prior[i])
return label_score.index(max(label_score))
Test=navieBayes(x_train,y_train)
Test.TrainNavie()
#count=0
#for row,i in enumerate(x_train[100:500]):
# test=Test.predict(i)
# pred_label=Test.unique_class_Names[test]
# if(pred_label==y_train[100+row]):
# count+=1
#print(count)
def report_predict_test(test,filename="Submission.csv"):
print("prediction_started")
csvfile=open(filename,'w', newline='')
obj=csv.writer(csvfile)
obj.writerow(("Id","Category"))
for rowIndex,test_sample in enumerate(test):
test=Test.predict(test_sample)
print(rowIndex)
pred_class=Test.unique_class_Names[test]
obj.writerow((rowIndex,pred_class))
csvfile.close()
def validate(x_valid,y_valid):
accuracy=0
#print(len(x_valid))
for rowIndex,test_sample in enumerate(x_valid):
test=Test.predict(test_sample)
print(rowIndex)
pred_class=Test.unique_class_Names[test]
if(pred_class==y_valid[rowIndex]):
accuracy+=1
return accuracy/len(x_valid)
acc=validate(x_valid,y_valid)
print(acc)
#report_predict_test(test,"abc.csv")
#print(len(x_train))
#hyper=range(100)
#hyper/=100
#best_accuracy_h=0
#for i in hyper:
#accuracy=validate(x_valid,y_valid)
#print(accuracy)
#print(accuracy)
#print(x_train[1])
#a=Test.Tokenize(Test.preProcess(x_train[1]))
#print(a.items())
""" y_test=[]
with open('abc.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
else:
y_test.append(row[1])
line_count += 1
#print(f'Processed {line_count} lines.')
accuracy=validate(test,y_test)
print(accuracy)
"""
| null |
naviebayes.py
|
naviebayes.py
|
py
| 6,792 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "nltk.tokenize.RegexpTokenizer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nltk.stem.SnowballStemmer",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "nltk.text.Text",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 146,
"usage_type": "call"
}
] |
298405180
|
import pandas as pd
import numpy as np
import argparse
import os
"""
This script takes the individual UrbanSim input .csv files and
compiles them into a (Python 2) .h5 data store object, stored
locally, and used for estimation, simulation or both in the
bayarea_urbansim UrbanSim implementation. The last simulation
step in bayarea_urbansim then converts the updated .h5 back to
individual csv's for use in ActivitySynth and elsewhere.
"""
baseyear = False
beam_bucket = 'urbansim-beam'
csv_fnames = {
'parcels': 'parcels.csv',
'buildings': 'buildings.csv',
'jobs': 'jobs.csv',
'establishments': 'establishments.csv',
'households': 'households.csv',
'persons': 'persons.csv',
'rentals': 'craigslist.csv',
'units': 'units.csv',
'mtc_skims': 'mtc_skims.csv',
'beam_skims_raw': '30.skims-smart-23April2019-baseline.csv.gz',
'zones': 'zones.csv',
# the following nodes and edges .csv's aren't used by bayarea_urbansim
# they're just being loaded here so they can be passed through to the
# output data directory for use in activitysynth
'drive_nodes': 'bay_area_tertiary_strongly_nodes.{0}',
'drive_edges': 'bay_area_tertiary_strongly_edges.{0}',
'walk_nodes': 'bayarea_walk_nodes.{0}',
'walk_edges': 'bayarea_walk_edges.{0}',
}
data_store_fname = 'baus_model_data.h5'
nodes_and_edges = False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make H5 store from csvs.')
parser.add_argument(
'--baseyear', '-b', action='store_true',
help='specify the simulation year')
parser.add_argument(
'--input-data-dir', '-i', action='store', dest='input_data_dir',
help='full (pandas-compatible) path to input data directory',
required=True)
parser.add_argument(
'--output-data-dir', '-o', action='store', dest='output_data_dir',
help='full path to the LOCAL output data directory',
required=True)
parser.add_argument(
'--output-fname', '-f', action='store', dest='output_fname',
help='filename of the .h5 datastore')
parser.add_argument(
'--nodes-and-edges', '-n', action='store_true', dest='nodes_and_edges')
options = parser.parse_args()
if options.baseyear:
baseyear = options.baseyear
if options.nodes_and_edges:
nodes_and_edges = options.nodes_and_edges
if options.output_fname:
data_store_fname = options.output_fname
input_data_dir = options.input_data_dir
output_data_dir = options.output_data_dir
try:
parcels = pd.read_csv(
input_data_dir + csv_fnames['parcels'], index_col='parcel_id',
dtype={'parcel_id': int, 'block_id': str, 'apn': str})
except ValueError:
parcels = pd.read_csv(
input_data_dir + csv_fnames['parcels'], index_col='primary_id',
dtype={'primary_id': int, 'block_id': str, 'apn': str})
buildings = pd.read_csv(
input_data_dir + csv_fnames['buildings'], index_col='building_id',
dtype={'building_id': int, 'parcel_id': int})
buildings['res_sqft_per_unit'] = buildings[
'residential_sqft'] / buildings['residential_units']
buildings.loc[buildings['res_sqft_per_unit'] == np.inf, 'res_sqft_per_unit'] = 0
# building_types = pd.read_csv(
# d + 'building_types.csv',
# index_col='building_type_id', dtype={'building_type_id': int})
# building_types.head()
try:
rentals = pd.read_csv(
input_data_dir + csv_fnames['rentals'],
index_col='pid', dtype={
'pid': int, 'date': str, 'region': str,
'neighborhood': str, 'rent': float, 'sqft': float,
'rent_sqft': float, 'longitude': float,
'latitude': float, 'county': str, 'fips_block': str,
'state': str, 'bathrooms': str})
except ValueError:
rentals = pd.read_csv(
input_data_dir + csv_fnames['rentals'],
index_col=0, dtype={
'date': str, 'region': str,
'neighborhood': str, 'rent': float, 'sqft': float,
'rent_sqft': float, 'longitude': float,
'latitude': float, 'county': str, 'fips_block': str,
'state': str, 'bathrooms': str})
units = pd.read_csv(
input_data_dir + csv_fnames['units'], index_col='unit_id',
dtype={'unit_id': int, 'building_id': int})
try:
households = pd.read_csv(
input_data_dir + csv_fnames['households'],
index_col='household_id', dtype={
'household_id': int, 'block_group_id': str, 'state': str,
'county': str, 'tract': str, 'block_group': str,
'building_id': int, 'unit_id': int, 'persons': float})
except ValueError:
households = pd.read_csv(
input_data_dir + csv_fnames['households'],
index_col=0, dtype={
'household_id': int, 'block_group_id': str, 'state': str,
'county': str, 'tract': str, 'block_group': str,
'building_id': int, 'unit_id': int, 'persons': float})
households.index.name = 'household_id'
try:
persons = pd.read_csv(
input_data_dir + csv_fnames['persons'], index_col='person_id',
dtype={'person_id': int, 'household_id': int})
except ValueError:
persons = pd.read_csv(
input_data_dir + csv_fnames['persons'], index_col=0,
dtype={'person_id': int, 'household_id': int})
persons.index.name = 'person_id'
try:
jobs = pd.read_csv(
input_data_dir + csv_fnames['jobs'], index_col='job_id',
dtype={'job_id': int, 'building_id': int})
except ValueError:
jobs = pd.read_csv(
input_data_dir + csv_fnames['jobs'], index_col=0,
dtype={'job_id': int, 'building_id': int})
jobs.index.name = 'job_id'
establishments = pd.read_csv(
input_data_dir + csv_fnames['establishments'],
index_col='establishment_id', dtype={
'establishment_id': int, 'building_id': int,
'primary_id': int})
zones = pd.read_csv(
input_data_dir + csv_fnames['zones'], index_col='zone_id')
mtc_skims = pd.read_csv(
input_data_dir + csv_fnames['mtc_skims'], index_col=0)
beam_skims_raw = pd.read_csv(
input_data_dir + csv_fnames['beam_skims_raw'])
beam_skims_raw.rename(columns={
'generalizedCost': 'gen_cost', 'origTaz': 'from_zone_id',
'destTaz': 'to_zone_id'}, inplace=True)
# this data store is just a temp file that only needs to exist
# while the simulation is running. data is stored as csv's
# before and afterwards. therefore a temporary, relative filepath
# is specified here.
output_filepath = os.path.join(output_data_dir, data_store_fname)
if os.path.exists(output_filepath):
os.remove(output_filepath)
print('Deleting existing data store to create the new one...')
store = pd.HDFStore(output_filepath)
store.put('parcels', parcels, format='t')
store.put('units', units, format='t')
store.put('rentals', rentals, format='t')
# data pre-processing hasn't yet taken place if
# starting with base-year input data
if baseyear:
store.put('households', households, format='t')
store.put('jobs', jobs, format='t')
store.put('buildings', buildings, format='t')
# if starting from non-base-year (i.e. intra-simulation) data
# then the pre-processing data steps should have already
# occurred and we simply rename the main data tables so that
# bayarea_urbansim doesn't try to re-pre-process them
else:
store.put('households_preproc', households, format='t')
store.put('jobs_preproc', jobs, format='t')
store.put('buildings_preproc', buildings, format='t')
store.put('persons', persons, format='t')
store.put('establishments', establishments, format='t')
store.put('mtc_skims', mtc_skims, format='t')
store.put('zones', zones, format='t')
store.put('beam_skims_raw', beam_skims_raw, format='t')
if nodes_and_edges:
drive_nodes = pd.read_csv(
input_data_dir + csv_fnames['drive_nodes']).set_index('osmid')
drive_edges = pd.read_csv(
input_data_dir + csv_fnames['drive_edges']).set_index('uniqueid')
walk_nodes = pd.read_csv(
input_data_dir + csv_fnames['walk_nodes']).set_index('osmid')
walk_edges = pd.read_csv(
input_data_dir + csv_fnames['walk_edges']).set_index('uniqueid')
store.put('drive_nodes', drive_nodes, format='t')
store.put('drive_edges', drive_edges, format='t')
store.put('walk_nodes', walk_nodes, format='t')
store.put('walk_edges', walk_edges, format='t')
store.keys()
store.close()
print('UrbanSim model data now available at {0}'.format(
os.path.abspath(output_filepath)))
| null |
scripts/make_model_data_hdf.py
|
make_model_data_hdf.py
|
py
| 9,053 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "pandas.HDFStore",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 235,
"usage_type": "attribute"
}
] |
47636741
|
from django.urls import path
from cms import views
app_name = 'cms'
urlpatterns = [
#Card
path('card/', views.card_list, name='card_list'), #list
path('card/add', views.card_edit, name='card_add'), #create
path('card/mod/<int:card_id>/', views.card_edit, name='card_mod'), #edit
path('card/del/<int:card_id>/', views.card_del, name='card_del'), #delete
]
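# Example (assuming this urlconf is included at the project root):
#   reverse('cms:card_mod', kwargs={'card_id': 1})  -> '/card/mod/1/'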
| null |
mydeck/cms/urls.py
|
urls.py
|
py
| 396 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cms.views.card_list",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cms.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cms.views.card_edit",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cms.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cms.views.card_edit",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cms.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cms.views.card_del",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cms.views",
"line_number": 11,
"usage_type": "name"
}
] |
31562265
|
import requests
import json
def errorLogger(error):
with open("log", "a") as f:
f.write(f"\n\n{error}")
def makeReadable(number):
if type(number) != int:
return number
numberstring = str(number)
newNumberstring = ""
for x in numberstring:
if len(numberstring) == 7:
newNumberstring += x
if len(newNumberstring) == 1:
newNumberstring += "."
if len(newNumberstring) == 5:
newNumberstring += "."
if len(numberstring) == 6:
newNumberstring += x
if len(newNumberstring) == 3:
newNumberstring += "."
if len(numberstring) == 5:
newNumberstring += x
if len(newNumberstring) == 2:
newNumberstring += "."
if len(numberstring) == 4:
newNumberstring += x
if len(newNumberstring) == 1:
newNumberstring += "."
return newNumberstring
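# Worked examples: makeReadable(1234567) -> '1.234.567', makeReadable(12345) -> '12.345';
# ints outside the 4-7 digit range fall through every branch and return ''.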
def requestStats():
try:
r = requests.get("https://api.covid19api.com/summary")
returner = json.loads(str(r.text))
return returner
except Exception as e:
print("! While trying to establish a internet-connection, \nan error occured, \ntry disabling your firewall, or adding this program to the whitelist !")
errorLogger(e)
RESPONSE = requestStats()
def globalStats():
rr = RESPONSE
c = ""
x = 0
for x in rr['Global']:
if x == 'NewConfirmed':
continue
if x == 'NewDeaths':
continue
if x == 'NewRecovered':
continue
c += f"{x}: {makeReadable(rr['Global'][x])}\n"
c += f"\nCurrently ill: {makeReadable(rr['Global']['TotalConfirmed'] - (rr['Global']['TotalDeaths'] + rr['Global']['TotalRecovered']))} | {round((rr['Global']['TotalConfirmed'] - (rr['Global']['TotalDeaths'] + rr['Global']['TotalRecovered'])) * 100/rr['Global']['TotalConfirmed'])}%\n"
c += f"\nGlobalLethalityRate : {round((rr['Global']['TotalDeaths']*100)/rr['Global']['TotalConfirmed'], 2)}%\n*not accurate, because there are not tested infectious cases*"
return c
def displayAllCountries():
rr = RESPONSE
c = ""
for x in rr['Countries']:
c += f"{x['Country']} | {x['CountryCode']}\n"
return c
def displayOneCountry(Country):
rr = RESPONSE
c = ""
for x in rr['Countries']:
# match by full country name, URL slug or ISO country code (case-insensitive)
if Country.lower() in (x['Country'].lower(), x['Slug'].lower(), x['CountryCode'].lower()):
for y in x:
if y in ('Date', 'NewConfirmed', 'NewDeaths', 'NewRecovered'):
continue
if y == 'TotalConfirmed':
c += "\n"
c += f"{y}: {makeReadable(x[y])}\n"
c += f"Currently ill: {makeReadable(x['TotalConfirmed'] - (x['TotalDeaths'] + x['TotalRecovered']))} | {round((x['TotalConfirmed'] - (x['TotalDeaths'] + x['TotalRecovered'])) * 100/x['TotalConfirmed'])}%\n"
c += f"\nDeaths from Global: {round((x['TotalDeaths']*100)/rr['Global']['TotalDeaths'], 2)}%"
c += f"\nCases from Global: {round((x['TotalConfirmed']*100)/rr['Global']['TotalConfirmed'], 2)}%"
c += f"\nRecovered from Global: {round((x['TotalRecovered']*100)/rr['Global']['TotalRecovered'], 2)}%"
c += f"\nLethality rate in {x['Country']}: {round((x['TotalDeaths']*100)/x['TotalConfirmed'], 2)}% \n*not accurate, because there are not tested infectious cases*"
break
if c == "":
c = "Country not found - try using `c!countries` for a list of all available countries"
return c
def displayLeaderboards(Type):
Type = Type.lower()
if Type == "deaths":
typestring = "TotalDeaths"
elif Type == "cases":
typestring = "TotalConfirmed"
elif Type == "recovered":
typestring = "TotalRecovered"
else:
typestring = "TotalConfirmed"
numberarray = []
for x in RESPONSE['Countries']:
for y in x:
if y == typestring:
numberarray.append(x[typestring])
break
numberarray.sort(reverse=True)
y = 0
topFive = []
for x in numberarray:
if y == 5:
break
topFive.append(x)
y = y + 1
finalTopFive = ""
for x in topFive:
for y in RESPONSE['Countries']:
if x == y[typestring]:
finalTopFive += f"{y['Country']}: {makeReadable(x)}\n"
return finalTopFive
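# Minimal usage sketch (network access happens at import time, because RESPONSE
# is fetched when the module loads):
#   print(globalStats())
#   print(displayOneCountry('germany'))
#   print(displayLeaderboards('deaths'))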
| null |
Covid19/covid_backend.py
|
covid_backend.py
|
py
| 5,892 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
}
] |
129650987
|
from Bio import PDB
from Bio.PDB import PDBParser, PDBIO
from Bio.PDB.Atom import Atom
from Bio.PDB.Residue import Residue
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from Bio.PDB.Structure import Structure
import array
#create structure cytosine
cytosine = Structure('cytosine')
#create model
my_model = Model(0)
cytosine.add(my_model)
#create chain
my_chain = Chain('A')
my_model.add(my_chain)
#create residue
my_residue = Residue((' ', 1, ' '), 'C', '')
my_chain.add(my_residue)
#atoms from task_2.py
atoms = [
{'name': 'N1', 'coord': array.array('f',[64.612, 45.818, 10.877]), 'bfactor': 42.59, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'N1', 'serial_number': 1},
{'name': 'C2', 'coord': array.array('f',[65.472, 46.868, 10.634]), 'bfactor': 44.48, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'C2', 'serial_number': 2},
{'name': 'O2', 'coord': array.array('f',[64.981, 47.978, 10.348]), 'bfactor': 42.73, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'O2', 'serial_number': 3},
{'name': 'N3', 'coord': array.array('f',[66.821, 46.659, 10.722]), 'bfactor': 42.28, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'N3', 'serial_number': 4},
{'name': 'C4', 'coord': array.array('f',[67.275, 45.452, 11.056]), 'bfactor': 43.75, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'C4', 'serial_number': 5},
{'name': 'N4', 'coord': array.array('f',[68.586, 45.272, 11.180]), 'bfactor': 44.57, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'N4', 'serial_number': 6},
{'name': 'C5', 'coord': array.array('f',[66.402, 44.364, 11.291]), 'bfactor': 44.20, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'C5', 'serial_number': 7},
{'name': 'C6', 'coord': array.array('f',[65.095, 44.589, 11.192]), 'bfactor': 44.33, 'occupancy': 1.0, 'altloc': ' ', 'fullname': 'C6', 'serial_number': 8}
]
#create atoms
for atom in atoms:
my_atom = Atom(
atom['name'],
atom['coord'],
atom['bfactor'],
atom['occupancy'],
atom['altloc'],
atom['fullname'],
atom['serial_number']
)
my_residue.add(my_atom)
#save to file
out = PDBIO()
out.set_structure(cytosine)
out.save('my_residue.pdb')
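# Optional sanity check (PDBParser is already imported above):
#   parser = PDBParser(QUIET=True)
#   reloaded = parser.get_structure('cytosine', 'my_residue.pdb')
#   print(len(list(reloaded.get_atoms())))  # expect 8 atoms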
| null |
task2.py
|
task2.py
|
py
| 2,103 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Bio.PDB.Structure.Structure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Bio.PDB.Model.Model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Bio.PDB.Chain.Chain",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Bio.PDB.Residue.Residue",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Bio.PDB.Atom.Atom",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Bio.PDB.PDBIO",
"line_number": 51,
"usage_type": "call"
}
] |
542783565
|
from datetime import timedelta
class Settings:
"A class to store all settings for alen invasion."
def __init__ (self):
"""initialize the game's settings"""
#version
self.version = "1.0.1 git (2020.03.30)"
#screen settings
self.screen_width = 1080
self.screen_height = 720
self.bg_colour = (10, 3, 7)
#ship settings
self.max_ship_speed = 1.9
self.max_ship_acceleration = .02
#bullet settings
self.bullet_speed = 1.9
self.bullet_width = 4
self.bullet_height = 20
self.bullet_colour = ((20, 200, 20),(20, 200, 20),
(90, 200, 20),(90, 200, 20),(200, 200, 20),(200, 200, 20),
(200, 150, 60),(200, 150, 60),(255, 90, 20),(255, 90, 20),
(200, 00, 00),(200, 00, 00))
self.bullets_allowed = 10
self.max_bullet_acceleration = 0.005
self.max_blaster_temp = 12
self.heat_penalty = timedelta(seconds=1.8)
self.shot_cooldown = timedelta(seconds=0.3)
self.cooldown_time = timedelta(seconds=0.15)
| null |
alien_invasion_Alpha_1.0.1/alien_invasion/settings.py
|
settings.py
|
py
| 1,144 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.timedelta",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 35,
"usage_type": "call"
}
] |
11342966
|
from hpsklearn import HyperoptEstimator, xgboost_classification
from hyperopt import tpe
import pandas as pd
def main():
df_train = pd.read_csv('../train_dataset.csv')
df_test = pd.read_csv('../test_dataset.csv')
X_train, y_train = df_train.iloc[:, 2:].values, df_train.iloc[:, 0].values
X_test, y_test = df_test.iloc[:, 2:].values, df_test.iloc[:, 0].values
estim = HyperoptEstimator(classifier=xgboost_classification('myXG'),
algo=tpe.suggest, max_evals=100, trial_timeout=120, verbose=True)
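    # TPE proposes hyperparameter sets sequentially; the search is capped at
    # 100 trials, each trial limited to 120 seconds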
estim.fit(X_train, y_train)
print("\n\n{}\n\n".format(estim.score(X_test, y_test)))
print("\n\n{}\n\n".format(estim.best_model()))
if __name__ == '__main__':
main()
| null |
MachineLearning/supervised_training/XGBOOST/XG_hyperopt.py
|
XG_hyperopt.py
|
py
| 713 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "hpsklearn.HyperoptEstimator",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "hpsklearn.xgboost_classification",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "hyperopt.tpe.suggest",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "hyperopt.tpe",
"line_number": 17,
"usage_type": "name"
}
] |
128867638
|
import sys
if sys.version_info < (3,6):
sys.exit('Sorry, Python < 3.6 is not supported')
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='PyBIS',
version= '1.18.12',
author='Swen Vermeul • ID SIS • ETH Zürich',
author_email='[email protected]',
description='openBIS connection and interaction, optimized for using with Jupyter',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://sissource.ethz.ch/sispub/openbis/tree/master/pybis',
packages=find_packages(),
license='Apache Software License Version 2.0',
install_requires=[
'pytest',
'requests',
'datetime',
'pandas',
'click',
'texttable',
'tabulate',
],
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
| null |
pybis/src/python/setup.py
|
setup.py
|
py
| 1,068 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.version_info",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 21,
"usage_type": "call"
}
] |
479449955
|
#!/usr/bin/env python
import argparse
import pprint
from scapy.all import sendpfast, get_if_hwaddr
from scapy.all import Ether, IP, UDP
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ip", metavar="IP", type=str, help="IP addr of the receiver")
parser.add_argument("time", metavar="Time", type=int, help="Time (in seconds) to send the traffic")
parser.add_argument("--bw", metavar="Bandwidth", default=0.02, type=float, help="Bandwidth (in Mbps) of the traffic (default=20Kbps)")
args = parser.parse_args()
iface = "eth0"
load = ''.join('f' for _ in range(1000))
# Each packet has ~8Kb load
pkt = Ether(src=get_if_hwaddr(iface), dst="ff:ff:ff:ff:ff:ff") / IP(dst=args.ip) / UDP(dport=4321, sport=1234) / load
pkt.show2()
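    # bw is in Mbps and each packet carries ~8 Kb, so packets = bw*1000*time/8;
    # the extra 10% below is presumably headroom so the stream lasts long enough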
num_packets = args.bw * 1000 * args.time / 8
num_packets = int(1.1 * num_packets)
summary = sendpfast(pkt, iface=iface, mbps=args.bw, loop=num_packets, file_cache=True, parse_results=True)
    summary.pop('warnings', None)  # drop the warnings entry if present
print("Summary:")
pprint.pprint(summary)
return
if __name__ == '__main__':
main()
| null |
assignment2/send.py
|
send.py
|
py
| 1,163 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scapy.all.Ether",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scapy.all.get_if_hwaddr",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scapy.all.IP",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scapy.all.UDP",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scapy.all.sendpfast",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 32,
"usage_type": "call"
}
] |
482679700
|
# -*- coding: utf-8 -*-
import time
from functools import wraps
import logging
logger = logging.getLogger(__file__)
def fn_timer(function):
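    # functools.wraps preserves the wrapped function's __name__ and docstring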
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
logger.info("Total time running %s(%s): %s seconds" %
(function.func_name, str(args), str(t1 - t0)))
return result
return function_timer
@fn_timer
def test(a):
    time.sleep(1)
if __name__ == "__main__":
test("ssss")
| null |
src/Common/Tools/FNDecorator.py
|
FNDecorator.py
|
py
| 572 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 22,
"usage_type": "call"
}
] |
588621746
|
# Solar Wifi Weather Station
# Very alpha at this stage
# Last updated October 19, 2019
#
# This is heavily based on 3KUdelta's Solar WiFi Weather Station.
# There was much cutting and pasting!
# See https://github.com/3KUdelta/Solar_WiFi_Weather_Station
# This in turn was based on the work of Open Green Energy:
# https://www.instructables.com/id/Solar-Powered-WiFi-Weather-Station-V20/
# Everyone has done very solid work! I am impressed!
#
# I wanted to be able to update the code over WiFi and watch the serial
# output on my machine locally, thus MicroPython.
# It also helps me hone my craft, whatever little I have :)
#
VERSION = '0.5.0'
import time, sys, gc
# DST CALCULATION
SECS_IN_DAY = 86400
SECS_IN_HOUR = 3600
DST_ADJ_AMT = 0 # Amount of seconds to adjust for seasonal time change
# See dst_us.json for sample configuration
curr_dst_status = 0    # Current DST Status -- 0 = Winter, 1 = Summer
saved_dst_status = 0 # DST Status stored in SPIFFS
# FORECAST CALCULATION
current_timestamp = 0 # Actual timestamp read from NTPtime
saved_timestamp = 0 # Timestamp stored in SPIFFS
# FORECAST RESULT
accuracy = 0           # counter of stored values available for accurate forecasting
# END GLOBAL VARIABLES
def LoadConfig():
import json
f = open('config.json', 'r')
return json.loads(f.read())
def ConnectWiFi(CONF_WIFI, SLEEP_TIME_MIN, ERRORFILE):
import network
sta_if = network.WLAN(network.STA_IF)
ap_if = network.WLAN(network.AP_IF)
ap_if.active(False)
if not sta_if.isconnected():
print('Connecting to network...')
sta_if.active(True)
sta_if.connect(CONF_WIFI['ssid'], CONF_WIFI['pass'])
count = 0
while not sta_if.isconnected():
count += 1
print('.', end='')
if count == 15:
from cycle_machine import GoToSleep
print('Could not connect. Taking a nap.')
GoToSleep(SLEEP_TIME_MIN * 60, ERRORFILE, 'Could not connect to network.')
time.sleep(1)
print('network config:', sta_if.ifconfig())
def SetNTPTime(NTP_HOSTS, SLEEP_TIME_MIN, ERRORFILE):
import sntp # modified ntptime
print('Now setting clock with NTP server')
time_is_set = False
count = 0
while not time_is_set:
print('.', end='')
time_is_set = sntp.settime(NTP_HOSTS)
if time_is_set:
print('Set time successfully.')
time.sleep(1)
count += 1
if count == 5:
from cycle_machine import GoToSleep
print('Could not connect to NTP Server!\nSleeping...')
GoToSleep(SLEEP_TIME_MIN * 60, ERRORFILE, 'Could not connect to NTP Server.')
del sys.modules['sntp']
gc.collect()
def CheckForSummerTime(COUNTRY):
global curr_dst_status, current_timestamp, DST_ADJ_AMT
import dst
(curr_dst_status,
current_timestamp,
DST_ADJ_AMT) = dst.SummerTimeAdjustment(COUNTRY,
current_timestamp)
del sys.modules['dst']
gc.collect()
def FmtDateTime(timestamp):
t = time.localtime(timestamp)
fmt = '%02d/%02d/%04d %02d:%02d:%02d'
return fmt % (t[1], t[2], t[0], t[3], t[4], t[5])
def ConfigureTime(CONF_TIME, SLEEP_TIME_MIN, ERRORFILE):
global current_timestamp
SetNTPTime(CONF_TIME['NTP_HOSTS'], SLEEP_TIME_MIN, ERRORFILE)
current_timestamp = time.time() + CONF_TIME['TZ'] * SECS_IN_HOUR
if CONF_TIME['DST']['USE_DST']:
CheckForSummerTime(CONF_TIME['DST']['COUNTRY'])
print('Current UNIX timestamp: %d\nDST Status: %d\nDate & Time: %s'
% (current_timestamp, curr_dst_status, FmtDateTime(current_timestamp)))
def MeasurementEvent(CONF_WEATHER):
import measurement
result = measurement.TakeMeasurement(CONF_WEATHER)
del sys.modules['measurement']
gc.collect()
return result
def FirstTimeRun(CONF_FILE, rel_Pres_Rounded_hPa):
from cycle_machine import ResetMachine
global accuracy
print('---> Starting initialization process')
accuracy = 1
try:
myDataFile = open(CONF_FILE['DATAFILE'], 'w')
except:
print('ERROR: Failed to open datafile.')
print('Stopping process - there is an OS problem here.')
sys.exit()
myDataFile.write('%d\n%d\n%d\n%d\n'
% (current_timestamp,
curr_dst_status,
accuracy,
current_timestamp))
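    # seed the 6-hour history (12 half-hour slots) with the current pressure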
for _ in range(12):
myDataFile.write('%d\n' % rel_Pres_Rounded_hPa)
print('*** Saved initial pressure data. ***')
myDataFile.close()
myDataFile = open(CONF_FILE['VERIFYFILE'], 'w')
myDataFile.write('%d\n' % current_timestamp)
myDataFile.close()
print('Doing a reset now.')
ResetMachine()
def VerifyLastRunCompleted(verify_ts, VERIFYFILE, ERRORFILE):
f = open(VERIFYFILE, 'r')
last_ts = int(f.readline())
f.close()
if last_ts != verify_ts:
import machine
f = open(ERRORFILE, 'a+')
f.write('Reset after %s\tCause: %s\n'
% (FmtDateTime(verify_ts), machine.reset_cause()))
f.close()
def ReadDataFile(CONF_FILE, rel_Pres_Rounded_hPa):
global saved_dst_status, saved_timestamp, accuracy
try:
myDataFile = open(CONF_FILE['DATAFILE'], 'r')
except:
print('Failed to open file for reading -- assuming First Run')
FirstTimeRun(CONF_FILE, rel_Pres_Rounded_hPa)
print('---> Now reading from ESP8266')
saved_timestamp = int(myDataFile.readline())
saved_dst_status = int(myDataFile.readline())
accuracy = int(myDataFile.readline())
verifier = int(myDataFile.readline())
VerifyLastRunCompleted(verifier, CONF_FILE['VERIFYFILE'], CONF_FILE['ERRORFILE'])
print('Saved Timestamp: %d\nSaved DST Status: %d\nSaved Accuracy Value: %d'
% (saved_timestamp, saved_dst_status, accuracy))
pressure_value = []
for _ in range(12):
pval = int(myDataFile.readline())
pressure_value.append(pval)
print('Last 12 saved pressure values:', ('%d; ' * 12)[:-2] % tuple(pressure_value))
myDataFile.close()
return pressure_value
def CheckForTimeChange():
# Has the time just changed?
# Return adjustment to time difference calculation in seconds
if curr_dst_status != saved_dst_status:
if curr_dst_status: # Switch to Summer Time
return DST_ADJ_AMT
        else:                # Switch back to Standard (Winter) Time
return -DST_ADJ_AMT
else:
return 0
def WriteDataFile(write_timestamp, DATAFILE, pressure_value):
try:
myDataFile = open(DATAFILE, 'w')
print('---> Now writing to ESP8266')
myDataFile.write('%d\n%d\n%d\n%d\n'
% (write_timestamp,
curr_dst_status,
accuracy,
current_timestamp))
for value in pressure_value:
myDataFile.write('%d\n' % value)
myDataFile.close()
except:
print('ERROR: Failure writing to data file!')
sys.exit()
def ZambrettiPrediction(LANGUAGE, rel_Pres_Rounded_hPa, pressure_value):
import zambretti
month = time.localtime(current_timestamp)[1]
prediction = zambretti.MakePrediction(
LANGUAGE,
rel_Pres_Rounded_hPa,
pressure_value,
accuracy,
month)
del sys.modules['zambretti']
gc.collect()
return prediction
def main():
global accuracy
pressure_value = [] # holds 12 pressure values in hPa (6 hours data, [0] most recent)
print('Start of Solar WiFi Weather Station %s' % VERSION)
print('Free mem: %d' % gc.mem_free())
CONF = LoadConfig()
ConnectWiFi(CONF['wifi'], CONF['other']['SLEEP_TIME_MIN'], CONF['file']['ERRORFILE'])
ConfigureTime(CONF['time'], CONF['other']['SLEEP_TIME_MIN'], CONF['file']['ERRORFILE'])
result = MeasurementEvent(CONF['weather']) #acquire sensor data
pressure_value = ReadDataFile(CONF['file'], result['rel_Pres_Rounded_hPa']) #read stored values and update data if more recent data is available
if CONF['time']['DST']['USE_DST']:
dst_adjustment = CheckForTimeChange()
else:
dst_adjustment = 0
ts_diff = current_timestamp - saved_timestamp + dst_adjustment
print('Timestamp difference: %s' % ts_diff)
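    # stored data older than 6 hours is stale -- start the pressure history over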
if ts_diff >= 6 * SECS_IN_HOUR:
FirstTimeRun(CONF['file'], result['rel_Pres_Rounded_hPa'])
elif ts_diff >= SECS_IN_HOUR / 2:
# prepend list with new pressure value and move it right one notch
pressure_value = [result['rel_Pres_Rounded_hPa']] + pressure_value[:-1]
if accuracy < 12:
accuracy += 1
WriteDataFile(current_timestamp, CONF['file']['DATAFILE'], pressure_value)
else:
WriteDataFile(saved_timestamp + dst_adjustment, CONF['file']['DATAFILE'], pressure_value)
# make sure we record on the half hour
interval = CONF['other']['SLEEP_TIME_MIN'] * 60
diff_from_half_hour = SECS_IN_HOUR / 2 - ts_diff
if diff_from_half_hour >= 0:
if diff_from_half_hour >= interval:
sleep_time_secs = interval
else:
sleep_time_secs = diff_from_half_hour
else:
sleep_time_secs = interval
(ZambrettisWords,
trend_in_words,
accuracy_in_percent) = ZambrettiPrediction(CONF['other']['LANGUAGE'],
result['rel_Pres_Rounded_hPa'],
pressure_value)
package = {
'values':[
result['temp_F'],
result['humidity'],
result['dewPt_F'],
result['dewPtSpread_F'],
result['heatIndex_F'],
result['measured_Pres_inHg'],
result['rel_Pres_inHg'],
result['volt'],
accuracy_in_percent,
ZambrettisWords,
trend_in_words
],
'apps': CONF['apps'],
'sleep_time_secs': sleep_time_secs,
'verify_file': CONF['file']['VERIFYFILE'],
'error_file': CONF['file']['ERRORFILE'],
'timestamp': current_timestamp
}
del CONF
del result
del pressure_value
gc.collect() # take out the garbage
print('Free mem when leaving weather_station: %d' % gc.mem_free())
return package
| null |
weather_station.py
|
weather_station.py
|
py
| 10,654 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.loads",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "network.WLAN",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "network.STA_IF",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "network.WLAN",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "network.AP_IF",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "cycle_machine.GoToSleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sntp.settime",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cycle_machine.GoToSleep",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sys.modules",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "dst.SummerTimeAdjustment",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "sys.modules",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "measurement.TakeMeasurement",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "sys.modules",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cycle_machine.ResetMachine",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "machine.reset_cause",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "zambretti.MakePrediction",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "sys.modules",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "gc.mem_free",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "gc.mem_free",
"line_number": 333,
"usage_type": "call"
}
] |
91697717
|
"""No adversarial training
"""
#import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
from keras.layers import Input, Conv1D, Embedding, Dropout
from keras.layers import MaxPool1D, Dense, Flatten
from keras.models import Model
from utils_dann import flipGradientTF
import numpy as np
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
# original paper: https://arxiv.org/pdf/1505.07818.pdf
# model reference: https://cloud.githubusercontent.com/assets/7519133/19722698/9d1851fc-9bc3-11e6-96af-c2c845786f28.png
import sys
data_list = [
('vaccine', 'vaccine_year'),
# ('amazon', 'amazon_month'),
('amazon', 'amazon_year'),
# ('dianping', 'dianping_month'),
('dianping', 'dianping_year'),
# ('google', 'economy_month'),
# ('google', 'economy_year'),
# ('google', 'parties_year'),
# ('vaccine', 'vaccine_month'),
# ('yelp_hotel', 'yelp_hotel_month'),
('yelp_hotel', 'yelp_hotel_year'),
# ('yelp_rest', 'yelp_rest_month'),
('yelp_rest', 'yelp_rest_year'),
# ('economy', 'economy_year'),
# ('economy', 'economy_month'),
]
def data_loader(data_name):
train_path = './data/'+ data_name + '_source.txt'
valid_path = './data/' + data_name + '_valid.txt'
test_path = './data/' + data_name + '_target.txt'
domain_data = []
train_data = []
valid_data = []
test_data = []
label_encoder = set()
domain_encoder = set()
for dpath in [train_path, valid_path, test_path]:
with open(dpath) as dfile:
dfile.readline()
for line in dfile:
line = line.strip()
if len(line.strip()) < 5:
continue # filter out blank lines
line = line.split('\t')
dlabel = [int(line[1])]
label = [int(line[0])]
line = [int(item) for item in line[2:]]
label_encoder.add(label[0])
domain_encoder.add(dlabel[0])
if dpath == train_path:
train_data.append(label+line)
if dpath == test_path:
test_data.append(label+line)
if dpath == valid_path:
valid_data.append(label+line)
if dpath in [train_path, valid_path]:
domain_data.append(dlabel + line)
return domain_data, train_data, valid_data, test_data, label_encoder, domain_encoder
def data_gen(docs, batch_size=64):
"""
Batch generator
"""
np.random.shuffle(docs) # random shuffle the training documents
steps = int(len(docs) / batch_size)
if len(docs) % batch_size != 0:
steps += 1
for step in range(steps):
batch_docs = []
batch_labels = []
for idx in range(step*batch_size, (step+1)*batch_size):
if idx > len(docs) -1:
break
batch_docs.append(np.asarray(docs[idx][1:]))
batch_labels.append(int(docs[idx][0]))
# convert to array
batch_docs = np.asarray(batch_docs)
batch_labels = np.asarray(batch_labels)
yield batch_docs, batch_labels
def domain_data_gen(domain_docs, batch_size=64):
""" Generate domain data
"""
# load the data
tmp_docs = np.random.choice(list(range(len(domain_docs))), size=batch_size, replace=False)
tmp_docs = [domain_docs[idx] for idx in tmp_docs]
batch_docs = {'domain_input': []}
batch_labels = {'domain': []}
for tmp_doc in tmp_docs:
batch_docs['domain_input'].append(tmp_doc[1:])
batch_labels['domain'].append(tmp_doc[0])
return batch_docs, batch_labels
def run_dnn(data_pair):
print('Working on: '+data_pair[1])
wt_path = './weights/'+ data_pair[1] + '.npy'
train_path = './data/'+ data_pair[1] + '_source.txt'
valid_path = './data/' + data_pair[1] + '_valid.txt'
test_path = './data/'+ data_pair[1] + '_target.txt'
epoch_num = 15
# parameters
sent_len = 60 # the max length of sentence
# load the data
domain_data, train_data, valid_data, test_data, label_encoder, domain_encoder = data_loader(data_pair[1])
label_encoder = list(sorted(label_encoder))
domain_encoder = list(sorted(domain_encoder))
"""Preprocess"""
# load weights
weights = np.load(wt_path)
# inputs
text_input = Input(shape=(sent_len,), dtype='int32', name='text_input')
domain_input = Input(shape=(sent_len,), dtype='int32', name='domain_input')
# shared embedding
embedding = Embedding(
weights.shape[0], weights.shape[1], # size of data embedding
weights=[weights], input_length=sent_len,
trainable=True,
name='embedding'
)
# shared CNN
conv1 = Conv1D(
filters=300,
kernel_size=5,
padding='valid',
strides=1,
)
conv2 = Conv1D(
filters=200,
kernel_size=7,
padding='valid',
strides=1,
)
max_pool = MaxPool1D()
flatten = Flatten()
# start to share
sent_embed = embedding(text_input)
domain_embed = embedding(domain_input)
sent_conv1 = conv1(sent_embed)
domain_conv1 = conv1(domain_embed)
sent_conv2 = conv2(sent_conv1)
domain_conv2 = conv2(domain_conv1)
sent_pool = max_pool(sent_conv2)
domain_pool = max_pool(domain_conv2)
sent_flat = flatten(sent_pool)
domain_flat = flatten(domain_pool)
# for sentiment clf
dense_1 = Dense(128, activation='relu')(sent_flat)
dense_dp = Dropout(0.2)(dense_1)
# for domain prediction
hp_lambda = 0.01
# flip = flipGradientTF.GradientReversal(hp_lambda)(domain_flat)
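    # the gradient-reversal layer (DANN) is intentionally disabled in this
    # "no adversarial training" baseline; the domain head sees plain features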
dense_da = Dense(128, activation='relu')(domain_flat)
dense_da_dp = Dropout(0.2)(dense_da)
da_preds = Dense(len(domain_encoder), activation='softmax', name='domain')(dense_da_dp) # multiple
if 'dianping' in data_pair[1] or 'amazon' in data_pair[1] or 'yelp' in data_pair[1]:
sentiment_preds = Dense(3, activation='softmax', name='senti')(dense_dp) # multilabels
model_sent = Model(
inputs=[text_input, domain_input], outputs=[sentiment_preds, da_preds],
)
model_sent.compile(
loss={'senti': 'categorical_crossentropy', 'domain':'categorical_crossentropy'},
loss_weights={'senti': 1, 'domain':0.005},
optimizer='adam')
else:
sentiment_preds = Dense(1, activation='sigmoid', name='senti')(dense_dp) # binary
model_sent = Model(
inputs=[text_input, domain_input], outputs=[sentiment_preds, da_preds],
)
model_sent.compile(
loss={'senti': 'binary_crossentropy', 'domain':'categorical_crossentropy'},
loss_weights={'senti': 1, 'domain':0.005},
optimizer='adam')
print(model_sent.summary())
best_valid_f1 = 0.0
# fit the model
for e in range(epoch_num):
accuracy = 0.0
loss = 0.0
step = 1
print('--------------Epoch: {}--------------'.format(e))
train_iter = data_gen(train_data)
# train sentiment
# train on batches
for x_train, y_train in train_iter:
            # skip batches that contain only a single class
if len(np.unique(y_train)) == 1:
continue
batch_docs, batch_labels = domain_data_gen(domain_data, len(x_train))
batch_docs['text_input'] = x_train
            # one-hot encode the sentiment labels (domain labels follow below)
if len(label_encoder) > 2:
y_train_tmp = []
for idx in range(len(y_train)):
dlabel = [0]*len(label_encoder)
dlabel[label_encoder.index(y_train[idx])] = 1
y_train_tmp.append(dlabel)
y_train = y_train_tmp
dlabels = []
for idx in range(len(batch_labels['domain'])):
dlabel = [0]*len(domain_encoder)
dlabel[domain_encoder.index(batch_labels['domain'][idx])] = 1
dlabels.append(dlabel)
batch_labels['domain'] = dlabels
batch_labels['senti'] = y_train
# convert to arrays
for key in batch_docs:
batch_docs[key] = np.asarray(batch_docs[key])
for key in batch_labels:
batch_labels[key] = np.asarray(batch_labels[key])
# train sentiment model
tmp_senti = model_sent.train_on_batch(
batch_docs,
batch_labels,
                class_weight={'senti': 'auto', 'domain': 'auto'}
)
# calculate loss and accuracy
loss += tmp_senti[0]
loss_avg = loss / step
if step % 40 == 0:
print('Step: {}'.format(step))
print('\tLoss: {}.'.format(loss_avg))
print('-------------------------------------------------')
step += 1
# each epoch try the valid data, get the best valid-weighted-f1 score
print('Validating....................................................')
valid_iter = data_gen(valid_data)
y_preds_valids = []
y_valids = []
for x_valid, y_valid in valid_iter:
x_valid = np.asarray(x_valid)
tmp_preds_valid = model_sent.predict([x_valid, x_valid])
for item_tmp in tmp_preds_valid[0]:
y_preds_valids.append(item_tmp)
for item_tmp in y_valid:
y_valids.append(int(item_tmp))
if len(y_preds_valids[0]) > 2:
y_preds_valids = np.argmax(y_preds_valids, axis=1)
else:
y_preds_valids = [np.round(item[0]) for item in y_preds_valids]
f1_valid = f1_score(y_true=y_valids, y_pred=y_preds_valids, average='weighted')
print('Validating f1-weighted score: ' + str(f1_valid))
# if the validation f1 score is good, then test
if f1_valid > best_valid_f1:
best_valid_f1 = f1_valid
test_iter = data_gen(test_data)
y_preds = []
y_tests = []
for x_test, y_test in test_iter:
x_test = np.asarray(x_test)
tmp_preds = model_sent.predict([x_test, x_test])
for item_tmp in tmp_preds[0]:
y_preds.append(item_tmp)
for item_tmp in y_test:
y_tests.append(int(item_tmp))
if len(y_preds[0]) > 2:
y_preds = np.argmax(y_preds, axis=1)
else:
y_preds = [np.round(item[0]) for item in y_preds]
test_result = open('./results_no.txt', 'a')
test_result.write(data_pair[1] + '\n')
test_result.write('Epoch ' + str(e) + '..................................................\n')
test_result.write(str(f1_score(y_true=y_tests, y_pred=y_preds, average='weighted')) + '\n')
test_result.write('#####\n\n')
test_result.write(classification_report(y_true=y_tests, y_pred=y_preds, digits=3))
test_result.write('...............................................................\n\n')
if __name__ == '__main__':
for data_pair in data_list:
run_dnn(data_pair)
| null |
baselines/DANN/DANN_keras_no.py
|
DANN_keras_no.py
|
py
| 11,343 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.shuffle",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "keras.layers.Embedding",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPool1D",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification.f1_score",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification.f1_score",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 323,
"usage_type": "call"
}
] |
246247736
|
from EligibleAgeChecker import EligibleAgeChecker
import pytest
from WebDataScraper import DataScraper, WebDataScraper
test_wds = WebDataScraper()
url = "https://www.nhs.uk/conditions/coronavirus-covid-19/coronavirus-vaccination/coronavirus-vaccine/?gaclid=Cj0KCQjw16KFBhCgARIsALB0g8Ib9I_i92EiECD35ULdvHx52ozQVLgCMfVzPf-rm9Q-IAh_qVTM-usaAryPEALw_wcB"
test_wds.create_request(url)
test_wds.collect_response()
test_wds.format_response()
test_eac = EligibleAgeChecker(test_wds)
def test_extract_age():
assert test_eac.age == ""
test_eac.extract_age()
assert test_eac.age.isdigit() == True
def test_is_vaccination_open():
assert test_eac.is_eligible == False
test_eac.is_vaccination_open(int(test_eac.age)+1)
assert test_eac.is_eligible == True
test_eac.is_vaccination_open(int(test_eac.age)-1)
assert test_eac.is_eligible == False
def test_exception_extract_age():
test_wds.formatted_response = None
with pytest.raises(Exception) as exception_object:
test_eac.extract_age()
assert str(exception_object.value) == "Something went wrong when extracting age from formatted response string"
| null |
test_EligibleAgeChecker.py
|
test_EligibleAgeChecker.py
|
py
| 1,184 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "WebDataScraper.WebDataScraper",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "EligibleAgeChecker.EligibleAgeChecker",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 28,
"usage_type": "call"
}
] |
172351546
|
#! /usr/bin/env python3
# coding: utf-8
import math
import urllib.parse
import requests
from view.consoleapiview import ConsoleApiView
class OpenFoodFactsInteractions:
""" This class manages all the interactions with the OpenFoodFacts API """
def __init__(self):
self.interface = ConsoleApiView()
        #Categories hard-coded 'for the moment'
self.category_list = ['sandwichs', 'barres', 'pizzas', 'biscuits-aperitifs']
def __get_search_url(self, category, page_size, page):
""" This method creates the products url needed """
suffixe_url_element = {
'action' : 'process',
'tagtype_0' : 'categories',
'tag_contains_0' : 'contains',
'tag_0' : category,
'page_size' : page_size,
'page' : page,
'json' : '1'
}
prefixe_url = 'https://fr.openfoodfacts.org/cgi/search.pl?'
return prefixe_url + urllib.parse.urlencode(suffixe_url_element)
def get_product_pages_number(self, category, products_per_page):
""" This method gets the necessary page number to request for a category """
url = self.__get_search_url(category, '20', '1')
request = requests.get(url)
data = request.json()
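        # 'count' is the total number of matching products; round up so a
        # partial last page is still requested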
page_number = math.ceil(int(data['count']) / int(products_per_page))
return page_number
def get_product_page(self, category, page_size, page):
""" This method gets the json linked to a specific page """
url = self.__get_search_url(category, page_size, page)
request = requests.get(url)
return request.json()
| null |
app/controller/api_interaction.py
|
api_interaction.py
|
py
| 1,645 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "view.consoleapiview.ConsoleApiView",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.urlencode",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
}
] |
96994735
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_countries import data as country_data
from postal.settings import POSTAL_ADDRESS_LINE1, POSTAL_ADDRESS_LINE2, POSTAL_ADDRESS_CITY, POSTAL_ADDRESS_STATE, \
POSTAL_ADDRESS_CODE, POSTAL_USE_CRISPY_FORMS
if POSTAL_USE_CRISPY_FORMS:
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Hidden
def country_sort_key(country_data):
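    # pin US and CA to the top of the list: 'AAA'/'AAAA' sort before any name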
if country_data[0] == 'US':
return 'AAA'
if country_data[0] == 'CA':
return 'AAAA'
return country_data[1]
country_list = sorted([('', '-' * 45)] + list(country_data.COUNTRIES.items()), key=country_sort_key)
form_helpers = {}
def register_postal_form_helper(form_id, form_helper):
form_helpers[form_id] = form_helper
class PostalAddressForm(forms.Form):
line1 = forms.CharField(label=POSTAL_ADDRESS_LINE1[0], required=POSTAL_ADDRESS_LINE1[1], max_length=100)
line2 = forms.CharField(label=POSTAL_ADDRESS_LINE2[0], required=POSTAL_ADDRESS_LINE2[1], max_length=100)
city = forms.CharField(label=POSTAL_ADDRESS_CITY[0], required=POSTAL_ADDRESS_CITY[1], max_length=100)
state = forms.CharField(label=POSTAL_ADDRESS_STATE[0], required=POSTAL_ADDRESS_STATE[1], max_length=100)
code = forms.CharField(label=POSTAL_ADDRESS_CODE[0], required=POSTAL_ADDRESS_CODE[1], max_length=100)
country = forms.ChoiceField(label=_(u"Country"), choices=country_list)
def __init__(self, *args, **kwargs):
prefix = kwargs.pop('prefix', None)
postal_form_id = kwargs.pop('postal_form_id', 'postal-address-form')
if POSTAL_USE_CRISPY_FORMS:
css_id = 'postal_address'
if prefix is not None:
css_id = prefix + '-' + css_id
if postal_form_id in form_helpers:
self.helper = form_helpers[postal_form_id]
else:
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Div(
'country',
'line1',
'line2',
'city',
'state',
'code',
css_id=css_id,
css_class='postal_address'
),
Hidden('postal-form-id', postal_form_id),
)
super().__init__(*args, **kwargs)
def clean_country(self):
data = self.cleaned_data['country']
if data not in country_data.COUNTRIES.keys():
raise forms.ValidationError("You must select a country")
return data
| null |
src/postal/forms/__init__.py
|
__init__.py
|
py
| 2,670 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "postal.settings.POSTAL_USE_CRISPY_FORMS",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django_countries.data",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django_countries.data",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django_countries.data",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django_countries.data.COUNTRIES.items",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django_countries.data.COUNTRIES",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django_countries.data",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "postal.settings.POSTAL_ADDRESS_LINE1",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "postal.settings.POSTAL_ADDRESS_LINE2",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "postal.settings.POSTAL_ADDRESS_CITY",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "postal.settings.POSTAL_ADDRESS_STATE",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "postal.settings.POSTAL_ADDRESS_CODE",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "postal.settings.POSTAL_USE_CRISPY_FORMS",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "crispy_forms.helper.FormHelper",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Layout",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Div",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Hidden",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django_countries.data.COUNTRIES.keys",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django_countries.data.COUNTRIES",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "django_countries.data",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 69,
"usage_type": "name"
}
] |
482851298
|
#-*- coding: utf-8 -*-
"""
FrostyX's Qtile config
Don't be dumb and test it with Xephyr first
https://wiki.archlinux.org/index.php/Xephyr
Xephyr -br -ac -noreset -screen 1600x600 :1 &
DISPLAY=:1 qtile &
DISPLAY=:1 urxvt &
"""
import re
import subprocess
from datetime import date
from os import uname
from os.path import expanduser
from libqtile.config import Key, Screen, Group, Drag, Click, Match, Rule
from libqtile.command import lazy, Client
from libqtile import layout, bar, widget, hook
from contrib import (VimwikiUnfinished,
Newsboat,
DaysCounter,
Mu,
CurrentLayoutTextIcon)
terminal = "gnome-terminal"
run = "gmrun"
vol_cur = "amixer -D pulse get Master"
vol_up = "amixer -q -D pulse sset Master 2%+"
vol_down = "amixer -q -D pulse sset Master 2%-"
mute = "amixer -q -D pulse set Master toggle"
#bright_up = "xbacklight -inc 10"
#bright_down = "xbacklight -dec 10"
bright_up = "light -A 5"
bright_down = "light -U 5"
lock = "gnome-screensaver-command -l"
scrot = ""
scrot_all = ""
battery = "BAT0"
suspend = "systemctl suspend"
player_prev = "playerctl previous --player=spotify"
player_next = "playerctl next --player=spotify"
player_play_pause = "playerctl play-pause --player=spotify"
hostname = uname()[1]
if hostname == "chromie":
battery = "BAT1"
scrot = "/home/jkadlcik/.bin/screenshot.sh"
scrot_all = "/home/jkadlcik/git/qtile-screenshot/qtile-screenshot.py -o /home/jkadlcik/images/scrot"
# https://github.com/FrostyX/qtile-screenshot/blob/master/qtile-screenshot.py
elif hostname == "localhost.localdomain": # New work laptop is not named yet
scrot = "/home/jkadlcik/.bin/screenshot.sh"
terminal = "urxvt256c -e tmux"
lock = "i3lock -i /home/jkadlcik/.dotfiles/.config/qtile/img/bsod.png"
mod = "mod1" # Left alt
sup = "mod4" # Left win-key
keys = [
# Switch window focus to other pane(s) of stack
Key([mod], "Tab", lazy.layout.next()),
Key([mod], "Return", lazy.spawn(terminal)),
Key([mod], "F1", lazy.spawn(terminal)),
Key([mod], "F2", lazy.spawn(run)),
# Toggle between different layouts as defined below
Key([mod], "space", lazy.next_layout()),
Key([mod], "F4", lazy.window.kill()),
Key([mod, "control"], "r", lazy.restart()),
Key([mod, "control"], "q", lazy.shutdown()),
Key([mod], "w", lazy.screen.togglegroup()),
# cycle to previous and next group
Key([mod], "h", lazy.screen.prev_group(skip_managed=True)),
Key([mod], "l", lazy.screen.next_group(skip_managed=True)),
Key([sup], "f", lazy.window.toggle_fullscreen()),
Key([sup], "t", lazy.window.toggle_floating()),
# Process `gnome-screensaver` must run
Key([mod, sup], "l", lazy.spawn(lock)),
# Multihead magic
Key([sup], "h", lazy.prev_screen()),
Key([sup], "l", lazy.next_screen()),
# Function keys
Key([], "XF86AudioRaiseVolume", lazy.spawn(vol_up)),
Key([], "XF86AudioLowerVolume", lazy.spawn(vol_down)),
Key([], "XF86AudioMute", lazy.spawn(mute)),
Key([], "XF86MonBrightnessUp", lazy.spawn(bright_up)),
Key([], "XF86MonBrightnessDown", lazy.spawn(bright_down)),
Key([], "Print", lazy.spawn(scrot)),
Key([sup], "Print", lazy.spawn(scrot_all)),
# Multimedia
Key([sup], "Left", lazy.spawn(player_prev)),
Key([sup], "Right", lazy.spawn(player_next)),
Key([sup], "Down", lazy.spawn(player_play_pause)),
# Quiting
Key([mod], "F10", lazy.spawn(suspend)),
]
# dnf install fontawesome-fonts
# https://fortawesome.github.io/Font-Awesome/cheatsheet/
# For v4.7 see https://fontawesome.com/v4.7.0/cheatsheet/
icons = {
"logo": "", # fa-redhat
"temp": "", # fa-fire-extinguisher
"battery": "", # fa-battery-three-quarters
"light": "", # fa-lightbulb-o
"volume": "", # fa-bullhorn
"rss": "", # fa-rss
"tasks": "", # fa-calendar-check-o
"repeat": "", # fa-repeat
"email": "", # fa-at
"gmail": "", # fa-google
"chat": "", # fa-comment-dots
"web": "", # fa-internet-explorer
"terminal": "", # fa-keyboard
"dev": "", # fa-heart
"doc": "", # fa-folder
"misc": "", # fa-file
"ssh": "", # fa-hashtag
"virtual": "", # fa-cogs
"games": "", # fa-playstation
"music": "", # fa-headphones
"max": "", # fa-window-maximize
"monadtall": "", # fa-columns
"treetab": "", # fa-tree
}
def get_layout_icon(name):
return {
"max": icons["max"],
"monadtall": icons["monadtall"],
"treetab": icons["treetab"],
}.get(name, name)
workspaces = [
{"name": "i", "key": "i", "label": icons["chat"], "matches": [Match(wm_class=["Pidgin"])]},
{"name": "r", "key": "r", "label": icons["web"], "matches": [Match(wm_class=["Chromium-browser", "Firefox", "Google-chrome"])]},
{"name": "f", "key": "f", "label": icons["terminal"], "matches": [Match(wm_class=["dolphin", "Thunar", "File-roller"])]},
{"name": "d", "key": "d", "label": icons["dev"], "matches": [Match(wm_class=["Lispworks", "jetbrains-pycharm", "Eclipse" ])]},
{"name": "q", "key": "q", "label": icons["doc"], "matches": [Match(wm_class=["Acroread", "Zathura", "Evince"])]},
{"name": "n", "key": "n", "label": icons["misc"], "matches": [Match(wm_class=["Claws-mail"])]},
{"name": "c", "key": "c", "label": icons["ssh"]},
{"name": "v", "key": "v", "label": icons["virtual"], "matches": [Match(wm_class=["VirtualBox"])]},
{"name": "g", "key": "g", "label": icons["games"], "matches": [Match(wm_class=["Wine", "Python2.7", "Steam", "Progress"])]}, # Python2.7 is playonlinux; Progress is steam updater
{"name": "o", "key": "o", "label": icons["music"], "matches": [Match(wm_class=["Vlc", "Totem"])]},
]
groups = []
for workspace in workspaces:
matches = workspace["matches"] if "matches" in workspace else None
groups.append(Group(workspace["name"], label=workspace["label"], matches=matches, layout="max"))
keys.append(Key([mod], workspace["key"], lazy.group[workspace["name"]].toscreen()))
keys.append(Key([mod, sup], workspace["key"], lazy.window.togroup(workspace["name"])))
# float dialog windows
@hook.subscribe.client_new
def dialogs(window):
floating = ["gmrun", "gcr-prompter"]
try:
wm_type = window.window.get_wm_type()
wm_class = window.window.get_wm_class()[0]
transient_for = window.window.get_wm_transient_for()
if wm_type == 'dialog' or transient_for or wm_class in floating:
window.floating = True
except:
pass
# Preview: https://chriskempson.github.io/base16/#eighties
# Codes: https://chriskempson.github.io/base16/css/base16-eighties.css
colors = {
"greybg": "#2d2d2d",
"greyfg": "#d3d0c8",
"red": "#f2777a",
"blue": "#6699cc",
"lgrey": "#747369",
"green": "#99cc99",
}
base16_chalk = {
"black" : "#151515",
"red": "#fb9fb1",
"green": "#acc267",
"yellow": "#ddb26f",
"blue": "#6fc2ef",
"magenta": "#e1a3ee",
"cyan": "#12cfc0",
"white": "#d0d0d0",
"gray": "#505050",
}
# http://docs.qtile.org/en/latest/manual/ref/layouts.html
layout_theme = {
"border_width": 1,
"border_focus": colors["blue"],
"border_normal": colors["lgrey"],
"margin": 10,
"single_margin": 10,
}
layouts = [
layout.MonadTall(**layout_theme),
layout.TreeTab(**layout_theme),
layout.xmonad.MonadTall(ratio=0.75, **layout_theme),
layout.max.Max(**layout_theme),
]
floating_layout = layout.Floating(**layout_theme)
widget_defaults = dict(
font='Arial',
fontsize=12,
padding=3,
)
def num_screens():
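    # count " connected " lines in xrandr output to detect attached monitors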
process = subprocess.Popen(["xrandr"], stdout=subprocess.PIPE)
out = str(process.communicate()[0]).split("\n")
i = 0
for line in out:
if " connected " in line:
i += 1
return i
style = {
"padding": 5,
}
sep = {
"foreground": colors["lgrey"],
"padding": 15,
}
screens = [
Screen(
# Let's have a gap on the bottom, but instead of showing a wallpaper,
        # make it seamless with emacs and terminal backgrounds
bottom=bar.Bar([widget.TextBox("")], 15, background=base16_chalk["black"]),
top=bar.Bar([
widget.Spacer(length=5),
# Logo
widget.TextBox(
text=icons["logo"],
fontsize=14,
mouse_callbacks = {'Button1': lambda qtile: qtile.cmd_spawn("urxvt")},
foreground=base16_chalk["magenta"],
padding_y=5,
**style
),
widget.Sep(**sep),
# Workspaces
widget.GroupBox(
highlight_method="text",
urgent_alert_method="text",
this_current_screen_border=base16_chalk["blue"],
active=base16_chalk["white"],
inactive=base16_chalk["gray"],
rounded=False,
padding_x=6,
padding_y=5,
margin=0,
fontsize=14,
hide_unused=True,
),
widget.Sep(**sep),
# Current layout
CurrentLayoutTextIcon(
fun=get_layout_icon,
length=20,
foreground=base16_chalk["green"],
**style
),
widget.Sep(**sep),
widget.TaskList(
icon_size=0,
background=colors["greybg"],
foreground=base16_chalk["white"],
highlight_method="text",
border=base16_chalk["blue"],
urgent_border=base16_chalk["red"],
),
# Notify
# We want low priority color to be also red because some
# applications (not looking at you Spotify) are using that color for
# highlights.
widget.Spacer(length=100),
widget.Notify(
default_timeout=15,
foreground=base16_chalk["white"],
foreground_low=base16_chalk["red"],
foreground_urgent=base16_chalk["red"],
**style
),
widget.Spacer(length=100),
# Emails
widget.TextBox(
text=icons["email"],
foreground=base16_chalk["green"],
**style
),
Mu(
"/home/jkadlcik/Mail",
"/seznam/I/BOX",
"[email protected]",
foreground=base16_chalk["green"],
**style
),
widget.TextBox(
text=icons["gmail"],
foreground=base16_chalk["green"],
**style
),
Mu(
"/home/jkadlcik/Mail",
"/gmail/*",
"[email protected]",
foreground=base16_chalk["green"],
**style
),
widget.Sep(**sep),
# Temp
widget.TextBox(
text=icons["temp"],
foreground=base16_chalk["yellow"],
**style
),
widget.ThermalSensor(
threshold=65,
foreground=base16_chalk["yellow"],
foreground_alert=colors["red"],
**style
),
widget.Sep(**sep),
# Battery
widget.TextBox(
text=icons["battery"],
foreground=base16_chalk["magenta"],
**style
),
widget.Battery(
battery_name=battery,
foreground=base16_chalk["magenta"],
format="{percent:2.0%}",
low_foreground=colors["red"],
**style
),
widget.Sep(**sep),
# Light
widget.TextBox(
text=icons["light"],
foreground=base16_chalk["blue"],
**style
),
widget.Backlight(
brightness_file="/sys/class/backlight/intel_backlight/actual_brightness",
max_brightness_file="/sys/class/backlight/intel_backlight/max_brightness",
foreground=base16_chalk["blue"],
**style
),
widget.Sep(**sep),
# Volume
widget.TextBox(
text=icons["volume"],
foreground=base16_chalk["green"],
**style
),
widget.Volume(
get_volume_command=vol_cur.split(),
foreground=base16_chalk["green"],
**style
),
widget.Sep(**sep),
# Unread news count
widget.TextBox(
text=icons["rss"],
foreground=base16_chalk["yellow"],
**style
),
Newsboat(
dbfile="/home/jkadlcik/.newsboat/cache.db",
foreground=base16_chalk["yellow"],
**style
),
widget.Sep(**sep),
# Time
widget.Clock(
timezone="Europe/Prague",
format="%H:%M",
foreground=base16_chalk["magenta"],
**style
),
widget.Sep(**sep),
# Date
widget.Clock(
timezone="Europe/Prague",
format="%d. %m. (%b) %Y",
foreground=base16_chalk["blue"],
**style
),
widget.Sep(**sep),
# Week
widget.Clock(
timezone="Europe/Prague",
format="#%W",
foreground=base16_chalk["green"],
**style
),
widget.Sep(**sep),
# The meaning of this date is a private matter
DaysCounter(
starting_date=date(year=2019, month=2, day=3),
foreground=base16_chalk["yellow"],
),
widget.Sep(**sep),
# Systray
widget.Systray(),
widget.Spacer(length=5),
], 25, background=colors["greybg"]),
)
]
if num_screens() == 2:
screens.append(
Screen(
bottom=bar.Bar([
widget.GroupBox(highlight_method="block", this_current_screen_border=colors["blue"], active=colors["greyfg"], inactive=colors["lgrey"], **style),
widget.Sep(**sep),
widget.CurrentLayout(**style),
widget.Sep(**sep),
widget.Prompt(),
widget.WindowTabs(separator=" | ", **style),
widget.Systray(),
], 25, background=colors["greybg"])))
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
follow_mouse_focus = False
bring_front_click = False
dgroups_key_binder = None
dgroups_app_rules = [
# floating windows
Rule(Match(wm_class=['Synfigstudio', 'Wine', 'Xephyr', 'postal2-bin']), float=True),
]
main = None
cursor_warp = False
auto_fullscreen = True
wmname = "LG3D"
# Autostart
@hook.subscribe.startup_once
def autostart():
home = expanduser("~")
subprocess.Popen([home + "/.config/qtile/autostart.sh"])
# xrandr --output DP2 --auto --right-of eDP1
@hook.subscribe.screen_change
def restart_on_randr(qtile, ev):
# qtile.cmd_restart()
pass
| null |
.config/qtile/examples/FrostyX.py
|
FrostyX.py
|
py
| 15,911 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.uname",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Key",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.layout.next",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.layout",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.next_layout",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.kill",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.restart",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.shutdown",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen.togglegroup",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen.prev_group",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen.next_group",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.screen",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.toggle_fullscreen",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.toggle_floating",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.prev_screen",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.next_screen",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.spawn",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Match",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Group",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Key",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.group",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Key",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.togroup",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "libqtile.hook.subscribe",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "libqtile.hook",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "libqtile.layout.MonadTall",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "libqtile.layout",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "libqtile.layout.TreeTab",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "libqtile.layout",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "libqtile.layout.xmonad.MonadTall",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "libqtile.layout.xmonad",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "libqtile.layout",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "libqtile.layout.max.Max",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "libqtile.layout.max",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "libqtile.layout",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "libqtile.layout.Floating",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "libqtile.layout",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "libqtile.config.Screen",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "libqtile.bar.Bar",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "libqtile.bar",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "libqtile.bar.Bar",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "libqtile.bar",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Spacer",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.GroupBox",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "contrib.CurrentLayoutTextIcon",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TaskList",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Spacer",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Notify",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Spacer",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "contrib.Mu",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "contrib.Mu",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.ThermalSensor",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Battery",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Backlight",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Volume",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.TextBox",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "contrib.Newsboat",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 431,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Clock",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Clock",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Clock",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "contrib.DaysCounter",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Systray",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Spacer",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Screen",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "libqtile.bar.Bar",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "libqtile.bar",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.GroupBox",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.CurrentLayout",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Sep",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 488,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Prompt",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.WindowTabs",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "libqtile.widget.Systray",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "libqtile.widget",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Drag",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.set_position_floating",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 497,
"usage_type": "name"
},
{
"api_name": "libqtile.command.lazy.window.get_position",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Drag",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.set_size_floating",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "libqtile.command.lazy.window.get_size",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Click",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window.bring_to_front",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "libqtile.command.lazy.window",
"line_number": 499,
"usage_type": "attribute"
},
{
"api_name": "libqtile.command.lazy",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "libqtile.config.Rule",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "libqtile.config.Match",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "libqtile.hook.subscribe",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "libqtile.hook",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "libqtile.hook.subscribe",
"line_number": 523,
"usage_type": "attribute"
},
{
"api_name": "libqtile.hook",
"line_number": 523,
"usage_type": "name"
}
] |
474279302
|
import time
import datetime
import socket
import asyncio
import matplotlib.pyplot as plt
import numpy as np
import argparse
import select
import selectors
import sys
from sklearn import datasets
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_svmlight_file
from sklearn.datasets import load_iris
from sklearn.model_selection import validation_curve
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
plt.ion()
plt.rcParams['figure.figsize'] = (16, 9)
def clear_logfile(f_name):
with open(f_name, "r+") as writer:
writer.truncate(0)
class Sleeper:
def __init__(self, duration, df, config_file):
#TODO: add timescale functionality
self.duration = int(duration)
self.time_start = datetime.datetime.now()
self.config_file = config_file
self.dyn_x = []
self.dyn_y = []
f = open(config_file, "r")
self.name = f.readline()[:-1]
self.wakeup_span = int(f.readline()) # float val of num minutes to perform the wakeup cycle
self.predictive_max = f.readline() # earliest wakeup where a state transition will be permitted
self.num_divisions = int(f.readline()) # how many steps to break wakeup into, bounded [1, 100] on Windows and [1, 1000] on Unix due to clock limits (for now)
self.port = int(f.readline()) # port for local communication to send control signals
        self.log_file = f.readline()[:-1] # output logfile
self.use_data = f.readline() # whether or not to use predictive wakeup feature
self.data_file = f.readline()[:-1] # (optional) dataset for predictive optimal wakeup
self.sets = f.readline().split(';') # (optional) additional datasets for prediction
f.close()
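        # A hypothetical sleep_config.txt matching the readline() order above
        # (example values are illustrative only, not from the original project):
        #   bedroom
        #   30
        #   45
        #   10
        #   9000
        #   sleeper.log
        #   1
        #   datasets/hrv_fake_1.txt
        #   1;2;3;4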
if (df != "__no__"):
self.data_file = df
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(('localhost', self.port))
self.max = 0
self.bk_avg_counter = 0
self.bucket = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prev_buckets_avg = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.state = "LIGHT "
self.do_g = True
#self.prev_state = "LIGHT "
for k in range(10):
self.bucket.append(float(0))
self.prev_buckets_avg.append(float(0))
#self.clf = svm.SVC(gamma = 0.001, C = 100.0)
#self.train_data = load_svmlight_file(self.data_file) # caveat: data must be of form line by line < [identifier] [feature-id]:[value] ... >
#self.clf.fit(self.train_data.data[:-1], self.train_data.target[:-1]) # fit the datafile to a dataset which can now facilitate predictions
# plt.xlabel("HRV")
# plt.ylabel("Time (s)")
# plt.show()
def enable_graph(self):
self.do_g = True
def disable_graph(self):
self.do_g = False
def fill_bucket(self, pos, val):
if val > self.max:
self.max = val
self.bucket[pos % 10] = val
def get_bucket_avg(self):
tmp = float(0)
for i in range(10):
tmp = tmp + self.bucket[i]
tmp = tmp / 10
return tmp
def fill_bucket_avg(self, pos, val):
if (pos % 10 == 0):
self.prev_buckets_avg[self.bk_avg_counter % 10] = self.get_bucket_avg()
self.bk_avg_counter = self.bk_avg_counter + 1
def get_state(self, pos):
n_count = int(0)
my_sum = float(0)
for ii in range(10):
if (self.prev_buckets_avg[ii] != 0) and (ii != pos):
my_sum = my_sum + self.prev_buckets_avg[ii]
n_count = n_count + 1
my_sum = my_sum / n_count
if(self.prev_buckets_avg[pos % 10] > my_sum):
if(self.prev_buckets_avg[pos % 10] > (my_sum * 1.0)):
self.state = "LIGHT "
else:
if(self.prev_buckets_avg[pos % 10] < (my_sum * 1.0)):
self.state = "DEEP "
print(str(self.prev_buckets_avg[pos % 10]) + " " + str(my_sum))
def log(self, line):
with open(self.log_file, "a+") as writer:
writer.write(str(line+"\n"))
def graph_baseline(self): # graph historical dataset files using format
self.fig, (self.ax, self.ax2) = plt.subplots(2)
self.x = []
self.y = []
self.x_acc = []
self.y_acc = []
        n_counted = []        # per-index sample counts for the HRV series
        n_counted_acc = []    # separate counts for the ACC series (previously shared, which skewed the averages)
        for i in range(2000):
            n_counted.append(int(0))
            n_counted_acc.append(int(0))
#self.x.append(float(0))
#self.y.append(float(0))
#self.x_acc.append(float(0))
#self.y_acc.append(float(0))
for num in self.sets:
print(num)
# self.x = []
# self.y = []
fd = open(str("datasets/hrv_fake_" + num + ".txt"), "r")
dataset = fd.readlines()
fd.close()
lc = 0
for line in dataset[1:-1]:
split_tupple = line.split(';')
split_tupple[1] = split_tupple[1].split('\n')[0]
                if len(self.x) > lc: # average the datasets
                    n_counted[lc] = n_counted[lc] + 1
                    self.y[lc] = (float(self.y[lc])*(n_counted[lc]-1) + float(split_tupple[1])) / n_counted[lc]
                    #("acc: "+str(self.y[lc]))
                else:
                    n_counted[lc] = 1
                    self.x.append(split_tupple[0])
                    self.y.append(float(split_tupple[1]))
# print("append")
lc = lc + 1
# plt.plot(x, y, color = 'blue', linestyle = 'solid', linewidth = 2)
# marker = 'o', markerfacecolor = 'blue', markersize = 5)
#self.ax.plot(self.x, self.y, color = 'blue', linestyle = 'solid', linewidth = 1)
# self.x = []
# self.y = []
fd = open(str("datasets/acc_fake_"+ num + ".txt"), "r")
dataset = fd.readlines()
fd.close()
lc = int(0)
for line in dataset[1:-1]:
#print(line)
split_tupple = line.split(';')
split_tupple[1] = split_tupple[1].split('\n')[0]
                if len(self.x_acc) > lc:
                    n_counted_acc[lc] = n_counted_acc[lc] + 1
                    # self.x_acc[lc] = (self.x_acc[lc]*(n_counted_acc[lc]-1) + split_tupple[0]) / n_counted_acc[lc]
                    self.y_acc[lc] = (self.y_acc[lc]*int(n_counted_acc[lc]-1) + float(split_tupple[1])) / n_counted_acc[lc]
                else:
                    n_counted_acc[lc] = 1
                    self.x_acc.append(split_tupple[0])
                    self.y_acc.append(float(split_tupple[1]))
lc = lc + 1
#print(str(lc) + " " + str(self.y_acc[lc]))
print('-_-')
print(str(len(self.x_acc)))
print(str(len(self.y_acc)))
print(str(len(self.x)))
self.ax.set_autoscaley_on(True)
l1 = self.ax.plot(self.x_acc, self.y_acc, color = 'blue', linestyle = 'solid', linewidth = 1, label = 'Acceleration Data')
self.ax.tick_params(axis = 'y', labelcolor = 'blue')
self.ax3 = self.ax.twinx()
self.ax3.set_autoscaley_on(True)
l2 = self.ax3.plot(self.x, self.y, color = 'green', linestyle = 'solid', linewidth = 1, label = 'Heart Rate Variability Data')
self.ax3.tick_params(axis = 'y', labelcolor = 'green')
self.ax.autoscale_view()
self.ax3.autoscale_view()
#self.ax.set_yticks(self.ax.get_yticks()[::10])
self.ax.set_xticks(self.ax.get_xticks()[::10])
self.ax3.set_yticks(self.ax3.get_yticks()[::25])
#self.ax3.set_xticks(self.ax3.get_xticks()[::100])
t_lines = l1 + l2
lbs = [ll.get_label() for ll in t_lines]
self.ax.legend(t_lines, lbs, loc = 0)
plt.gcf().autofmt_xdate()
plt.xlabel('Time')
#plt.ylabel('HRV(B) & ACC(G)')
plt.title('HRV and ACC Composite Data')
plt.show()
def mdecode(self, msg):
tmp = msg.rstrip('\n')
        tmp = tmp.split(' ')
return [int(tmp[0]), float(tmp[1])]
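    # mdecode assumes each line read from the accelerometer file has the form
    # "<seconds-elapsed> <value>", e.g. "120 0.83" (illustrative values only),
    # which decodes to [120, 0.83].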
def send_com(self, msg):
n_sent = 0
n_max = len(msg)
while n_sent < n_max:
status = self.sock.send(msg[n_sent:])
if status == 0:
print("Socket dropped")
                self.sock.shutdown(socket.SHUT_RDWR)
return None
n_sent = n_sent + status
def update(self, data_file, regex):
with open(data_file) as reader:
new_data = reader.readlines()
for line in new_data:
split_tupple = line.split(regex)
self.dyn_x.append(split_tupple[0])
self.dyn_y.append(str(datetime.datetime.now()))
        self.fig3, self.ax3 = plt.subplots()  # plt.subplots() returns (figure, axes)
        self.ax3.plot(self.dyn_x, self.dyn_y, color = 'green', linestyle = 'dashdot', linewidth = 1)
self.ax3.set_ylabel('Live HRV')
self.ax3.set_xticks([]) # adjust later after tests
plt.show()
def simple(self): # simple wakeup with no real-time predictive analysis
print("Sleep ", str(60*(self.duration - self.wakeup_span)))
#time.sleep(60*(self.duration - self.wakeup_span))
print(datetime.datetime.now())
# n_hrv_per_interval = int(len(self.x) / self.duration)
# it = int(0)
for zz in range(self.num_divisions):
self.send_com(str(datetime.datetime.now()).encode()) # send time for debug purposes, normally send a float on [0, 1] representing intensity
time.sleep(1/self.num_divisions)
self.send_com("WAKEUP".encode())
y2 = []
        it = int(0)
start = len(self.x) - int(float(float(self.wakeup_span) / float(self.duration)) * len(self.x))
print("Start", start)
for k in range(len(self.x)):
if k < start:
print('set 0')
y2.append(0)
else:
print('set val')
y2.append(it / self.num_divisions)
it = it + 1
self.ax4 = self.ax.twinx()
self.ax4.plot(self.x, y2, color = 'red', linestyle = 'dashed', linewidth = 1)
self.ax4.set_ylabel('Light Intensity')
self.ax4.set_xticks([])
# self.ax2.ylim(0, 1)
# plt.figure(figsize = (13.3, 10))
plt.rcParams['figure.figsize'] = [13.3, 10]
plt.show()
def sim(self, speedup, run_file):
# Data points will be read at a rate of 1 * speedup per second
predict_begin = False
wake_begin = False
split_tupple = []
since_last_max = 0
prev_5 = [0, 0, 0, 0, 0]
prev_5_avg = 0
prev_5_counter = 0
#sec_elapsed = int(0)
intensity = float(0.0)
prev_t = 0
cur_t = 0
del_t = 0
if self.do_g == True:
#print("Good")
#time.sleep(10)
self.ax_int = self.ax2.twinx()
sim_x = []
sim_y = []
# g1, ax3 = plt.subplots()
linez, = self.ax2.plot([],[], color = 'blue', linewidth = 1)
line_intensity, = self.ax_int.plot([],[], color = 'red', linestyle = 'dashed', linewidth = 1)
self.ax_int.set_ylim([-0.025, 1])
self.ax2.set_autoscaley_on(True)
# self.ax_int.set_autoscaley_on(True)
# self.ax_int.grid()
self.ax2.grid()
with open(self.data_file) as reader:
data = reader.readlines()
wk_start = len(data) - int(float(float(self.wakeup_span) / float(self.duration)) * len(data))
pr_start = wk_start - int(float(float(self.wakeup_span) / float(self.duration)) * len(data))
dur = (len(data) - 2) * 60
t_elapsed = 0
t_wake_sec = len(data) - wk_start - 1
print(wk_start)
print(len(data))
elapsed = 0
#intensity = 0
for line in data[1:-1]:
time.sleep(1/speedup)
split_tupple = line.split(';')
split_tupple[1] = split_tupple[1][:-1]
elapsed = elapsed + 1
t_elapsed = int(split_tupple[0])
# TODO swap from elapsed to t_elapsed
print(split_tupple[1])
#split_tupple[1] = split_tupple[1].split('\n')[0]
if self.do_g == True:
linez.set_xdata(np.append(linez.get_xdata(), float(t_elapsed)))
linez.set_ydata(np.append(linez.get_ydata(), float(split_tupple[1])))
self.fill_bucket(elapsed, float(split_tupple[1]))
if (elapsed % 10 == 0):
self.fill_bucket_avg(elapsed, self.get_bucket_avg())
self.get_state(self.bk_avg_counter)
if (intensity / (len(data)-wk_start-1) <= 0):
self.log(self.state + str(intensity / (len(data)-wk_start-1)))
self.send_com(str(self.state + str(intensity / (len(data)-wk_start-1))).encode())
if (intensity / (len(data)-wk_start-1) > 0):
self.log(self.state + str(100 * (intensity / (len(data)-wk_start-1)))[:6])
self.send_com(str(self.state + str(100 * (intensity / (len(data)-wk_start-1)))[:6]).encode())
if self.do_g == True:
line_intensity.set_xdata(np.append(line_intensity.get_xdata(), t_elapsed))
if elapsed >= pr_start:
predict_begin = True
if predict_begin == True:
prev_5_avg = 0
for jj in range(5):
prev_5_avg = prev_5_avg + prev_5[jj]
prev_5_avg = prev_5_avg / 5
                if(prev_5_counter > 4) and (float(split_tupple[1]) > (2 * prev_5_avg)):
wake_begin = True
predict_begin = False
t_wake_sec = float(dur - t_elapsed)
prev_5[prev_5_counter % 5] = float(split_tupple[1])
                prev_5_counter = prev_5_counter + 1
if elapsed >= wk_start:
predict_begin = False
wake_begin = True
if wake_begin == True:
val = intensity / (len(data) - wk_start - 1)
if self.do_g == True:
line_intensity.set_ydata(np.append(line_intensity.get_ydata(), (intensity / (len(data) - wk_start - 1))))
print(intensity / (len(data) - wk_start))
intensity = intensity + 1
#self.send_com(str("DEEP " + str(100 * val)).encode())
else:
if self.do_g == True:
line_intensity.set_ydata(np.append(line_intensity.get_ydata(), 0))
if self.do_g == True:
self.ax_int.relim()
self.ax_int.autoscale_view()
self.ax2.relim()
self.ax2.autoscale_view()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
#if elapsed % 20 == 0 and elapsed < wk_start:
# self.send_com(str("DEEP 0.0").encode())
#print("Sim")
#plt.show()
def manage(self, duration_sec, duration_wakeup_minimum, duration_predict): # manage control signals from the ECG feeding to the State Machine while asleep
SIZE = 1024
pos = 0
r_lines = []
data = []
tupple = []
maxim = 0
since_last_max = 0
prev_5 = [0, 0, 0, 0, 0]
prev_5_avg = 0
prev_5_counter = 0
intensity = float(0.0)
sec_elapsed = int(0)
prev_t = 0
cur_t = 0
del_t = 0
start_force_wake = duration_sec - duration_wakeup_minimum
start_predict_time = start_force_wake - duration_predict
trigger = False
predict_begin = False
wake_begin = False
time_wake_sec = float(duration_wakeup_minimum)
#self.sock.listen(1)
if self.do_g == True:
self.ax_int = self.ax2.twinx()
linez, = self.ax2.plot([],[], color = 'blue', linewidth = 1)
line_intensity, = self.ax_int.plot([],[], color = 'red', linestyle = 'dashed', linewidth = 1)
self.ax_int.set_ylim([-0.025, 1])
self.ax2.set_autoscaley_on(True)
self.ax2.grid()
while True: # Poll Loop
trigger = False
with open('../AccelData/AccelData.txt') as reader:
# with open('Parse_Test/FakeAccel.txt') as reader: # Can swap this and the above line out for the purpose of testing
r_lines = reader.readlines()
for line in r_lines[pos:]:
tupple = self.mdecode(line)
print(tupple)
data.append(tupple)
self.fill_bucket(tupple[0], tupple[1])
prev_t = cur_t
cur_t = tupple[0]
del_t = int(cur_t - prev_t)
# Important: with this line the program depends on an accurate elapsed time in the read file
sec_elapsed = tupple[0]
# In the first part of the night, search for a maximum to compare against
if(tupple[1] > maxim):
maxim = tupple[1]
since_last_max = 0
else:
since_last_max = since_last_max + 1
if (predict_begin == True):
prev_5_avg = 0
for jj in range(5):
prev_5_avg = prev_5_avg + prev_5[jj]
prev_5_avg = prev_5_avg / 5
# This is arbitrary, one might even say Byzantine behavior
if (prev_5_counter > 4) and (tupple[1] > (2 * prev_5_avg)):
wake_begin = True
predict_begin = False
time_wake_sec = float(duration_sec - sec_elapsed)
prev_5[prev_5_counter % 5] = tupple[1]
prev_5_counter = prev_5_counter + 1
# Trigger indicates a probability of wakeup beginning
if(tupple[1] > maxim * 0.75):
trigger = True
# If in wakeup, appropriately increment the intensity and send a message
if (wake_begin == True):
time.sleep(0.05) # make sure socket has enough time to read msg, then log and send msg on socket
if (intensity < time_wake_sec):
intensity = intensity + del_t
if (intensity > time_wake_sec):
intensity = time_wake_sec
self.log(self.state + str(100 * (intensity/time_wake_sec))[:6])
self.send_com(str(self.state + str(100 * (intensity/time_wake_sec))[:6]).encode())
if (int(sec_elapsed) >= int(start_predict_time)):
predict_begin = True
if (int(sec_elapsed) >= int(start_force_wake)):
wake_begin = True
if (wake_begin == False):
# If it's been over 2.5 hours since the last max value and we are over 3/4 through the sleep cycle, check for an early wakeup
if (since_last_max >= 600 and sec_elapsed > ((duration_sec * 3) / 4)):
# If light sleep and relatively high movement, start the wakeup early
if (self.state == "LIGHT " and trigger == True):
wake_begin = True
time_wake_sec = float(duration_sec - sec_elapsed)
print(intensity)
print(time_wake_sec)
print('---')
# Graph stuff
if self.do_g == True:
linez.set_xdata(np.append(linez.get_xdata(), float(tupple[0])))
linez.set_ydata(np.append(linez.get_ydata(), float(tupple[1])))
line_intensity.set_xdata(np.append(line_intensity.get_xdata(), float(tupple[0])))
line_intensity.set_ydata(np.append(line_intensity.get_ydata(), intensity/time_wake_sec))
self.ax_int.relim()
self.ax_int.autoscale_view()
self.ax2.relim()
self.ax2.autoscale_view()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if (intensity >= time_wake_sec):
return
ppos = pos
pos = len(r_lines)
for jj in range(ppos, pos):
if (jj % 10) == 0:
self.fill_bucket_avg(jj, self.get_bucket_avg())
self.get_state(self.bk_avg_counter)
self.log(self.state + str(100 * (intensity/time_wake_sec))[:6])
self.send_com(str(self.state + str(100 * (intensity/time_wake_sec))[:6]).encode())
time.sleep(0.2)
print(wake_begin)
print(sec_elapsed)
print(start_force_wake)
#if (int(sec_elapsed) >= int(start_force_wake)):
# wake_begin = True
#
#if (wake_begin == False):
# # If it's been over 2.5 hours since the last max value and we are over 3/4 through the sleep cycle, check for an early wakeup
# if (since_last_max >= 600 and sec_elapsed > ((duration_sec * 3) / 4)):
# # If light sleep and relatively high movement, start the wakeup early
# if (self.state == "LIGHT " and trigger == True):
# wake_begin = True
# time_wake_sec = float(duration_sec - sec_elapsed)
time.sleep(15)
# time.sleep(2) # for testing, normally 15 second poll intervals
# sec_elapsed = sec_elapsed + 15
if __name__ == '__main__':
data_f = None
do_g = False
do_verbose = False
dot_sim = False
dot_live = False
do_sim = ''
g_flag = False
use_argv = False
sleeper_time = 0
wake_time = 0
pred_time = 0
if (len(sys.argv) <= 1):
print("Specify Dataset? (Y/N)")
use_ds = str(input())
if use_ds == 'y' or use_ds == 'Y':
print("Enter datafile:")
df = str(input())
S1 = Sleeper(100, df, "sleep_config.txt")
else:
S1 = Sleeper(100, "__no__", "sleep_config.txt")
print("Enable Graphing (Slow)? (Y/N)")
do_g = str(input())
if do_g == 'n' or do_g == 'N':
S1.disable_graph()
#plt.ioff()
else:
S1.enable_graph()
plt.ion()
elif (len(sys.argv) > 1):
use_argv = True
for jj in sys.argv[1:]:
if (jj.strip() == '-h' or jj.strip() == '--h'):
print("Usage: <python3 parse_data.py> or <python3 parse_data.py [-h] [-verbose] [-no_g] [-sim OR -live] [-file:<filename>] [-t:<time[s]>] [-p:<time[s]>] [-w:<time[s]>]")
if (jj.strip() == '-verbose'):
do_verbose = True
if (jj.strip() == '-no_g'):
g_flag = True
            if (jj.strip()[0:6] == '-file:'):  # '-file:' is six characters long
data_f = jj.strip()[6:]
if (jj.strip() == '-sim'):
dot_sim = True
if (jj.strip() == '-live'):
dot_live = True
if (jj.strip()[0:2] == '-t'):
sleeper_time = int(jj.strip()[3:])
if (jj.strip()[0:2] == '-p'):
pred_time = int(jj.strip()[3:])
if (jj.strip()[0:2] == '-w'):
wake_time = int(jj.strip()[3:])
print(sleeper_time)
print(pred_time)
print(wake_time)
time.sleep(5)
if data_f is not None:
S1 = Sleeper(100, data_f, "sleep_config.txt")
else:
S1 = Sleeper(100, "__no__", "sleep_config.txt")
if (g_flag == True):
S1.disable_graph()
else:
S1.enable_graph()
plt.ion()
clear_logfile(S1.log_file)
dset = []
with open('config.txt', 'r') as reader:
dset = reader.readline().split(';')
    if (wake_time == 0 and pred_time == 0):  # fall back to config.txt when not supplied on the command line
        print(dset[3])
        print(dset[4])
        pred_time = int(dset[3])
        wake_time = int(dset[4])
while True:
if use_argv == False:
print("Run Simulation or Live Transfer? (S/L)")
do_sim = str(input())
S1.send_com("DONE 0".encode())
time.sleep(0.25)
if do_g != 'n' and do_sim != 'N':
S1.graph_baseline()
# S1.simple
# S1.manage
S1.log(str("DONE 0"))
if do_sim == 'y' or do_sim == 'Y' or do_sim == 's' or do_sim == 'S' or dot_sim == True:
S1.sim(10000, "datasets/hrv_fake_4.txt")
S1.send_com(str(S1.state + "100.0").encode())
S1.log(str(S1.state + "100.0"))
time.sleep(0.25)
S1.send_com("DONE 100.0".encode())
S1.log("DONE 100.0")
elif do_sim == 'n' or do_sim == 'N' or do_sim == 'l' or do_sim == 'L' or dot_live == True:
if use_argv == False:
print("Enter duration of sleep in seconds:")
                sleeper_time = int(input())
print("Enter minimum duration of wakeup in seconds:")
wake_time = int(input())
S1.manage(sleeper_time, wake_time, pred_time)
S1.send_com(str(S1.state + "100.0").encode())
S1.log(str(S1.state + "100.0"))
time.sleep(0.25)
S1.send_com("DONE 100.0".encode())
S1.log("DONE 100.0")
input()
| null |
StateMachine/parse_data.py
|
parse_data.py
|
py
| 26,924 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 573,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 591,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 593,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 627,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 675,
"usage_type": "call"
}
] |
320262697
|
import matplotlib.pyplot as plt
import torch
from IPython import display
import numpy as np
import random
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.randn(num_examples, num_inputs,
dtype=torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)
# print(features)
# the first column of all rows (index = 0)
# print(features[:, 0])
# the first row
# print(features[0])
def use_svg_display():
display.set_matplotlib_formats('svg')
def set_figsize(figsize=(5, 5)):
use_svg_display()
plt.rcParams['figure.figsize'] = figsize
set_figsize()
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
# plt.show()
# Return the features and labels of batch_size random samples on each iteration.
def data_iter(batch_size, features, labels):
num_examples = len(features)
    # create a list of indices from 0 to num_examples - 1, used as subscripts
indices = list(range(num_examples))
random.shuffle(indices)
print(indices)
for i in range(0, num_examples, batch_size):
j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)])
        # index along dimension 0 (rows) at positions j; e.g. j = torch.LongTensor([0, 1]) returns the first and second rows
yield features.index_select(0, j), labels.index_select(0, j)
batch_size = 5
for X, y in data_iter(batch_size, features, labels):
print(X, y)
# Initialize the weights as normal random numbers with mean 0 and standard deviation 0.01; initialize the bias to 0.
# np.random.normal() draws from a normal (Gaussian) distribution. For example, numpy.random.normal(loc=0, scale=1e-2, size=shape) means:
# loc (float): the mean of the distribution, i.e. its center; loc=0 gives a distribution symmetric about the y-axis.
# scale (float): the standard deviation, i.e. the width of the distribution; a larger scale gives a shorter, wider curve, a smaller scale a taller, narrower one.
# size (int or tuple of ints): the output shape; defaults to None.
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# define the model
def linreg(X, w, b):
return torch.mm(X, w) + b
# define the loss function
def squared_loss(y_hat, y):
return (y_hat - y.view(y_hat.size())) ** 2 / 2
# define the optimization algorithm (minibatch stochastic gradient descent)
def sgd(params, lr, batch_size):
for param in params:
        param.data -= lr * param.grad / batch_size # note: update through param.data so autograd does not track the step
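# The update rule above is w <- w - lr * grad / batch_size; dividing by
# batch_size makes each step use the average gradient over the minibatch,
# since the loss in the training loop below is summed rather than averaged.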
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X,y in data_iter(batch_size,features,labels):
        l = loss(net(X,w,b),y).sum()  # l is the loss on the minibatch X and y
        l.backward()  # compute gradients of the minibatch loss w.r.t. the model parameters
        sgd([w,b],lr,batch_size)  # update the model parameters with minibatch SGD
w.grad.data.zero_()
b.grad.data.zero_()
train_l = loss(net(features,w,b),labels)
print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
| null |
Coding/Chapter03.py
|
Chapter03.py
|
py
| 3,258 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.randn",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "IPython.display.set_matplotlib_formats",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "IPython.display",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "torch.mm",
"line_number": 70,
"usage_type": "call"
}
] |
629404196
|
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from urllib import unquote
from falcon import HTTP_201, HTTPError, HTTPBadRequest
from ujson import dumps as json_dumps
from ...utils import load_json_body
from ...auth import login_required, check_team_auth
from ... import db
HOUR = 60 * 60
WEEK = 24 * HOUR * 7
simple_ev_lengths = set([WEEK, 2 * WEEK])
simple_12hr_num_events = set([7, 14])
columns = {
'id': '`schedule`.`id` as `id`',
'roster': '`roster`.`name` as `roster`, `roster`.`id` AS `roster_id`',
'auto_populate_threshold': '`schedule`.`auto_populate_threshold` as `auto_populate_threshold`',
'role': '`role`.`name` as `role`, `role`.`id` AS `role_id`',
'team': '`team`.`name` as `team`, `team`.`id` AS `team_id`',
'events': '`schedule_event`.`start`, `schedule_event`.`duration`, `schedule`.`id` AS `schedule_id`',
'advanced_mode': '`schedule`.`advanced_mode` AS `advanced_mode`',
'timezone': '`team`.`scheduling_timezone` AS `timezone`'
}
all_columns = columns.keys()
constraints = {
'id': '`schedule`.`id` = %s',
'id__eq': '`schedule`.`id` = %s',
'id__ge': '`schedule`.`id` >= %s',
'id__gt': '`schedule`.`id` > %s',
'id__le': '`schedule`.`id` <= %s',
'id__lt': '`schedule`.`id` < %s',
'id__ne': '`schedule`.`id` != %s',
'name': '`roster`.`name` = %s',
'name__contains': '`roster`.`name` LIKE CONCAT("%%", %s, "%%")',
'name__endswith': '`roster`.`name` LIKE CONCAT("%%", %s)',
'name__eq': '`roster`.`name` = %s',
'name__startswith': '`roster`.`name` LIKE CONCAT(%s, "%%")',
'role': '`role`.`name` = %s',
'role__contains': '`role`.`name` LIKE CONCAT("%%", %s, "%%")',
'role__endswith': '`role`.`name` LIKE CONCAT("%%", %s)',
'role__eq': '`role`.`name` = %s',
'role__startswith': '`role`.`name` LIKE CONCAT(%s, "%%")',
'team': '`team`.`name` = %s',
'team__contains': '`team`.`name` LIKE CONCAT("%%", %s, "%%")',
'team__endswith': '`team`.`name` LIKE CONCAT("%%", %s)',
'team__eq': '`team`.`name` = %s',
'team__startswith': '`team`.`name` LIKE CONCAT(%s, "%%")',
'team_id': '`schedule`.`team_id` = %s',
'roster_id': '`schedule`.`roster_id` = %s'
}
def validate_simple_schedule(events):
'''
Return boolean whether a schedule can be represented in simple mode. Simple schedules can have:
1. One event that is one week long
2. One event that is two weeks long
3. Seven events that are 12 hours long
4. Fourteen events that are 12 hours long
'''
if len(events) == 1 and events[0]['duration'] in simple_ev_lengths:
return True
else:
return len(events) in simple_12hr_num_events and all([ev['duration'] == 12 * HOUR for ev in events])
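# For example (timestamps illustrative): [{'start': 0, 'duration': WEEK}] is a
# valid simple schedule, as are 7 or 14 events of duration 12 * HOUR; any other
# shape requires advanced mode.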
def get_schedules(filter_params, dbinfo=None, fields=None):
"""
Get schedule data for a request
"""
events = False
from_clause = ['`schedule`']
if fields is None:
fields = columns.keys()
if any(f not in columns for f in fields):
raise HTTPBadRequest('Bad fields', 'One or more invalid fields')
if 'roster' in fields:
from_clause.append('JOIN `roster` ON `roster`.`id` = `schedule`.`roster_id`')
if 'team' in fields or 'timezone' in fields:
from_clause.append('JOIN `team` ON `team`.`id` = `schedule`.`team_id`')
if 'role' in fields:
from_clause.append('JOIN `role` ON `role`.`id` = `schedule`.`role_id`')
if 'events' in fields:
from_clause.append('LEFT JOIN `schedule_event` ON `schedule_event`.`schedule_id` = `schedule`.`id`')
events = True
fields = map(columns.__getitem__, fields)
cols = ', '.join(fields)
from_clause = ' '.join(from_clause)
connection_opened = False
if dbinfo is None:
connection = db.connect()
connection_opened = True
cursor = connection.cursor(db.DictCursor)
else:
connection, cursor = dbinfo
where = ' AND '.join(constraints[key] % connection.escape(value)
for key, value in filter_params.iteritems()
if key in constraints)
query = 'SELECT %s FROM %s' % (cols, from_clause)
if where:
query = '%s WHERE %s' % (query, where)
cursor.execute(query)
data = cursor.fetchall()
if connection_opened:
cursor.close()
connection.close()
# Format schedule events
if events:
# end result accumulator
ret = {}
for row in data:
schedule_id = row.pop('schedule_id')
# add data row into accumulator only if not already there
if schedule_id not in ret:
ret[schedule_id] = row
ret[schedule_id]['events'] = []
start = row.pop('start')
duration = row.pop('duration')
ret[schedule_id]['events'].append({'start': start, 'duration': duration})
data = ret.values()
return data
def insert_schedule_events(schedule_id, events, cursor):
insert_events = '''INSERT INTO `schedule_event` (`schedule_id`, `start`, `duration`)
VALUES (%(schedule)s, %(start)s, %(duration)s)'''
# Merge consecutive events for db storage
raw_events = sorted(events, key=lambda e: e['start'])
new_events = []
for e in raw_events:
if len(new_events) > 0 and e['start'] == new_events[-1]['start'] + new_events[-1]['duration']:
new_events[-1]['duration'] += e['duration']
else:
new_events.append(e)
for e in new_events:
e['schedule'] = schedule_id
cursor.executemany(insert_events, new_events)
def on_get(req, resp, team, roster):
team = unquote(team)
roster = unquote(roster)
fields = req.get_param_as_list('fields')
if not fields:
fields = all_columns
params = req.params
params['team'] = team
params['roster'] = roster
data = get_schedules(params, fields=fields)
resp.body = json_dumps(data)
required_params = frozenset(['events', 'role', 'advanced_mode'])
@login_required
def on_post(req, resp, team, roster):
'''
See below for sample JSON requests.
Weekly 7*24 shift that starts at Monday 6PM PST:
.. code-block:: javascript
{
'role': 'primary'
'auto_populate_threshold': 21,
'events':[
{'start': SECONDS_IN_A_DAY + 18 * SECONDS_IN_AN_HOUR,
'duration': SECONDS_IN_A_WEEK}
],
'advanced_mode': 0
}
Weekly 7*12 shift that starts at Monday 8AM PST:
.. code-block:: javascript
{
'role': 'oncall',
'events':[
{'start': SECONDS_IN_A_DAY + 8 * SECONDS_IN_AN_HOUR,
'duration': 12 * SECONDS_IN_AN_HOUR},
{'start': 2 * SECONDS_IN_A_DAY + 8 * SECONDS_IN_AN_HOUR,
'duration': 12 * SECONDS_IN_AN_HOUR} ... *5 more*
],
'advanced_mode': 1
}
'''
data = load_json_body(req)
data['team'] = unquote(team)
data['roster'] = unquote(roster)
check_team_auth(data['team'], req)
missing_params = required_params - set(data.keys())
if missing_params:
raise HTTPBadRequest('invalid schedule',
'missing required parameters: %s' % ', '.join(missing_params))
schedule_events = data.pop('events')
for sev in schedule_events:
if 'start' not in sev or 'duration' not in sev:
raise HTTPBadRequest('invalid schedule',
'schedule event requires both start and duration fields')
if 'auto_populate_threshold' not in data:
# default to autopopulate 3 weeks forward
data['auto_populate_threshold'] = 21
if not data['advanced_mode']:
if not validate_simple_schedule(schedule_events):
raise HTTPBadRequest('invalid schedule', 'invalid advanced mode setting')
insert_schedule = '''INSERT INTO `schedule` (`roster_id`,`team_id`,`role_id`,
`auto_populate_threshold`, `advanced_mode`)
VALUES ((SELECT `roster`.`id` FROM `roster`
JOIN `team` ON `roster`.`team_id` = `team`.`id`
WHERE `roster`.`name` = %(roster)s AND `team`.`name` = %(team)s),
(SELECT `id` FROM `team` WHERE `name` = %(team)s),
(SELECT `id` FROM `role` WHERE `name` = %(role)s),
%(auto_populate_threshold)s,
%(advanced_mode)s)'''
connection = db.connect()
cursor = connection.cursor(db.DictCursor)
try:
cursor.execute(insert_schedule, data)
schedule_id = cursor.lastrowid
insert_schedule_events(schedule_id, schedule_events, cursor)
except db.IntegrityError as e:
err_msg = str(e.args[1])
if err_msg == 'Column \'roster_id\' cannot be null':
err_msg = 'roster "%s" not found' % roster
raise HTTPError('422 Unprocessable Entity', 'IntegrityError', err_msg)
connection.commit()
cursor.close()
connection.close()
resp.status = HTTP_201
resp.body = json_dumps({'id': schedule_id})
| null |
src/oncall/api/v0/schedules.py
|
schedules.py
|
py
| 9,398 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "falcon.HTTPBadRequest",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "urllib.unquote",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "urllib.unquote",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "utils.load_json_body",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "urllib.unquote",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "urllib.unquote",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "auth.check_team_auth",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "falcon.HTTPBadRequest",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "falcon.HTTPBadRequest",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "falcon.HTTPBadRequest",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "falcon.HTTPError",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "falcon.HTTP_201",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "ujson.dumps",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "auth.login_required",
"line_number": 169,
"usage_type": "name"
}
] |
254119100
|
import json
import os
products = []
with open("scans.json", "r") as f:
data = json.load(f)
for e in data:
id = e['id']
name = e['name']
type = e['type']
t = str(e["timestamp"])
l = e["location"]
l = [str(i) for i in l]
l = ' '.join(l)
found = False
for p in products:
if p[0] == id:
p[3].append(t+" "+l)
found = True
break
if not found:
products.append(
[id, name, type, [t+" "+l]]
)
os.chdir("Packages")
for i in products:
name = i[0]
with open(name+".pac", "w") as f:
for j in i[:3]:
print(j, file=f)
for j in i[3]:
print(j, file=f)
| null |
out/production/resources/pathfinder.py
|
pathfinder.py
|
py
| 778 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 26,
"usage_type": "call"
}
] |
329737460
|
import xlsxwriter
def main():
workbook = xlsxwriter.Workbook('dados_gRPC.xlsx')
worksheet = workbook.add_worksheet()
arq = open('tempos_gRPC.txt', 'r')
texto = arq.readlines()
inicializaWorksheet(texto, worksheet)
row = 0
col = 0
for linha in texto :
if(row%21 == 0):
row = 0
col = col + 1
worksheet.write(row, col, linha.split(' ')[1].replace("\n", "").replace(".", ","))
row += 1
arq.close()
def inicializaWorksheet(texto, worksheet):
col = 0
row = 0
for linha in texto:
worksheet.write(row, col, linha.split(' ')[0])
row += 1
if(row == 21):
break
if __name__ == "__main__":
main()
| null |
escrevePlanilha.py
|
escrevePlanilha.py
|
py
| 771 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xlsxwriter.Workbook",
"line_number": 5,
"usage_type": "call"
}
] |
163006824
|
__author__ = 'damienpuig'
from setuptools import setup
setup(name='damienpuig',
version='0.1',
description='The funniest joke',
url='http://github.com/damienpuig/library_test',
author='Damien PUIG',
author_email='[email protected]',
license='MIT',
packages=['damienpuig'],
install_requires=[
'termcolor'
],
zip_safe=False)
| null |
pypi_install_script/damienpuig-0.1.tar/setup.py
|
setup.py
|
py
| 399 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "setuptools.setup",
"line_number": 5,
"usage_type": "call"
}
] |
276380495
|
# Copyright (C) 2008-2015 Ruben Decrop
# Copyright (C) 2015-2016 Chessdevil Consulting
import io
import sys
import urllib.request
import zipfile
from reddevil.models import rdr
from bjk2016.models import init_db
fidetitle = {
'': 0, 'WCM': 1, 'WFM': 2, 'CM': 3, 'FM': 4, 'WIM': 5, 'WGM': 6, 'IM': 7,
'GM': 8
}
def fetchfide():
"""
fetches the curretn fide list
:return: None
"""
print('fetching fide player list')
url = 'http://ratings.fide.com/download/players_list.zip'
try:
f = urllib.request.urlopen(url)
except urllib.request.URLError:
print('Cannot open url %s', url)
sys.exit(1)
fdata = f.read()
f.close()
fs1 = io.BytesIO(fdata)
zipfile_fide(fs1)
def zipfile_fide(fs1):
"""
reads the ratinglist zipfile, decrompress it and store all active
players in the fideplayer collection
:param fs1: filename (or file stream) of the zipfile
:return: None
"""
# read the zipfile in pldata and convert it to a byte stream
print('decompressing zip file and inserting players')
zf = zipfile.ZipFile(fs1, mode='r')
plist = zf.open(zf.namelist()[0]) # read first file in zipfile
plist.readline() # skip header line # Read all the players
# recreate the collection
col = rdr.db['fideplayer']
col.remove()
col.ensure_index('id_fide')
i = 0
# read every dbase record
for row in plist:
line = row.decode('utf-8')
p = dict()
p['id_fide'] = line[0:15].strip()
nfn = line[15:76].split(',')
p['name'] = nfn[0].strip()
p['firstname'] = nfn[1].strip() if len(nfn) == 2 else ''
p['fidenation'] = line[76:79]
p['gender'] = line[80]
p['chesstitle'] = fidetitle.get(line[84:89].strip(), 0)
i += 1
p['fiderating'] = int(line[113:117].strip() or 0)
col.insert(p)
i += 1
zf.close()
print('{0:d} players created to fideplayer'.format(i))
if __name__ == '__main__':
init_db()
fetchfide()
| null |
bjk2016/scripts/fideplayer.py
|
fideplayer.py
|
py
| 2,043 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.request.urlopen",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "urllib.request.request",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "reddevil.models.rdr.db",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "reddevil.models.rdr",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "bjk2016.models.init_db",
"line_number": 73,
"usage_type": "call"
}
] |
503809346
|
# What should be take care of here:
# - Security
# - Transaction control
# - Audit.
# - Customization
# - Model intelligence
# - Logging
LOGAUDIT = False
import medhist.models as md
import medhist.session as ss
from django.db import transaction as trc
import datetime as dt
current_audit = None
class Audit():
def __init__(self,operation,description):
self.operation = operation
self.description = description
def save(self,event,error = None):
if LOGAUDIT:
if ss.current_session:
print("AUDIT:[{}][{}]{}:{}:{}".format(
ss.current_session.schema,
ss.current_session.user,
self.operation,
self.description,
event))
else:
print("AUDIT:{}:{}:{}".format(self.operation,self.description,event))
if error:
print(error)
def __enter__(self):
global current_audit
if not current_audit:
current_audit = self
self.save('START')
return self
else:
return None
def __exit__(self, type, value, tb):
global current_audit
if current_audit == self:
if tb is None:
self.save('END')
else:
self.save('ERROR',(type,value))
import traceback
traceback.print_exc()
current_audit = None
BHV_Classes = {}
class baseBHV():
model = None
#
# NON BHV SPECIFIC METHODS
@classmethod
def noun(cls):
return cls.model._meta.verbose_name
@classmethod
def nouns(cls):
return cls.model._meta.verbose_name_plural
@classmethod
def security(cls,bhv,args,context):
method_name = '{0}_{1}'.format(bhv,'security')
if hasattr(cls,method_name):
getattr(cls,method_name)(args,context)
@classmethod
def pre_hook(cls,bhv,args,context):
method_name = '{0}_{1}'.format(bhv,'pre_hook')
if hasattr(cls,method_name):
getattr(cls,method_name)(args,context)
@classmethod
def post_hook(cls,bhv,args,context):
method_name = '{0}_{1}'.format(bhv,'post_hook')
if hasattr(cls,method_name):
getattr(cls,method_name)(args,context)
#
# BHV ADD
@classmethod
@trc.atomic
def add(cls,template):
bhv = 'add'
with Audit('{0} {1}'.format(bhv,cls.noun()),''):
context = {}
cls.security(bhv,template,context=context)
# pre_hook is meant to work for customizations
cls.pre_hook(bhv,template,context=context)
            # The one that actually does something
cls.add_do(template,context=context)
# post_hook is meant to work for customizations
cls.post_hook(bhv,template,context=context)
return context['added_object']
@classmethod
def add_do(cls,template,context):
if issubclass(cls.model,md.SchemaModel):
obj = cls.model(schema = ss.current_schema(),**template)
else:
obj = cls.model(**template)
obj.save()
context['added_object'] = obj
#
# BHV EDIT
@classmethod
@trc.atomic
def update(cls,id,template):
bhv = 'update'
with Audit('{0} {1}'.format(bhv,cls.noun()),''):
context = {'id' : id}
cls.security(bhv,template,context=context)
context['updated_object'] = cls.model.objects.get(pk=id)
# pre_hook is meant to work for customizations
cls.pre_hook(bhv,template,context=context)
            # The one that actually does something
cls.update_do(template,context=context)
# post_hook is meant to work for customizations
cls.post_hook(bhv,template,context=context)
return context['updated_object']
@classmethod
def update_do(cls,template,context):
obj = context['updated_object']
for att,val in template.items():
setattr(obj,att,val)
obj.save()
#
# BHV QUERY
@classmethod
def query(cls,args,kwargs):
bhv = 'query'
with Audit('{0} {1}'.format(bhv,cls.noun()),''):
context = {}
cls.security(bhv,(args,kwargs),context=context)
# pre_hook is meant to work for customizations
cls.query_pre_hook(bhv,args,kwargs,context=context)
            # The one that actually does something
cls.query_do(args,kwargs,context=context)
# post_hook is meant to work for customizations
cls.query_post_hook(bhv,args,kwargs,context=context)
return context['result']
@classmethod
def query_do(cls,args,kwargs,context):
if issubclass(cls.model,md.SchemaModel):
schkwargs = kwargs.copy()
schkwargs['schema'] = ss.current_schema()
else:
schkwargs = kwargs
if not args and not schkwargs:
context['result'] = cls.model.objects.all()
else:
context['result'] = cls.model.objects.filter(*args,**schkwargs)
@classmethod
def query_pre_hook(cls,bhv,args,kwargs,context):
pass
@classmethod
def query_post_hook(cls,bhv,args,kwargs,context):
pass
#
# BHV GET
@classmethod
def get_do(cls,args,kwargs,context):
if issubclass(cls.model,md.SchemaModel):
skwargs = kwargs.copy()
skwargs["schema"] = ss.current_schema()
else:
skwargs = kwargs
if not args and not skwargs:
raise RuntimeError("get must have filter")
else:
context['result'] = cls.model.objects.get(*args,**skwargs)
@classmethod
    def get(cls,*args,**kwargs):
        with Audit('{0} {1}'.format("get",cls.noun()),''):
            context = {}
            cls.security("query",(args,kwargs),context=context)
            # pre_hook is meant to work for customizations
            cls.query_pre_hook("query",args,kwargs,context=context)
            # The one that actually does something
            cls.get_do(args,kwargs,context=context)
            # post_hook is meant to work for customizations
            cls.query_post_hook("query",args,kwargs,context=context)
return context['result']
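#
# Hypothetical example (not part of the original module): a concrete API class
# can customize a behaviour by defining a method named '<bhv>_security',
# '<bhv>_pre_hook' or '<bhv>_post_hook'; baseBHV dispatches to it via getattr.
# The 'active' field below is illustrative only, not taken from the real models.
# class AuditedDoctorAPI(baseBHV):
#     model = md.Doctor
#     @classmethod
#     def add_pre_hook(cls, template, context):
#         template.setdefault('active', True)  # runs inside the 'add' transaction, before add_do()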
class DoctorAPI(baseBHV):
model = md.Doctor
BHV_Classes['Doctor'] = DoctorAPI
class PatientAPI(baseBHV):
model = md.Patient
BHV_Classes['Patient'] = PatientAPI
class InsurerAPI(baseBHV):
model = md.Insurer
BHV_Classes['Insurer'] = InsurerAPI
class ProcedureAPI(baseBHV):
model = md.Procedure
BHV_Classes['Procedure'] = ProcedureAPI
class MedicineAPI(baseBHV):
model = md.Medicine
BHV_Classes['Medicine'] = MedicineAPI
class StateAPI(baseBHV):
model = md.State
BHV_Classes['State'] = StateAPI
class CityAPI(baseBHV):
model = md.City
BHV_Classes['City'] = CityAPI
#
# BHV functions
#
def add(object_name,template):
return BHV_Classes[object_name].add(template)
def query(object_name,*args,**kwargs):
return BHV_Classes[object_name].query(args,kwargs)
def update(object_name,id,template):
return BHV_Classes[object_name].update(id,template)
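# Hypothetical usage sketch of the module-level behaviour functions above;
# the field names are illustrative, not taken from the real models.
# doc = add('Doctor', {'name': 'Gregory House'})
# docs = query('Doctor', name__startswith='G')
# doc = update('Doctor', doc.pk, {'name': 'G. House'})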
| null |
medsoft_proj/medhist/modelapi.py
|
modelapi.py
|
py
| 8,081 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "medhist.session.current_session",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "medhist.session",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "medhist.session.current_session",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "medhist.session",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "medhist.session.current_session",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "medhist.session",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "medhist.models.SchemaModel",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "medhist.session.current_schema",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "medhist.session",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "medhist.models.SchemaModel",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "medhist.session.current_schema",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "medhist.session",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "medhist.models.SchemaModel",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "medhist.session.current_schema",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "medhist.session",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "medhist.models.Doctor",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "medhist.models.Patient",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "medhist.models.Insurer",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "medhist.models.Procedure",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "medhist.models.Medicine",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "medhist.models.State",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "medhist.models.City",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "medhist.models",
"line_number": 250,
"usage_type": "name"
}
] |
83444363
|
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import errno
import os
import re
import requests
import sys
from timeit import default_timer as timer
import urllib.parse
class Subreddit:
"""
Scrapes a subreddit and downloads the .jpg and .png images.
Attributes:
subreddit (str) : the subreddit to scrape.
type_sort (str) : a sort of the subreddit (default '').
url (str) : the url of the subreddit.
image_links (set) : the .jpg and .png links to images.
Methods:
__init__ : initializes class Subreddit.
__repr__ : returns a string of the image_links.
_get_image_links : scrapes a subreddit for .jpg and .png images.
download_images : downloads a subreddit's .jpg and .png images.
_is_image_sub : determines if a subreddit is an image sub.
Examples:
>>> foo = Subreddit("bar")
>>> foo.download_images()
>>> foo = Subreddit("bar", "baz")
>>> foo.download_images()
>>> foo = Subreddit("bar", type_sort="baz")
>>> foo.download_images()
"""
def __init__(self, subreddit, type_sort=''):
"""
Initializes Subreddit.
Args:
subreddit (str) : a subreddit to scrape.
type_sort (str) : a subreddit sort (default '').
Returns:
SEE: _get_image_links()
"""
self.subreddit = subreddit
NON_TIME_OPTION = ("hot", "new", "rising")
TIME_OPTION = ("top", "controversial")
# Reddit's default subreddit sort is "hot"
if not type_sort:
self.url = f"https://reddit.com/r/{subreddit}"
# Subreddit sorts that do not allow for time sort
# (i.e., past 24 hours, past week, past month, past year, all-time)
elif type_sort in NON_TIME_OPTION:
self.url = f"https://reddit.com/r/{subreddit}/{type_sort}"
elif type_sort in TIME_OPTION:
# All-time sort instead of past 24 hours, week, month, or year
self.url = f"https://reddit.com/r/{subreddit}/{type_sort}/?t=all"
else:
print("Not a valid type sort...\n" \
"Reverting to default Reddit sort...")
self.url = f"https://reddit.com/r/{subreddit}"
self._get_image_links()
def __repr__(self):
return str(self.image_links)
def _get_image_links(self):
"""
Scrapes a subreddit for its .jpg and .png url links.
Returns:
A set of image links if len(set) >= 5 else an empty set.
"""
resp = requests.get(self.url, headers={"User-Agent": "Piccit"})
# Could use BeautifulSoup to parse, but regex is faster.
string_pattern = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|"\
"(?:%[0-9a-fA-F][0-9a-fA-F]))+"
retrieved_links = re.findall(string_pattern, resp.text)
# Make sure only image files are downloaded (else, some random mp3
# and other file formats because of ads will download).
# Using set() instead of [] or list() removes need to handle any
# duplicate links.
self.image_links = set(link for link in retrieved_links
if len(link) <= 40 # longest links are about 35
and ("i.redd" in link or "imgur" in link)
and (".jpg" in link or ".png" in link))
if self._is_image_sub():
return self.image_links
else:
print(f"{self.subreddit} is either not primarily an image " \
"subreddit or it may not exist.")
return set()
def download_images(self):
"""
Downloads a subreddit's .jpg and .png images.
Returns:
None
"""
if not self.image_links:
return
images_processed = 0
start = timer()
if len(self.image_links) > 0:
Directory.create_directory(self)
for link in self.image_links:
url = urllib.parse.urlparse(link)
file_name = os.path.basename(url.path)
save_directory = Directory.get_current_directory(self)
complete_name = os.path.join(save_directory, file_name)
with open(complete_name, "wb") as file:
response = requests.get(link)
file.write(response.content)
images_processed += 1
_progress_bar(images_processed, len(self.image_links))
end = timer()
# new line so the finished download message is not on the same line
# as the progress bar
print(f"\nDownload complete. {images_processed} images downloaded." \
f"\nDownload time: {end-start : .3f} seconds.")
def _is_image_sub(self):
"""
Checks if a subreddit has 5 or more .jpg and .png images.
        Subreddits that require age verification, subreddits that are
        quarantined or banned, and subreddits that do not exist count
        as a non image subreddit.
        Returns:
            A bool that is True when len(image_links) >= 5.
"""
return len(self.image_links) >= 5
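    # A small self-check sketch (not part of the original class): the same
    # regex used in _get_image_links(), applied to a fabricated page snippet.
    @staticmethod
    def _demo_link_extraction():
        sample = "a https://i.redd.it/abc123.jpg b https://example.com/ad.mp3 c"
        pattern = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|" \
                  "(?:%[0-9a-fA-F][0-9a-fA-F]))+"
        links = re.findall(pattern, sample)
        # only the .jpg link survives the same filtering the class applies
        return [link for link in links if ".jpg" in link or ".png" in link]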
class Directory:
"""
Handles directory logic.
Methods:
create_directory : create a directory if it does not exist.
get_current_directory : return the current working directory.
Examples:
>>> Directory.create_directory()
>>> current_directory = Directory.get_current_directory()
"""
def create_directory(self):
"""
Creates a directory "PiccitPhotos" in the ~/Pictures directory.
Raises:
OSError: If the path does not exist.
Returns:
Directory if directory exists else None.
"""
try:
os.chdir("./Pictures")
except OSError as e:
            if e.errno != errno.EEXIST:
sys.exit(e)
directory = "PiccitPhotos"
try:
if not os.path.isdir(directory):
os.mkdir(directory)
os.chdir(directory)
else:
os.chdir(directory)
return directory
except OSError as e:
            if e.errno != errno.EEXIST:
sys.exit(e)
def get_current_directory(self):
"""
Gets the current working directory.
Returns:
The current working directory.
"""
return os.getcwd()
class Formatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def _progress_bar(start, end, bar_length=25):
"""
Outputs the progress of the download to the console.
Args:
start (int) : current number of downloaded photos
end (int) : total number of photos to be downloaded
bar_length (int) : length of bar displayed on the console
(default 25)
Raises:
ValueError: if start is greater than end
Returns:
None
"""
if int(start) > int(end):
raise ValueError("start must be less than or equal to end")
percent = float(start / end)
fill = "\u2588" * int(round(percent * bar_length)) # full block: █
space = " " * (bar_length - len(fill))
progress = fill + space
percent_complete = int(round(percent*100))
    sys.stdout.write(f"\rProgress: |{progress}| {percent_complete}%")
sys.stdout.flush()
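# Worked illustration (hypothetical values) of the string _progress_bar builds
# for 10 of 40 items with the default bar_length=25:
#   percent = 0.25 -> fill = "█" * 6, space = " " * 19, percent_complete = 25
#   console shows: Progress: |██████                   | 25%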
def main():
"""Handles the command line logic."""
description_message = "download .jpg and .png images on the " \
"subreddit's first page"
usage_message = "piccit.py subreddit_name [-h] [-t type_sort]"
parser = argparse.ArgumentParser(description=description_message,
usage=usage_message,
formatter_class=Formatter)
parser.add_argument("subreddit_name",
type=str,
help="a subreddit to scrape")
type_sort_help_message = "sort of subreddit with options: hot, new, " \
"rising, top, controversial"
parser.add_argument("-t", "--type_sort",
type=str,
default="hot",
help=type_sort_help_message)
args = parser.parse_args()
subreddit_images = Subreddit(args.subreddit_name, args.type_sort)
subreddit_images.download_images()
sys.exit(0)
if __name__ == '__main__':
main()
| null |
src/piccit.py
|
piccit.py
|
py
| 8,538 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 279,
"usage_type": "call"
}
] |
409823890
|
# -*- encoding: utf-8 -*-  # Declares that this .py file is encoded in UTF-8
#!/usr/bin/python
"""
Experimentation platform
SERGIO DE LA CRUZ GUTIERREZ
MARTINELLI ALGORITHM
Parameters:
Motor suction (50-100)
Duration of the experiment (in samples)
Opening time of each valve (in seconds)
Invocation: tgs2600martinelli.py suction samples switch
"""
## Libraries used.
import Adafruit_BBIO.ADC as ADC
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
import Adafruit_DHT
import sys
import os
import time
import math
import random
import numpy
from scipy import stats
from datetime import datetime, date
import _thread
## PORT ASSIGNMENT.
electrovalve1 = 'P8_10' #Solenoid valve 1 (METHANOL)
electrovalve2 = 'P8_12' #Solenoid valve 2 (ETHANOL)
electrovalve3 = 'P8_14' #Solenoid valve 3 (BUTANOL)
electrovalve4 = 'P8_16' #Solenoid valve 4 (AIR)
motorPin = 'P9_21' #Suction motor
sensorPin555 = 'P9_12' #TGS2600 sensor after conversion with the 555 (Port GPIO_60)
heatPin2600 = 'P9_14' #TGS2600 sensor heating (Port GPIO_50)
sensorTemp22 = Adafruit_DHT.DHT22
Temp22 = 'P8_11' #Temperature/Humidity read pin with the DHT22 (Port GPIO_45)
## SYSTEM INPUT/OUTPUT DECLARATION.
GPIO.setup("P8_10",GPIO.OUT)
GPIO.setup("P8_12",GPIO.OUT)
GPIO.setup("P8_14",GPIO.OUT)
GPIO.setup("P8_16",GPIO.OUT)
GPIO.setup("P9_12",GPIO.IN) #555 output
## SYSTEM VARIABLE DECLARATION.
# Martinelli.
SAMPLESINICIO=1 #Initial captures
heat2600=100 #Sensor heating temperature
x=[] #Stores the sample number
up=[] #Stores the duration of the pulses in the high state
down=[] #Stores the duration of the pulses in the low state
tempTGS2600=[] #Stores the heating temperatures of the sensor
duracion=[] #Stores the duration of each pulse
duracion_k=[] #Stores the duration of the k pulses
# TGS2600 sensor. ADC reading
concentTGS2600=[] #Stores the odorant samples (ADC reading)
SLEEP_ADC = 1
NM=10 #Number of ADC readings
T=0.1 #The NM readings are taken in T seconds
tsub=T/NM #Time subdivisions for the ADC readings
Rl_2600=27000 #Ohm
Vc=5 #V
# Temperature and humidity measurement
SLEEP_tyh = 59
## NAME AND PATH OF THE OUTPUT FILES.
nameFile = "Martinelli_TGS2600.txt" #Information file with the parameters and readings of the experiment
nameFile_data1 = "Martinelli_data1.dat" #Data output file, duration of the Martinelli pulses
nameFile_data2 = 'Martinelli_data2.dat' #Data output file, duration of the k pulses
nameFile_tyh = "TyH_TGS2600.dat" #Temperature and humidity information file of the experiment
fileString = time.strftime("%a%d%b%Y-%HH%MM%SS", time.localtime())+'_' +nameFile
fileString_data1 = time.strftime("%a%d%b%Y-%HH%MM%SS", time.localtime())+'_' +nameFile_data1
fileString_data2 = time.strftime("%a%d%b%Y-%HH%MM%SS", time.localtime())+'_' +nameFile_data2
fileString_tyh = time.strftime("%a%d%b%Y-%HH%MM%SS", time.localtime())+'_' +nameFile_tyh
ahora=datetime.now()
ruta = "/media/microSD/FRECUENCIA/MARTINELLI/"+str(ahora.month)+"/"+str(ahora.day)+"/" #Output path
if not os.path.exists(ruta): os.makedirs(ruta) #If the path does not exist, it is created
ruta_fichero = ruta.strip() + str(fileString)
ruta_fichero_data1 = ruta.strip() + str(fileString_data1)
ruta_fichero_data2 = ruta.strip() + str(fileString_data2)
ruta_fichero_tyh = ruta.strip() + str(fileString_tyh)
## Creation of the experiment files.
g=open(ruta_fichero_data1, "w+") #experiment data file, duration of each pulse.
k=open(ruta_fichero_data2, "w+") #file that stores the duration of the 16 pulses
h = open(ruta_fichero_tyh, "w+") #file that stores the temperature and humidity
## FUNCTION that creates the TGS2600 sensor output file (write mode) and writes its header.
def file_TGS2600(vec_open_valve,succion,heat2600,SAMPLES):
    f = open(ruta_fichero, "w+")
    f.write ('/////////////////////////////////////////////////////////////////////\n')
    f.write ('\n\nExperimentation platform: \n\n\n\n')
    f.write ('TGS2600 sensor\n')
    f.write ('MARTINELLI algorithm\n')
    date=time.strftime("%a%d%b%Y-%HH%MM%SS", time.localtime())
    f.write ('Start date and time: ' + str(date) + '\n')
    f.write ('File path: ' + str(ruta) + '\n\n')
    f.write ('File name: ' + str(fileString) + '\n')
    f.write ('Data file 1 name: ' + str(fileString_data1) + '\n')
    f.write ('Data file 2 name: ' + str(fileString_data2) + '\n\n')
    f.write ('TyH file name: ' + str(nameFile_tyh) + '\n')
    f.write ('Solenoid valve (1-METHANOL, 2-ETHANOL, 3-BUTANOL, 4-AIR) \n\n')
    f.write ('Switching between solenoid valves: ' +str(vec_open_valve)+ '\n')
    f.write ('\n')
    f.write ('Parameters for the experiment, passed as arguments \n')
    f.write ('Motor suction (50-100) >>> '+ str(succion)+ '%\n')
    f.write ('Duration of the experiment: ' +str(SAMPLES)+ ' samples\n')
    f.write ('/////////////////////////////////////////////////////////////////////\n\n')
    f.write (' DATA CAPTURE:\n\n')
## SYSTEM FUNCTION DECLARATION.
# Solenoid valve opening.
def apertura(electrovalvula):
    if electrovalvula ==1:
        print ('solenoid valve 1\n')
        GPIO.output(electrovalve1, GPIO.LOW)
        GPIO.output(electrovalve2, GPIO.HIGH)
        GPIO.output(electrovalve3, GPIO.HIGH)
        GPIO.output(electrovalve4, GPIO.HIGH)
    elif electrovalvula == 2:
        print ('solenoid valve 2\n')
        GPIO.output(electrovalve1, GPIO.HIGH)
        GPIO.output(electrovalve2, GPIO.LOW)
        GPIO.output(electrovalve3, GPIO.HIGH)
        GPIO.output(electrovalve4, GPIO.HIGH)
    elif electrovalvula == 3:
        print ('solenoid valve 3\n')
        GPIO.output(electrovalve1, GPIO.HIGH)
        GPIO.output(electrovalve2, GPIO.HIGH)
        GPIO.output(electrovalve3, GPIO.LOW)
        GPIO.output(electrovalve4, GPIO.HIGH)
    elif electrovalvula == 4:
        print ('solenoid valve 4\n')
        GPIO.output(electrovalve1, GPIO.HIGH)
        GPIO.output(electrovalve2, GPIO.HIGH)
        GPIO.output(electrovalve3, GPIO.HIGH)
        GPIO.output(electrovalve4, GPIO.LOW)
def cerrarElectrovalvula():
    GPIO.output(electrovalve1, GPIO.LOW)
    GPIO.output(electrovalve2, GPIO.LOW)
    GPIO.output(electrovalve3, GPIO.LOW)
    GPIO.output(electrovalve4, GPIO.LOW)
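# A more compact, table-driven equivalent of apertura() above (a sketch only,
# not used by the rest of the script): the selected valve is driven LOW and
# the rest HIGH, mirroring the branch-per-valve version.
def apertura_tabla(electrovalvula):
    valves = (electrovalve1, electrovalve2, electrovalve3, electrovalve4)
    for idx, pin in enumerate(valves, start=1):
        GPIO.output(pin, GPIO.LOW if idx == electrovalvula else GPIO.HIGH)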
# Motor start.
def motor_start(succion):
PWM.start(motorPin,succion)
# Temperature and humidity measurement function
def measure_tyh(tiempo):
    j=0
    time.sleep(9)
    #Humidity and temperature reading
    while (tiempo == True):
        tick_HT = time.time()
        humidity, temperature = Adafruit_DHT.read_retry(sensorTemp22, Temp22)
        instante = datetime.now()
        tack_HT=time.time()
        t_HT=tack_HT-tick_HT
        #How long to sleep depends on how long the H and T reading took
        if t_HT > SLEEP_tyh:
            print ("H and T measurement time > SLEEP:", t_HT)
        else:
            print ("H and T measurement time:", t_HT)
        if humidity is not None and temperature is not None:
            print('\nSensor DHT22: ' +str(sensorTemp22))
            print('>>> '+str(instante)+' Temp = ' +str(temperature)+ ' Humidity = ' +str(humidity)+'\n')
            h = open(ruta_fichero_tyh, "a")
            wline_h=str(instante)+' '+str(temperature)+' '+str(humidity)+'\n'
            h.writelines(wline_h)
            h.flush()
        else :
            print ('Failed to get reading, Try again!')
            h = open(ruta_fichero_tyh, "a")
            wline_h=str(instante)+' '+str(temperature)+' '+str(humidity)+'\n'
            h.writelines(wline_h)
            h.flush()
        tack=time.time()
        time.sleep(max(0, SLEEP_tyh-(tack-tick_HT))) #never sleep a negative amount
        j+=1
# Initial Martinelli samples, TGS2600
def samplesinicio_martinelli_TGS2600():
    count = 0
    value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
    #print '0\n'
    ini_pulso = time.time()
    while count <= 7 :
        temperature_TGS2600=60
        PWM.set_duty_cycle(heatPin2600, temperature_TGS2600)
        value_up = GPIO.wait_for_edge(sensorPin555, GPIO.RISING) #Wait for a rising edge
        ini_up = time.time()
        time_down = ini_up - ini_pulso #Duration of the pulse in the low state
        #print '1\n'
        value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
        fin_pulso = time.time()
        instante_captura=datetime.now()
        time_up = fin_pulso - ini_up #Duration of the pulse in the high state
        time_pulso = fin_pulso-ini_pulso #Duration of the whole pulse
        ini_pulso = time.time()
        #print '0\n'
        x.append(count)
        up.append(time_up)
        down.append(time_down)
        duracion.append(time_pulso)
        #Print the reading to the console
        print ('Martinelli_ini sample ['+str(count)+']\n>> t_up='+str(time_up)+' >> t_down: '+str(time_down)+ ' >> Pulse duration: '+str(time_pulso))
        print ('>> Heat: '+str(temperature_TGS2600)+' >> '+str(instante_captura)+'\n')
        #Write the data to the files
        f=open(ruta_fichero, "a")
        g = open(ruta_fichero_data1, "a")
        wline_g=str(count)+'\t'+str(time_up)+'\t'+str(time_down)+'\t'+str(time_pulso)+'\t'+str(temperature_TGS2600)+'\t'+str(instante_captura)+'\n'
        wline_f='Sample_ini['+str(count)+'] t_up='+str(time_up)+' t_down: '+str(time_down)+ ' Pulse duration: '+str(time_pulso)+' Heat: '+str(temperature_TGS2600)+'>> '+str(instante_captura)+'\n'
        g.writelines(wline_g)
        g.flush()
        f.writelines(wline_f)
        f.flush()
        count+=1
    while count > 7 and count <= 15 :
        temperature_TGS2600=100
        PWM.set_duty_cycle(heatPin2600, temperature_TGS2600)
        value_up = GPIO.wait_for_edge(sensorPin555, GPIO.RISING) #Wait for a rising edge
        ini_up = time.time()
        time_down = ini_up - ini_pulso #Duration of the pulse in the low state
        #print '1\n'
        value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
        fin_pulso = time.time()
        instante_captura=datetime.now()
        time_up = fin_pulso - ini_up #Duration of the pulse in the high state
        time_pulso = fin_pulso-ini_pulso #Duration of the whole pulse
        ini_pulso = time.time()
        #print '0\n'
        x.append(count)
        up.append(time_up)
        down.append(time_down)
        duracion.append(time_pulso)
        #Print the reading to the console
        print ('Martinelli_ini sample ['+str(count)+']\n>> t_up='+str(time_up)+' >> t_down: '+str(time_down)+ ' >> Pulse duration: '+str(time_pulso))
        print ('>> Heat: '+str(temperature_TGS2600)+' >> '+str(instante_captura)+'\n')
        #Write the data to the files
        f=open(ruta_fichero, "a")
        g = open(ruta_fichero_data1, "a")
        wline_g=str(count)+'\t'+str(time_up)+'\t'+str(time_down)+'\t'+str(time_pulso)+'\t'+str(temperature_TGS2600)+'\t'+str(instante_captura)+'\n'
        wline_f='Sample_ini['+str(count)+'] t_up='+str(time_up)+' t_down: '+str(time_down)+ ' Pulse duration: '+str(time_pulso)+' Heat: '+str(temperature_TGS2600)+'>> '+str(instante_captura)+'\n'
        g.writelines(wline_g)
        g.flush()
        f.writelines(wline_f)
        f.flush()
        count+=1
# Martinelli TGS2600
def martinelli_TGS2600():
    count = 0
    value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
    #print '0\n'
    ini_pulso = time.time()
    while count <= 7 :
        temperature_TGS2600=60
        PWM.set_duty_cycle(heatPin2600, temperature_TGS2600)
        value_up = GPIO.wait_for_edge(sensorPin555, GPIO.RISING) #Wait for a rising edge
        ini_up = time.time()
        time_down = ini_up - ini_pulso #Duration of the pulse in the low state
        #print '1\n'
        value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
        fin_pulso = time.time()
        instante_captura=datetime.now()
        time_up = fin_pulso - ini_up #Duration of the pulse in the high state
        time_pulso = fin_pulso-ini_pulso #Duration of the whole pulse
        ini_pulso = time.time()
        #print '0\n'
        x.append(count)
        up.append(time_up)
        down.append(time_down)
        duracion.append(time_pulso)
        #Print the reading to the console
        print ('Martinelli sample ['+str(count)+']\n>> t_up='+str(time_up)+' >> t_down: '+str(time_down)+ ' >> Pulse duration: '+str(time_pulso))
        print ('>> Heat: '+str(temperature_TGS2600)+' >> '+str(instante_captura)+'\n')
        #Write the data to the files
        f=open(ruta_fichero, "a")
        g = open(ruta_fichero_data1, "a")
        wline_g=str(count)+'\t'+str(time_up)+'\t'+str(time_down)+'\t'+str(time_pulso)+'\t'+str(temperature_TGS2600)+'\t'+str(instante_captura)+'\n'
        wline_f='Sample['+str(count)+'] t_up='+str(time_up)+' t_down: '+str(time_down)+ ' Pulse duration: '+str(time_pulso)+' Heat: '+str(temperature_TGS2600)+'>> '+str(instante_captura)+'\n'
        g.writelines(wline_g)
        g.flush()
        f.writelines(wline_f)
        f.flush()
        count+=1
    while count > 7 and count <= 15 :
        temperature_TGS2600=100
        PWM.set_duty_cycle(heatPin2600, temperature_TGS2600)
        value_up = GPIO.wait_for_edge(sensorPin555, GPIO.RISING) #Wait for a rising edge
        ini_up = time.time()
        time_down = ini_up - ini_pulso #Duration of the pulse in the low state
        #print '1\n'
        value_down = GPIO.wait_for_edge(sensorPin555, GPIO.FALLING) #Wait for a falling edge
        fin_pulso = time.time()
        instante_captura=datetime.now()
        time_up = fin_pulso - ini_up #Duration of the pulse in the high state
        time_pulso = fin_pulso-ini_pulso #Duration of the whole pulse
        ini_pulso = time.time()
        #print '0\n'
        x.append(count)
        up.append(time_up)
        down.append(time_down)
        duracion.append(time_pulso)
        #Print the reading to the console
        print ('Martinelli sample ['+str(count)+']\n>> t_up='+str(time_up)+' >> t_down: '+str(time_down)+ ' >> Pulse duration: '+str(time_pulso))
        print ('>> Heat: '+str(temperature_TGS2600)+' >> '+str(instante_captura)+'\n')
        #Write the data to the files
        f=open(ruta_fichero, "a")
        g = open(ruta_fichero_data1, "a")
        wline_g=str(count)+'\t'+str(time_up)+'\t'+str(time_down)+'\t'+str(time_pulso)+'\t'+str(temperature_TGS2600)+'\t'+str(instante_captura)+'\n'
        wline_f='Sample['+str(count)+'] t_up='+str(time_up)+' t_down: '+str(time_down)+ ' Pulse duration: '+str(time_pulso)+' Heat: '+str(temperature_TGS2600)+'>> '+str(instante_captura)+'\n'
        g.writelines(wline_g)
        g.flush()
        f.writelines(wline_f)
        f.flush()
        count+=1
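# A factored-out sketch (not used by the loops above) of the edge-timing
# pattern both Martinelli functions repeat: wait for a rising and then a
# falling edge of the 555 output and return (t_up, t_down, t_pulse, end).
def measure_pulse(ini_pulso):
    GPIO.wait_for_edge(sensorPin555, GPIO.RISING)
    ini_up = time.time()
    GPIO.wait_for_edge(sensorPin555, GPIO.FALLING)
    fin_pulso = time.time()
    return fin_pulso - ini_up, ini_up - ini_pulso, fin_pulso - ini_pulso, fin_pulso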
# Shutdown
def cierre():
    PWM.stop(heatPin2600)
    PWM.stop(motorPin)
    PWM.cleanup()
    # _thread.exit() would only raise SystemExit in the calling (main) thread,
    # skipping everything below; the measurement thread dies with the process.
    cerrarElectrovalvula()
    fecha_fin=datetime.now()
    print('\nEXPERIMENT FINISHED SUCCESSFULLY')
    print ('Experiment finished: '+str(fecha_fin))
    f = open(ruta_fichero, "a")
    f.write('\nEXPERIMENT FINISHED SUCCESSFULLY\n')
    f.write('End date and time of the experiment: ' +str(fecha_fin))
    f.flush()
    f.close()
    g.close()
    h.close()
    k.close()
## MAIN BODY.
def main():
    if(len(sys.argv)<4): #the script needs three arguments (argv[1..3])
        print ('\n\nWRONG PARAMETERS. \n' \
        'The parameters must be:\n' \
        'Motor suction (50-100) \n' \
        'Duration of the experiment (in samples)\n'\
        'Opening time of each valve (in seconds)')
        return 0
    #MOTOR SUCTION: 1-100%
    succion = float(sys.argv[1])
    if succion > 100 or succion < 50:
        print ('\n\nWRONG MOTOR PARAMETER.\n' \
        'The parameters must be:\n' \
        'Motor suction (50-100) \n' \
        'Duration of the experiment (in samples)\n'\
        'Opening time of each valve (in seconds)')
        return 0
    else :
        print ('Motor suction at ' +str(succion)+ '% Motor PWM pin: ' +motorPin)
    #DURATION OF THE EXPERIMENT
    SAMPLES = float(sys.argv[2])
    if SAMPLES < 1:
        print ('\n\nWRONG EXPERIMENT DURATION PARAMETER.\n' \
        'The parameters must be:\n' \
        'Motor suction (50-100) \n' \
        'Duration of the experiment (in samples)\n'\
        'Opening time of each valve (in seconds)')
        return 0
    else :
        print ('Duration of the experiment ' +str(SAMPLES)+ ' samples')
    #OPENING TIME OF EACH VALVE
    conmutacion = float(sys.argv[3])
    if conmutacion < SAMPLES:
        print ('\n\nWRONG EXPERIMENT DURATION PARAMETER.\n' \
        'The parameters must be:\n' \
        'Motor suction (50-100) \n' \
        'Duration of the experiment (in samples)\n')
        return 0
    else :
        print ('Opening time of each solenoid valve ' +str(conmutacion)+ ' seconds\n')
    #The solenoid-valve switching sequence is drawn at random
    #(randint(4,5) always yields valve 4, AIR)
    vec_open_valve = numpy.random.randint(4,5,int(SAMPLES))
    print ('Switching between solenoid valves: ' +str(vec_open_valve)+ '\n')
    #Start the temperature and humidity measurement thread
    _thread.start_new_thread(measure_tyh, (True,))
    #Set up the ADC, PWM and GPIO ports
    motor_start(succion)
    ADC.setup()
    PWM.start(heatPin2600,heat2600,20000,0)
    #Create the experiment information file
    file_TGS2600(vec_open_valve,succion,heat2600,SAMPLES)
    print ('\nAcquisition starts. Initial samples: ' +str(SAMPLESINICIO)+ '\n\n')
    f = open(ruta_fichero, "a")
    f.write('\n\nAcquisition starts. Initial samples: ' +str(SAMPLESINICIO)+ '\n\n')
    f.flush()
    #Sensor warm-up: SAMPLESINICIO measurements are taken before the experiment starts
    i=1
    while i <= SAMPLESINICIO:
        time_martinelli_ini = time.time()
        samplesinicio_martinelli_TGS2600()
        time_martinelli_fin = time.time()
        instante_captura=datetime.now()
        time_martinelli = time_martinelli_fin-time_martinelli_ini #Duration of the k=16 pulses
        duracion_k.append(time_martinelli)
        print ('\nSamplesinicio_Iteration['+str(i)+'] Duration of the k=16 pulses: '+str(time_martinelli)+' >> '+str(instante_captura)+'\n')
        wline_f = 'Samplesinicio_Iteration['+str(i)+'] Duration of the k=16 pulses: '+str(time_martinelli)+' >> '+str(instante_captura)+'\n'
        wline_k = str(i)+' '+str(time_martinelli)+' '+str(instante_captura)+'\n'
        f = open(ruta_fichero, "a")
        k = open(ruta_fichero_data2, "a")
        f.writelines(wline_f)
        k.writelines(wline_k)
        f.flush()
        k.flush()
        i+=1
    #The experiment starts
    print ('\n\n' +str(SAMPLES)+ ' SAMPLES WILL BE CAPTURED\n\n')
    f.write('\n\n' +str(SAMPLES)+ ' SAMPLES WILL BE CAPTURED\n\n')
    f.flush()
    iteracion = 0
    while iteracion <= SAMPLES-1:
        electrovalvula = vec_open_valve[iteracion]
        if electrovalvula == 1:
            print ('SOLENOID VALVE: '+str(electrovalvula)+' METHANOL\n')
            f.write ('SOLENOID VALVE: '+str(electrovalvula)+' METHANOL\n')
            f.flush()
            apertura(electrovalvula)
        elif electrovalvula == 2:
            print ('SOLENOID VALVE: '+str(electrovalvula)+' ETHANOL\n')
            f.write ('SOLENOID VALVE: '+str(electrovalvula)+' ETHANOL\n')
            f.flush()
            apertura(electrovalvula)
        elif electrovalvula == 3:
            print ('SOLENOID VALVE: '+str(electrovalvula)+' BUTANOL\n')
            f.write ('SOLENOID VALVE: '+str(electrovalvula)+' BUTANOL\n')
            f.flush()
            apertura(electrovalvula)
        elif electrovalvula == 4: #AIR; apertura() supports this valve as well
            print ('SOLENOID VALVE: '+str(electrovalvula)+' AIR\n')
            f.write ('SOLENOID VALVE: '+str(electrovalvula)+' AIR\n')
            f.flush()
            apertura(electrovalvula)
        tiempo_valve = 0
        while tiempo_valve <= conmutacion:
            time_martinelli_ini = time.time()
            martinelli_TGS2600()
            time_martinelli_fin = time.time()
            instante_captura=datetime.now()
            time_martinelli = time_martinelli_fin-time_martinelli_ini #Duration of the k=16 pulses
            duracion_k.append(time_martinelli)
            print ('\nIteration['+str(iteracion)+'] Duration of the k=16 pulses: '+str(time_martinelli)+' >> '+str(instante_captura)+'\n')
            wline_f = 'Iteration['+str(iteracion)+'] Duration of the k=16 pulses: '+str(time_martinelli)+' >> '+str(instante_captura)+'\n'
            wline_k = str(iteracion)+' '+str(time_martinelli)+' '+str(instante_captura)+'\n'
            f = open(ruta_fichero, "a")
            k = open(ruta_fichero_data2, "a")
            f.writelines(wline_f)
            k.writelines(wline_k)
            f.flush()
            k.flush()
            tiempo_valve += time_martinelli
            print ('Accumulated time = '+str(tiempo_valve))
        iteracion +=1
    #Finish
    cierre()
    return 0
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print ('Interrupted')
cierre()
sys.exit(0)
| null |
Ficheros Carlos/tgs2600martinelli.py
|
tgs2600martinelli.py
|
py
| 19,579 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Adafruit_DHT.DHT22",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.setup",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.OUT",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.setup",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.OUT",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.setup",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.OUT",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.setup",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.OUT",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.setup",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.IN",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 127,
"usage_type": "argument"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.HIGH",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.GPIO.output",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.LOW",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "Adafruit_BBIO.PWM.start",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "Adafruit_DHT.read_retry",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM.set_duty_cycle",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.RISING",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM.set_duty_cycle",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.RISING",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM.set_duty_cycle",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.RISING",
"line_number": 330,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM.set_duty_cycle",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.RISING",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO.wait_for_edge",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.GPIO",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.GPIO.FALLING",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM.stop",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.PWM.stop",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.PWM.cleanup",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "_thread.exit",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 442,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "_thread.start_new_thread",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.ADC.setup",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.ADC",
"line_number": 488,
"usage_type": "name"
},
{
"api_name": "Adafruit_BBIO.PWM.start",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "Adafruit_BBIO.PWM",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 549,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 582,
"usage_type": "call"
}
] |
350662447
|
from __future__ import absolute_import
from __future__ import print_function
from keras import objectives
from keras.models import model_from_json
from keras.optimizers import SGD, RMSprop, Adam, Adamax , Nadam
from keras.callbacks import *
from keras.utils.generic_utils import slice_arrays
from .callbacks import *
from .constraints import Clip
from .layers.core import *
from .layers.masking import *
from .layers.sampling import *
from .layers.cov import *
from .layers.pooling import *
from .layers.scoring import *
from .layers.tensor_manipulation import *
from .layers.feats import *
def get_keras_custom_obj():
custom_obj = {
'Bias': Bias,
'Constant': Constant,
'TiledConstant': TiledConstant,
'ConstTriu': ConstTriu,
'TiledConstTriu': TiledConstTriu,
'Invert': Invert,
'Exp': Exp,
'ExpTaylor': ExpTaylor,
'Log': Log,
'NegLog': NegLog,
'NegSoftplus': NegSoftplus,
'Add1': Add1,
'Add01': Add01,
'Log1': Log1,
'NegLog1': NegLog1,
'Repeat': Repeat,
'CreateMask': CreateMask,
'GlobalMaskedMaxPooling1D': GlobalMaskedMaxPooling1D,
'GlobalMaskedAveragePooling1D': GlobalMaskedAveragePooling1D,
'GlobalWeightedAveragePooling1D': GlobalWeightedAveragePooling1D,
'GlobalWeightedSumPooling1D': GlobalWeightedSumPooling1D,
'GlobalWeightedMeanStdPooling1D': GlobalWeightedMeanStdPooling1D,
'GlobalWeightedMeanLogVarPooling1D': GlobalWeightedMeanLogVarPooling1D,
'GlobalSumPooling1D': GlobalSumPooling1D,
'GlobalSumWeights': GlobalSumWeights,
'LDE1D': LDE1D,
'GlobalNormalDiagCovPostStdPriorPooling1D': GlobalNormalDiagCovPostStdPriorPooling1D,
'GlobalDiagNormalPostStdPriorPooling1D': GlobalDiagNormalPostStdPriorPooling1D,
'GlobalProdRenormDiagNormalStdPrior': GlobalProdRenormDiagNormalStdPrior,
'GlobalProdRenormDiagNormalStdPrior2': GlobalProdRenormDiagNormalStdPrior2,
'GlobalProdRenormDiagNormalStdPrior3': GlobalProdRenormDiagNormalStdPrior3,
'GlobalProdRenormDiagNormalCommonCovStdPrior': GlobalProdRenormDiagNormalCommonCovStdPrior,
'GlobalProdRenormDiagNormalConstCovStdPrior': GlobalProdRenormDiagNormalConstCovStdPrior,
'GlobalProdRenormDiagNormalConstCovStdPrior2': GlobalProdRenormDiagNormalConstCovStdPrior2,
'GlobalProdRenormDiagNormalConstCovStdPrior3': GlobalProdRenormDiagNormalConstCovStdPrior3,
'GlobalProdRenormDiagNormalConstCovStdPrior4': GlobalProdRenormDiagNormalConstCovStdPrior4,
'GlobalProdRenormNormalConstCovStdPrior': GlobalProdRenormNormalConstCovStdPrior,
'MultConstDiagCov': MultConstDiagCov,
'MultConstDiagCovStdPrior': MultConstDiagCovStdPrior,
'MultConstCovStdPrior': MultConstCovStdPrior,
'BernoulliSampler': BernoulliSampler,
'NormalDiagCovSampler': NormalDiagCovSampler,
'DiagNormalSampler': DiagNormalSampler,
'DiagNormalSamplerFromSeqLevel': DiagNormalSamplerFromSeqLevel,
'CatQScoringDiagNormalPostStdPrior': CatQScoringDiagNormalPostStdPrior,
'CatQScoringDiagNormalHomoPostStdPrior': CatQScoringDiagNormalHomoPostStdPrior,
'ExpandAndTile': ExpandAndTile,
'Clip': Clip,
'DCT': DCT,
'MelFB': MelFB,
'Liftering': Liftering}
return custom_obj
def load_model_arch(file_path):
return model_from_json(open(file_path,'r').read(), get_keras_custom_obj())
def save_model_arch(file_path, model):
open(file_path,'w').write(model.to_json())
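# Hypothetical round-trip sketch for the two helpers above (the path and the
# model are placeholders, not part of the original file):
# from keras.models import Sequential
# from keras.layers import Dense
# m = Sequential([Dense(4, input_dim=8)])
# save_model_arch('/tmp/arch.json', m)
# m2 = load_model_arch('/tmp/arch.json')  # custom layers resolve via get_keras_custom_obj()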
# def filter_optimizer_args(**kwargs):
# return dict((k, kwargs[k])
# for k in ('opt_type', 'lr', 'momentum', 'decay',
# 'rho', 'epsilon', 'beta_1', 'beta_2',
# 'clipnorm', 'clipvalue') if k in kwargs)
# def create_optimizer(opt_type, lr, momentum=0, decay=0.,
# rho=0.9, epsilon=0., beta_1=0.9, beta_2=0.999,
# clipnorm=10, clipvalue=100):
# if opt_type == 'sgd':
# return SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False,
# clipnorm=clipnorm, clipvalue=clipvalue)
# if opt_type == 'nsgd':
# return SGD(lr=lr, momentum=momentum, decay=decay, nesterov=True,
# clipnorm=clipnorm, clipvalue=clipvalue)
# if opt_type == 'rmsprop':
# return RMSprop(lr=lr, rho=rho, epsilon=epsilon, decay=decay,
# clipnorm=clipnorm, clipvalue=clipvalue)
# if opt_type == 'adam':
# return Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
# decay=decay,
# clipnorm=clipnorm, clipvalue=clipvalue)
# if opt_type == 'nadam':
# return Nadam(lr=lr, beta_1=beta_1, beta_2=beta_2,
# epsilon=epsilon, schedule_decay=decay,
# clipnorm=clipnorm, clipvalue=clipvalue)
# if opt_type == 'adamax':
# return Adamax(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
# decay=decay,
# clipnorm=clipnorm, clipvalue=clipvalue)
# def filter_callbacks_args(**kwargs):
# return dict((k, kwargs[k])
# for k in ('save_best_only', 'mode',
# 'monitor', 'patience', 'min_delta',
# 'lr_steps', 'lr_patience', 'lr_factor',
# 'min_lr', 'log_append') if k in kwargs)
# def create_basic_callbacks(model, file_path, save_best_only=True, mode='min',
# monitor = 'val_loss', patience=None, min_delta=1e-4,
# lr_steps = None,
# lr_patience = None, lr_factor=0.1, min_lr=1e-5,
# log_append=False):
# if save_best_only == True:
# file_path_model = file_path + '/model.best'
# else:
# file_path_model = file_path + '/model.{epoch:04d}'
# cb = HypModelCheckpoint(model, file_path_model, monitor=monitor, verbose=1,
# save_best_only=save_best_only,
# save_weights_only=False, mode=mode)
# cbs = [cb]
# file_path_csv = file_path + '/train.log'
# cb = CSVLogger(file_path_csv, separator=',', append=log_append)
# cbs.append(cb)
# if patience is not None:
# cb = EarlyStopping(monitor=monitor, patience=patience,
# min_delta=min_delta, verbose=1, mode=mode)
# cbs.append(cb)
# if lr_steps is not None:
# cb = LearningRateSteps(lr_steps)
# cbs.append(cb)
# if lr_patience is not None:
# cb = ReduceLROnPlateau(monitor=monitor,
# factor=lr_factor, patience=lr_patience,
# verbose=1, mode=mode, epsilon=min_delta,
# cooldown=0, min_lr=min_lr)
# cbs.append(cb)
# return cbs
def weighted_objective_per_sample(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted objective function
`fn(y_true, y_pred, weights)`.
'''
def weighted(y_true, y_pred, weights):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
axis = list(range(1, weight_ndim))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(weights, axis=axis, keepdims=True)
return K.mean(score_array, axis=axis)
return weighted
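# Usage sketch (hedged; not in the original file): wrap a plain objective so
# it accepts per-sample weights.
#   weighted_mse = weighted_objective_per_sample(objectives.get('mse'))
#   per_sample_loss = weighted_mse(y_true, y_pred, sample_weights)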
def make_eval_function(model, loss, loss_weights=None, **kwargs):
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(model.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in model.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(model.output_names))
loss_weights_list = []
for name in model.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(model.outputs):
raise ValueError('When passing a list as loss_weights, '
'it should have one entry per model outputs. '
'The model has ' + str(len(model.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) +
' - expected a list or a dict.')
# prepare loss functions
if isinstance(loss, dict):
for name in loss:
if name not in model.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(model.output_names))
loss_functions = []
for name in model.output_names:
if name not in loss:
raise ValueError('Output "' + name +
'" missing from loss dictionary.')
loss_functions.append(objectives.get(loss[name]))
elif isinstance(loss, list):
if len(loss) != len(model.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model outputs. '
'The model has ' + str(len(model.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(model.outputs))]
weighted_losses = [weighted_objective_per_sample(fn) for fn in loss_functions]
# compute total loss
total_loss = None
for i in range(len(model.outputs)):
y_true = model.targets[i]
y_pred = model.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = model.sample_weights[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred, sample_weight)
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = model.inputs + model.targets + model.sample_weights + [K.learning_phase()]
else:
inputs = model.inputs + model.targets + model.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
eval_function = K.function(inputs,
[total_loss],
updates=model.state_updates,
**kwargs)
return eval_function
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
num_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batch)]
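# Quick sanity check (not part of the original file):
#   make_batches(10, 4) -> [(0, 4), (4, 8), (8, 10)]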
def _eval_loop(f, ins, batch_size=32):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
# Returns
A per-sample loss array if the model has a single output,
or a list of per-sample loss arrays if it has several outputs.
'''
num_sample = ins[0].shape[0]
outs = []
batches = make_batches(num_sample, batch_size)
index_array = np.arange(num_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# do not slice the training phase flag
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for _ in batch_outs:
outs.append(np.zeros((num_sample,)))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_ids] = batch_out
else:
if batch_index == 0:
outs.append(np.zeros((num_sample,)))
outs[0][batch_ids] = batch_outs
if len(outs) == 1:
return outs[0]
return outs
def eval_loss(model, loss_function, x, y, batch_size=32, sample_weight=None):
x, y, sample_weights = model._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
return _eval_loop(loss_function, ins, batch_size=batch_size)
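# End-to-end sketch (hedged; 'model', 'x' and 'y' are assumptions): build the
# eval function once, then compute per-sample losses without gradient updates.
#   f = make_eval_function(model, 'mse')
#   losses = eval_loss(model, f, x, y, batch_size=32)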
| null |
hyperion/keras/keras_utils.py
|
keras_utils.py
|
py
| 14,051 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "constraints.Clip",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "keras.models.model_from_json",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "keras.objectives.get",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "keras.objectives",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "keras.objectives.get",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "keras.objectives",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "keras.objectives.get",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "keras.objectives",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "keras.utils.generic_utils.slice_arrays",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "keras.utils.generic_utils.slice_arrays",
"line_number": 318,
"usage_type": "call"
}
] |
329688417
|
import sys
import pyric.pyw as pyw
from colorama import Fore, Style
def interface_command(interface, verbose):
faces = pyw.winterfaces() if interface == "all" else [interface]
for face in faces:
if face not in pyw.winterfaces():
sys.exit(f"{face} is not an interface")
print(f"{Fore.GREEN}Interfaces:{Fore.YELLOW}")
for interface in faces:
face = pyw.getcard(interface)
up = Fore.YELLOW if pyw.isup(face) else Fore.RED
print(f" {up}{interface:<10} {Style.RESET_ALL}")
if verbose >= 1:
iinfo = pyw.ifinfo(face)
for i in iinfo:
print(
f"\t{i.title():<15} {Fore.CYAN}{iinfo[i]}{Style.RESET_ALL}"
)
if verbose >= 2:
dinfo = pyw.devinfo(face)
for d in dinfo:
print(
f"\t{d.title():<15} {Fore.CYAN}{dinfo[d]}{Style.RESET_ALL}"
)
if verbose >= 3:
pinfo = pyw.phyinfo(face)
for p in pinfo:
if isinstance(pinfo[p], list):
print(
f"\t{p.title():<15} {Fore.CYAN}{', '.join(pinfo[p])}{Style.RESET_ALL}"
)
elif p == "bands":
print(
f"\t{p.title():<15} {Fore.CYAN}{', '.join(pinfo[p].keys())}{Style.RESET_ALL}"
)
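# Usage sketch (hedged; not part of the original file): list every wireless
# interface with full phy-level detail.
#   interface_command("all", verbose=3)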
| null |
boop/tools/interfaces.py
|
interfaces.py
|
py
| 1,422 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyric.pyw.winterfaces",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pyric.pyw.winterfaces",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "colorama.Fore.GREEN",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pyric.pyw.getcard",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pyric.pyw.isup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyric.pyw.ifinfo",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyric.pyw.devinfo",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pyric.pyw.phyinfo",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pyric.pyw",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 41,
"usage_type": "name"
}
] |
45950816
|
from sklearn.model_selection import train_test_split
import datetime
class Timer:
def __init__(self):
self.start_time = datetime.datetime.now()
def start(self):
self.start_time = datetime.datetime.now()
def stop(self):
interval = datetime.datetime.now() - self.start_time
return interval
def getClassificationErrorRate(result, y_test):
lines = len(result)
wrong = 0
index = []
for i in range(lines):
if(result[i] != y_test[i]):
wrong += 1
index.append(i)
error_rate = wrong / lines
return error_rate, index
def getRegressionErrorRate(result, y_test):
lines = len(result)
gap = []
for i in range(lines):
gap.append(abs(result[i] - y_test[i]))
return sum(gap) / sum(result)
def crossValidation(model, data, target, times=1, test_size=0.3, random_state=1):
rate_list = []
for i in range(times):
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=test_size, random_state=random_state + i)
model.train(X_train, y_train)
result = model.test(X_test)
rate = getClassificationErrorRate(result,y_test)
rate_list.append(rate[0])
return sum(rate_list)/len(rate_list), rate_list
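# Usage sketch (hedged; 'MyModel' is an assumption -- any object exposing
# train(X, y) and test(X) works with crossValidation):
#   timer = Timer()
#   mean_rate, rates = crossValidation(MyModel(), data, target, times=5)
#   print(mean_rate, timer.stop())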
| null |
TestTools.py
|
TestTools.py
|
py
| 1,298 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 43,
"usage_type": "call"
}
] |
633101333
|
# applying all functions to solve integrals
import numpy as np
import matplotlib.pyplot as plt
import time
import copy
from data import aero_data, grid, f100, transpose, nodes_z, nodes_x, times_z
from integrator import def_integral, indef_integral
from interpolator import spline_coefficient, spline_interpolator
from interpolator import cubic_interpolator, cubic_coefficients
import matplotlib.cm as cm
""" This calculates the n'th integral (with minimum of n=1). It is structured so that the program first calculates the definite integral from z=0 till z=C_a= -0.505.
Then, it calculates the indefinite integral along dx. The n'th integral (if n>=2) will then be the definite integral from x=0 till x=l_a=1.611.
res is the resolution. Higher value = more accurate, but longer runtime """
def integral_z(n, x_final=1.611, z_sc=None, res=1000):
# --------------------- input data --------------------------------
newgrid = copy.deepcopy(grid)
""" boundaries of the integration """
x1, x2 = 0, 1.611
z1, z2 = 0, 0.505
if z_sc is not None:
aero_data_z = times_z(aero_data, nodes_z, z_sc)
newgrid = transpose(aero_data_z)
coord_sys=(1.611,0,-0.505,0)
plt.imshow(transpose(newgrid), extent=coord_sys,interpolation='nearest', cmap=cm.gist_rainbow)
plt.colorbar()
plt.show()
# ------------------ main program ---------------------------
start_time = time.time() # to calculate runtime of the program
""" The program can only calculate integrals of functions, not matrixes or wathever.
This function can only have one variable as input: x-value. It also outputs only one value: y-value (=interpolated aero_data)
The following definition makes such a function that can later be used in the integral"""
def cubic_function(x):
y = cubic_interpolator(matrix, nodes,row, x)
return y
""" the function 'spline_coefficient(nodes,row)' converts an array of x-values (=nodes) and an array of y-values (=column of the aero_data) into a matrix. This matrix is necessary to use the function 'spline_interpolator'. (see interpolation file for explenation) """
nodes = nodes_z
solution = []
for row in newgrid:
matrix = cubic_coefficients(nodes, row)
""" This calculates the definite integral from z1 to z2 of 'function' """
a = def_integral(cubic_function, z1, z2, res)
solution.append(a)
""" The result is a 1D array of data corresponding to the values of the definite integrals of interpolated columns of the aero_data """
""" This can be used to check the results for when n=1 """
if n == 1:
x = np.linspace(0, 1.611, len(solution))
plt.xlabel('x-axis')
plt.plot(x, solution)
plt.show()
return solution
nodes = nodes_x
if n == 2:
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = def_integral(cubic_function, x1, x_final, res)
else:
for i in range(n - 2):
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = indef_integral(cubic_function, x1, x2, res)
nodes = np.linspace(x1, x2, len(solution))
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = def_integral(cubic_function, x1, x_final, res)
end_time = time.time()
run_time = end_time - start_time # print run_time to see the time it took the program to compute
return solution
def integral_x(n, res=1000):
newgrid = copy.deepcopy(aero_data)
x1, x2 = 0, 1.611
z1, z2 = 0, 0.505
def cubic_function(x):
y = cubic_interpolator(matrix, nodes, row, x)
return y
nodes = nodes_x
solution = []
for row in newgrid:
matrix = cubic_coefficients(nodes, row)
a = def_integral(cubic_function, x1, x2, res)
solution.append(a)
if n == 1:
x = np.linspace(0, 1.611, len(solution))
plt.xlabel('z-axis')
plt.plot(x, solution)
plt.show()
return solution
nodes = nodes_z
if n == 2:
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = def_integral(cubic_function, z1, z2, res)
else:
for i in range(n - 2):
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = indef_integral(cubic_function, z1, z2, res)
nodes = np.linspace(z1, z2, len(solution))
row = solution
matrix = cubic_coefficients(nodes, solution)
solution = def_integral(cubic_function, z1, z2, res)
return solution
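# Usage sketch (hedged; not part of the original file): the double integral of
# the interpolated aerodynamic load over the full surface, at default resolution.
#   total = integral_z(2)
#   print(total)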
| null |
Code/Verification integration/COMBINED.py
|
COMBINED.py
|
py
| 4,813 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "copy.deepcopy",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "data.grid",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "data.times_z",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "data.aero_data",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "data.nodes_z",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "data.transpose",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "data.transpose",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm.gist_rainbow",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "interpolator.cubic_interpolator",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "data.nodes_z",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "data.nodes_x",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "integrator.indef_integral",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "data.aero_data",
"line_number": 88,
"usage_type": "argument"
},
{
"api_name": "interpolator.cubic_interpolator",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "data.nodes_x",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "data.nodes_z",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "integrator.indef_integral",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "interpolator.cubic_coefficients",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "integrator.def_integral",
"line_number": 125,
"usage_type": "call"
}
] |
553081879
|
# code your activity here
# or replace this file with your python activity file
# import statements
"""
class exampleActivity(activity.Activity):
"""
# Sugar Imports
from sugar3.activity.activity import Activity
from sugar3.activity.widgets import StopButton
from sugar3.activity.widgets import ActivityButton
from PIL import Image
# Gtk Import
from gi.repository import Gtk
from gettext import gettext as _
class Example(Activity):
def __init__(self, sugar_handle):
Activity.__init__(self, sugar_handle)
# Create a Toolbar
toolbar = Gtk.Toolbar()
# Add toolbar to Sugar Activity Toolbar Space
self.set_toolbar_box(toolbar)
# Add Activity Button
toolbar.insert(ActivityButton(self), -1)
# Create & Add Separator
separator = Gtk.SeparatorToolItem(draw=False)
separator.set_expand(True)
toolbar.insert(separator, -1)
# Add Stop Button
toolbar.insert(StopButton(self), -1)
# Create Container
grid = Gtk.Grid()
# Add grid to Sugar Activity GtkWindow
self.set_canvas(grid)
# Create & Add Label
label = Gtk.Label(label=_("Weather: "))
grid.attach(label, 0, 0, 1, 1)
# Add Output Label
output = Gtk.Label()
grid.attach(output, 0, 6, 1, 1)
# Create & Add Text Entry x2
entry = Gtk.Entry()
grid.attach(entry, 0, 1, 1, 1)
entry2 = Gtk.Entry()
grid.attach(entry2, 0, 2, 1, 1)
# Empty output on keypress in entry
entry.connect('key-release-event', self.emptyout, output)
entry2.connect('key-release-event', self.emptyout, output)
# Add buttons
sunnyButton = Gtk.Button(label=_("Sunny"))
grid.attach(sunnyButton, 0, 3, 1, 1)
cloudyButton = Gtk.Button(label=_("Cloudy"))
grid.attach(cloudyButton, 1, 3, 1, 1)
rainyButton = Gtk.Button(label=_("Rainy"))
grid.attach(rainyButton, 2, 3, 1, 1)
snowyButton = Gtk.Button(label=_("Snowy"))
grid.attach(snowyButton, 3, 3, 1, 1)
# Tell the buttons to run a class method
sunnyButton.connect('clicked', self.showWeather, "Sunny", entry, entry2, output)
cloudyButton.connect('clicked', self.showWeather, "Cloudy", entry, entry2, output)
rainyButton.connect('clicked', self.showWeather, "Rainy", entry, entry2, output)
snowyButton.connect('clicked', self.showWeather, "Snowy", entry, entry2, output)
# Show all components (otherwise none will be displayed)
self.show_all()
def greeter(self, button, entry, entry2, output):
if len(entry.get_text()) > 0:
output.set_text("WEATHER TODAY IS: \n" + entry.get_text() + "\n" + entry2.get_text())
else:
output.set_text("Enter the weather.")
def showWeather(self, button, state, entry, entry2, output):
image = Image.open("activity/art/HotSun.png")
output.set_text("Weather State is: " + state + ". " + "Temperature is " + entry.get_text() + ". Humidity is " + entry2.get_text())
def emptyout(self, entry, event, output):
output.set_text("")
| null |
activity.py
|
activity.py
|
py
| 3,237 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sugar3.activity.activity.Activity",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sugar3.activity.activity.Activity.__init__",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sugar3.activity.activity.Activity",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Toolbar",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sugar3.activity.widgets.ActivityButton",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.SeparatorToolItem",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sugar3.activity.widgets.StopButton",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Grid",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "gettext.gettext",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Entry",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Entry",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "gettext.gettext",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "gettext.gettext",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "gettext.gettext",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "gettext.gettext",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 96,
"usage_type": "name"
}
] |
14500492
|
# URL Handler for the website
from django.conf.urls import url,include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('Personal.urls')),
url(r'^home/', include('Personal.urls')),
url(r'^learn/', include('Learn.urls')),
url(r'^questions/', include('Questions.urls')),
#url(r'^interviews/', include('Interviews.urls')),
url(r'^qa/', include('qa.urls')),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
]
| null |
codesquest/urls.py
|
urls.py
|
py
| 524 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 22,
"usage_type": "call"
}
] |
558537848
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from .models import Category
# Create your tests here.
class CategoryMethodTests(TestCase):
def test_ensure_views_are_positive(self):
cat = Category(name='test', views=-1, likes=0)
cat.save()
self.assertEqual((cat.views >= 0), True)
def test_slug_line_creation(self):
cat = Category(name='Random Category String')
cat.save()
self.assertEqual(cat.slug, 'random-category-string')
class IndexViewTests(TestCase):
def test_index_view_with_no_category(self):
response = self.client.get(reverse('rango:rango_default'))
self.assertEqual(response.status_code, 200)
# self.assertContains(response, "There are no categories present.")
# self.assertQuerySetEqual(response.context['categories'], [])
# def test_index_view_with_category(self):
# add_cat('test1', 1, 1)
# add_cat('test2', 2, 2)
# add_cat('test3', 3, 3)
# add_cat('test4', 4, 4)
# response = self.client.get(reverse('rango:rango_default'))
# self.assertEqual(response.status_code, 200)
# self.assertContains(response, "test")
| null |
project_rango/apps/rango/tests.py
|
tests.py
|
py
| 1,223 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Category",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 21,
"usage_type": "call"
}
] |
547973984
|
# Authors: Hagen Blix
# Script for generating sentences with collective predicates
import pattern.en  # needed below to pluralize regular nouns
# from pattern.en import conjugate as pconj
from utils.conjugate2 import *
from utils.string_utils import remove_extra_whitespace
from random import choice
import numpy as np
import os
# initialize output file
rel_output_path = "outputs/plurals/environment=collectivepredicates.tsv"
project_root = "/".join(os.path.join(os.path.dirname(os.path.abspath(__file__))).split("/")[:-2])
output = open(os.path.join(project_root, rel_output_path), "w")
# set total number of paradigms to generate
number_to_generate = 2000
sentences = set()
# gather word classes that will be accessed frequently
all_irregular_nouns = get_all_conjunctive([("category", "N"), ("irrpl", "1")])
# all_irregular_nouns_pl = get_all("pl", "1", all_irregular_nouns)
all_regular_nouns = get_all_conjunctive([("category", "N"), ("irrpl", "")])
all_regular_nouns_sg = get_all("sg", "1", all_regular_nouns)
all_regular_nouns_animate = get_all("animate", "1", all_regular_nouns_sg)
all_regular_nouns_inanimate = get_all("animate", "0", all_regular_nouns_sg)
# all_regular_nouns_pl = get_all("pl", "1", all_regular_nouns)
all_coll_pred = get_all("category_2", "IV_ag_pl")
all_ncoll_pred = get_all("category_2", "IV_ag")
while len(sentences) < number_to_generate/3:
Nirr_sg = choice(all_irregular_nouns)
while Nirr_sg["sgequalspl"] == "1": # Exclude sg=pl nouns
Nirr_sg = choice(all_irregular_nouns)
Nirr_pl = Nirr_sg.copy()
Nirr_pl[0] = Nirr_pl["pluralform"]
Nirr_pl["sg"] = 0
Nirr_pl["pl"] = 1
if Nirr_sg["animate"] == "1":
Nreg_sg = choice(all_regular_nouns_animate)
while " " in Nreg_sg:
Nreg_sg = choice(all_regular_nouns_animate)
else:
Nreg_sg = choice(all_regular_nouns_inanimate)
while " " in Nreg_sg:
Nreg_sg = choice(all_regular_nouns_inanimate)
Nreg_pl = Nreg_sg.copy()
Nreg_pl[0] = pattern.en.pluralize(Nreg_pl[0])
Nreg_pl["sg"] = 0
Nreg_pl["pl"] = 1
# Apparently this isn't coded?
# coll_pred = choice(get_matched_by(Nirr_sg, "arg_1", all_coll_pred))
# ncoll_pred = choice(get_matched_by(Nirr_sg, "arg_1", all_ncoll_pred))
coll_pred = choice(all_coll_pred)
ncoll_pred = choice(all_ncoll_pred)
while " " in ncoll_pred[0]:
ncoll_pred = choice(all_ncoll_pred) # Avoid things I can't inflect
# TODO Doesn't match the noun and the verb for animacy etc?
# TODO: You might want to exhaust the list of irregular nouns?
# Determiners (just strings):
definiteness = np.random.choice([True, False])
if definiteness:
# Definites:
det_def_abstract = np.random.choice([1, 2, 3], p=[0.9, 0.05, 0.05])
if det_def_abstract == 1:
Dreg_sg = "the"
Dirr_sg = "the"
Dreg_pl = "the"
Dirr_pl = "the"
elif det_def_abstract == 2:
Dreg_sg = "this"
Dirr_sg = "this"
Dreg_pl = "these"
Dirr_pl = "these"
elif det_def_abstract == 3:
Dreg_sg = "that"
Dirr_sg = "that"
Dreg_pl = "those"
Dirr_pl = "those"
else:
# Indefinites:
det_indef_abstract = np.random.choice([True, False], p=[0.85, 0.15]) # True = indef article, False = some
if det_indef_abstract:
Dreg_pl = ""
try:
if Nreg_sg["start_with_vowel"] == 1:
Dreg_sg = "an"
else:
Dreg_sg = "a"
except:
if Nreg_sg[0][0] in ["a", "e", "i", "o"]:
Dreg_sg = "an"
else:
Dreg_sg = "a"
if Nirr_sg[0][0] in ["a", "e", "i", "o"]:
Dirr_sg = "an"
else:
Dirr_sg = "a"
Dirr_pl = ""
else:
Dreg_sg = "some"
Dirr_sg = "some"
Dreg_pl = "some"
Dirr_pl = "some"
# Build Paradigms
# Step 1: Generate conjugation pattern:
the_aux = np.random.choice([0,1])
the_tense = np.random.choice([0,1])
the_neg = np.random.choice([0,1], p=[0.8, 0.2])
if the_tense == 0:
tensestring = "true"
else:
tensestring = "false"
copy_verb = coll_pred.copy()
conjugate2(copy_verb,Nreg_sg,the_aux,the_tense,the_neg)
sentence_1 = remove_extra_whitespace(Dreg_sg + " " + Nreg_sg[0] + " " + copy_verb[0])
sentence_1_meta = "experiment=plurals_env=collective_predicates_reg=1_sg=1_coll=1" + "_present=" + tensestring
sentence_1_grammaticality = 0
copy_verb = ncoll_pred.copy()
conjugate2(copy_verb,Nreg_sg,the_aux,the_tense,the_neg)
sentence_2 = remove_extra_whitespace(Dreg_sg + " " + Nreg_sg[0] + " " + copy_verb[0])
sentence_2_meta = "experiment=plurals_env=collective_predicates_reg=1_sg=1_coll=0" + "_present=" + tensestring
sentence_2_grammaticality = 1
copy_verb = coll_pred.copy()
conjugate2(copy_verb, Nreg_pl, the_aux, the_tense, the_neg)
sentence_3 = remove_extra_whitespace(Dreg_pl + " " + Nreg_pl[0] + " " + copy_verb[0])
sentence_3_meta = "experiment=plurals_env=collective_predicates_reg=1_sg=0_coll=1" + "_present=" + tensestring
sentence_3_grammaticality = 1
copy_verb = ncoll_pred.copy()
conjugate2(copy_verb, Nreg_pl, the_aux, the_tense, the_neg)
sentence_4 = remove_extra_whitespace(Dreg_pl + " " + Nreg_pl[0] + " " + copy_verb[0])
sentence_4_meta = "experiment=plurals_env=collective_predicates_reg=1_sg=0_coll=0" + "_present=" + tensestring
sentence_4_grammaticality = 1
copy_verb = coll_pred.copy()
conjugate2(copy_verb, Nirr_sg, the_aux, the_tense, the_neg)
sentence_5 = remove_extra_whitespace(Dirr_sg + " " + Nirr_sg[0] + " " + copy_verb[0])
sentence_5_meta = "experiment=plurals_env=collective_predicates_reg=0_sg=1_coll=1" + "_present=" + tensestring
sentence_5_grammaticality = 0
copy_verb = ncoll_pred.copy()
conjugate2(copy_verb, Nirr_sg, the_aux, the_tense, the_neg)
sentence_6 = remove_extra_whitespace(Dirr_sg + " " + Nirr_sg[0] + " " + copy_verb[0])
sentence_6_meta = "experiment=plurals_env=collective_predicates_reg=0_sg=1_coll=0" + "_present=" + tensestring
sentence_6_grammaticality = 1
copy_verb = coll_pred.copy()
conjugate2(copy_verb, Nirr_pl, the_aux, the_tense, the_neg)
sentence_7 = remove_extra_whitespace(Dirr_pl + " " + Nirr_pl[0] + " " + copy_verb[0])
sentence_7_meta = "experiment=plurals_env=collective_predicates_reg=0_sg=0_coll=1" + "_present=" + tensestring
sentence_7_grammaticality = 1
copy_verb = ncoll_pred.copy()
conjugate2(copy_verb, Nirr_pl, the_aux, the_tense, the_neg)
sentence_8 = remove_extra_whitespace(Dirr_pl + " " + Nirr_pl[0] + " " + copy_verb[0])
sentence_8_meta = "experiment=plurals_env=collective_predicates_reg=0_sg=0_coll=0" + "_present=" + tensestring
sentence_8_grammaticality = 1
if sentence_1 not in sentences and sentence_2 not in sentences and sentence_5 not in sentences:
# write the full eight-sentence paradigm once, skipping duplicates
output.write("%s\t%d\t\t%s\n" % (sentence_1_meta, sentence_1_grammaticality, sentence_1))
output.write("%s\t%d\t\t%s\n" % (sentence_2_meta, sentence_2_grammaticality, sentence_2))
output.write("%s\t%d\t\t%s\n" % (sentence_3_meta, sentence_3_grammaticality, sentence_3))
output.write("%s\t%d\t\t%s\n" % (sentence_4_meta, sentence_4_grammaticality, sentence_4))
output.write("%s\t%d\t\t%s\n" % (sentence_5_meta, sentence_5_grammaticality, sentence_5))
output.write("%s\t%d\t\t%s\n" % (sentence_6_meta, sentence_6_grammaticality, sentence_6))
output.write("%s\t%d\t\t%s\n" % (sentence_7_meta, sentence_7_grammaticality, sentence_7))
output.write("%s\t%d\t\t%s\n" % (sentence_8_meta, sentence_8_grammaticality, sentence_8))
# keep track of which sentences have already been generated
sentences.add(sentence_1)
sentences.add(sentence_2)
sentences.add(sentence_5)
output.close()
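# Output format note (not part of the original script): each line written above
# follows the "%s\t%d\t\t%s\n" template, i.e.
#   <metadata>\t<grammaticality 0|1>\t\t<sentence>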
| null |
generation_projects/plurality/collective_predicates.py
|
collective_predicates.py
|
py
| 8,130 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "utils.string_utils.remove_extra_whitespace",
"line_number": 175,
"usage_type": "call"
}
] |
490386672
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import UserForm
from error_pages.http import Http400
def home(request, name, number_games):
if number_games > 10 or number_games < 1:
raise Http400
number_games = number_games - 1
variables = {
"winner": "je suis un winner",
"ia_choise": "Pierre",
"number_games": number_games
}
return render(request, "shifumi/shifumi.html", variables)
def user(request):
form = UserForm(request.POST or None)
if form.is_valid():
name = form.cleaned_data['name']
number_games = form.cleaned_data['number_games']
return redirect(home, name, number_games)
return render(request, "shifumi/user.html", locals())
def player_bord(request):
return HttpResponse('''
<h1>Liste des parties</h1>
''')
| null |
testunitaire/shifumi/views.py
|
views.py
|
py
| 891 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "error_pages.http.Http400",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "forms.UserForm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 32,
"usage_type": "call"
}
] |
397437664
|
'''
start a thread capture frames
save frames to redis server
'''
import sys
import traceback
from queue import Queue
from threading import Thread
import threading
import cv2 as cv
import logging
import datetime
import time
# sys.path.append('../')
from utils_ken.log.getlog import get_logger
log_cam = get_logger(logname="cam", logfile='./logs/cam.log')
class VideoStream():
'''
Instance used to capture video's frames
'''
def __init__(self, cam_url, step=3):
'''
Initialize a new object for capturing
Args:
cam_url -> str:
address of the camera / video stream
step -> int:
keep one frame out of every `step` frames
Return:
None
'''
self.__cam_addr = cam_url
self.__step = step
self.__frame_queue = Queue(maxsize=4)
self.__stopped = False
self.__stopped_lock = threading.Lock()
def start(self):
'''
Get the object started
and create new thread to run
Args:
None
Return:
None
'''
self.thread_c = Thread(target=self.__update, args=())
self.thread_c.daemon = True
self.thread_c.start()
# return self
def __update(self):
'''
Repeatedly grab new frames from the IP camera;
runs on the worker thread created by start()
Args:
None
Return:
None
'''
while not self.get_stopped():
try:
cnt = 0
# capture or recapture
log_cam.info("Start capture video")
capturer = cv.VideoCapture(self.__cam_addr)
while not self.get_stopped():
success, frame = capturer.read()
cnt +=1
if not success:
log_cam.info("break to recapture ")
time.sleep(10)
break
if cnt >= self.__step:
# print(cnt)
if self.__frame_queue.full():
self.__frame_queue.get()
log_cam.info("queue full and waiting ")
time.sleep(0.1)
cnt = 0
log_cam.info("queue put ")
self.__frame_queue.put(frame)
# log_cam.info('Cam : {} break Reconnection '.format(self.__cam_id))
while not self.__frame_queue.empty() :
self.__frame_queue.get()
except Exception as ex:
traceback.print_exc()
traceback.print_tb(ex.__traceback__)
log_cam.info("Cam lose connection")
# capturer.release()
finally:
capturer.release()
log_cam.info('Cam released, reconnecting')
while not self.__frame_queue.empty():
self.__frame_queue.get()
log_cam.info('Cam is Stoped')
def stop(self):
'''
stop the camera thread
'''
self.__stopped_lock.acquire()
self.__stopped = True
self.__stopped_lock.release()
while not self.__frame_queue.empty():
self.__frame_queue.get()
time_close = datetime.datetime.now()
# log_cam.info("Join Thread capture at at {}:{}".format(time_close.hour,time_close.minute))
# self.thread_c.join()
log_cam.info("Cam terminateed ")
def get_stopped(self):
'''
return true if thread need to stop, false if vice versa
'''
self.__stopped_lock.acquire()
stopped = self.__stopped
self.__stopped_lock.release()
return stopped
def read(self):
'''
Read a frame from Queue and return
Args:
None
Return:
frame -> np.array((H, W, 3) ):
frame from Camera if available
otherwise None
'''
log_cam.info("queue get ")
return self.__frame_queue.get()
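# Minimal usage sketch (hedged; the URL is a placeholder, not a real stream):
#   vs = VideoStream("rtsp://<camera-address>/stream", step=3)
#   vs.start()
#   frame = vs.read()  # blocks until a frame is available
#   vs.stop()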
| null |
utils_ken/video/video_stream.py
|
video_stream.py
|
py
| 4,226 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils_ken.log.getlog.get_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 120,
"usage_type": "attribute"
}
] |
247946889
|
import matplotlib
import gzip
import pandas as pd
# import seaborn as sns
from mnist import MNIST
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
filenames = 'train-images-idx3-ubyte t10k-images-idx3-ubyte train-labels-idx1-ubyte t10k-labels-idx1-ubyte'.split() # noqa
for i, filename in enumerate(filenames):
pathin = 'lessons/shared-resources/mnist/' + filename + '.gz'
pathout = pathin[:-3]
with gzip.open(pathin) as fin:
print("Reading file #{}: {}".format(i, pathin))
with open('lessons/shared-resources/mnist/' + filename, 'wb') as fout:
print("Writing file #{}: {}".format(i, pathout))
fout.write(fin.read())
mnistdb = MNIST('lessons/shared-resources/mnist/')
x_train, y_train = mnistdb.load_training()
x_test, y_test = mnistdb.load_testing()
df_train = pd.DataFrame(list(zip(x_train, y_train)), columns=['X', 'y'])
df_test = pd.DataFrame(list(zip(x_test, y_test)), columns=['X', 'y'])
df_train_image = pd.DataFrame(list(df_train.X.values))
pca = PCA(n_components=15).fit(df_train_image)
# wrap the PCA scores in a DataFrame so labeled columns can be added below
df_pca = pd.DataFrame(pca.transform(df_train_image))
kmeans = KMeans(n_clusters=10).fit(df_pca)
df_pca['cluster_id'] = kmeans.predict(df_pca.iloc[:, :15])
df_pca['digit_id'] = df_train.y
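# Hedged follow-up (not in the original script): compare clusters with digit
# labels via a contingency table.
#   print(pd.crosstab(df_pca['cluster_id'], df_pca['digit_id']))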
| null |
mnist_solver.py
|
mnist_solver.py
|
py
| 1,252 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gzip.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mnist.MNIST",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 35,
"usage_type": "call"
}
] |
458135955
|
"""
dariah.topics.modeling
~~~~~~~~~~~~~~~~~~~~~~
This module implements low-level LDA modeling functions.
"""
from pathlib import Path
import tempfile
import os
import logging
import multiprocessing
import shutil
from typing import Optional, Union
import cophi
import lda
import numpy as np
import pandas as pd
from dariah.mallet import MALLET
from dariah.core import utils
logging.getLogger("lda").setLevel(logging.WARNING)
class LDA:
"""Latent Dirichlet allocation.
Args:
num_topics: The number of topics.
num_iterations: The number of iterations.
alpha: Dirichlet prior on the per-document topic distributions.
eta: Dirichlet prior on the per-topic word distributions.
random_state: Seed for reproducible runs.
mallet: Optional path to the MALLET executable.
"""
def __init__(
self,
num_topics: int,
num_iterations: int = 1000,
alpha: float = 0.1,
eta: float = 0.01,
random_state: int = None,
mallet: Optional[Union[str, Path]] = None,
) -> None:
self.num_topics = num_topics
self.num_iterations = num_iterations
self.alpha = alpha
self.eta = eta
self.random_state = random_state
self.mallet = mallet
if mallet:
if not Path(self.mallet).exists():
# Check if MALLET is in environment variable:
if not os.environ.get(self.mallet):
raise OSError(
"MALLET executable was not found. "
"'{}' does not exist".format(self.mallet)
)
self.mallet = os.environ.get(self.mallet)
if not Path(self.mallet).is_file():
raise OSError(
"'{}' is not a file. "
"Point to the 'mallet/bin/mallet' file.".format(self.mallet)
)
else:
self._model = lda.LDA(
n_topics=self.num_topics,
n_iter=self.num_iterations,
alpha=self.alpha,
eta=self.eta,
random_state=self.random_state,
)
def fit(self, dtm: pd.DataFrame) -> None:
"""Fit the model.
Args:
dtm: The document-term matrix.
"""
self._vocabulary = list(dtm.columns)
self._documents = list(dtm.index)
dtm = dtm.fillna(0).astype(int)
if self.mallet:
self._mallet_lda(dtm)
else:
self._riddell_lda(dtm.values)
@property
def topics(self):
"""Topics with 200 top words.
"""
if self.mallet:
return self._mallet_topics
else:
return self._riddell_topics
@property
def topic_word(self):
"""Topic-word distributions.
"""
if self.mallet:
return self._mallet_topic_word
else:
return self._riddell_topic_word
@property
def topic_document(self):
"""Topic-document distributions.
"""
if self.mallet:
return self._mallet_topic_document
else:
return self._riddell_topic_document
@property
def topic_similarities(self):
"""Topic similarity matrix.
"""
data = self.topic_document.T.copy()
return self._similarities(data)
@property
def document_similarities(self):
"""Document similarity matrix.
"""
data = self.topic_document.copy()
return self._similarities(data)
@staticmethod
def _similarities(data: pd.DataFrame) -> pd.DataFrame:
"""Calculate cosine simliarity matrix.
Args:
data: A matrix to calculate similarities for.
Returns:
A similarity matrix.
"""
descriptors = data.columns
d = data.T @ data
norm = (data * data).sum(0) ** 0.5
similarities = d.div(norm, axis=1).div(norm, axis=0)  # scale by both vectors' norms
return pd.DataFrame(similarities, index=descriptors, columns=descriptors)
def _riddell_lda(self, dtm: pd.DataFrame) -> None:
"""Fit the Riddell LDA model.
Args:
dtm: The document-term matrix.
"""
self._model.fit(dtm)
@property
def _riddell_topics(self):
"""Topics of the Riddell LDA model.
"""
maximum = len(self._vocabulary)
num_words = 200 if maximum > 200 else maximum
index = [f"topic{n}" for n in range(self.num_topics)]
columns = [f"word{n}" for n in range(num_words)]
topics = [
np.array(self._vocabulary)[np.argsort(dist)][: -num_words - 1 : -1]
for dist in self._model.topic_word_
]
return pd.DataFrame(topics, index=index, columns=columns)
@property
def _riddell_topic_word(self):
"""Topic-word distributions for Riddell LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
return pd.DataFrame(
self._model.topic_word_, index=index, columns=self._vocabulary
)
@property
def _riddell_topic_document(self):
"""Topic-document distributions for Riddell LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
return pd.DataFrame(
self._model.doc_topic_, index=self._documents, columns=index
).T
def _mallet_lda(self, dtm: pd.DataFrame) -> None:
"""Fit the MALLET LDA model.
Args:
dtm: The document-term matrix.
"""
# Get number of CPUs for threaded processing:
cpu = multiprocessing.cpu_count() - 1
# Get temporary directory to dump corpus files:
self._tempdir = Path(tempfile.gettempdir(), "dariah-topics")
if self._tempdir.exists():
shutil.rmtree(str(self._tempdir))
self._tempdir.mkdir()
# Export document-term matrix to plaintext files:
corpus_sequence = Path(self._tempdir, "corpus.sequence")
cophi.text.utils.export(dtm, corpus_sequence, "plaintext")
# Construct MALLET object:
mallet = MALLET(self.mallet)
# Create a MALLET corpus file:
corpus_mallet = Path(self._tempdir, "corpus.mallet")
mallet.import_file(
input=str(corpus_sequence), output=str(corpus_mallet), keep_sequence=True
)
# Construct paths to MALLET output files:
self._topic_document_file = Path(self._tempdir, "topic-document.txt")
self._topic_word_file = Path(self._tempdir, "topic-word.txt")
self._topics_file = Path(self._tempdir, "topics.txt")
self._word_topic_counts_file = Path(self._tempdir, "word-topic-counts-file.txt")
# Train topics:
mallet.train_topics(
input=str(corpus_mallet),
num_topics=self.num_topics,
num_iterations=self.num_iterations,
output_doc_topics=self._topic_document_file,
output_topic_keys=self._topics_file,
topic_word_weights_file=self._topic_word_file,
word_topic_counts_file=self._word_topic_counts_file,
alpha=self.alpha,
beta=self.eta,
num_top_words=200,
num_threads=cpu,
random_seed=self.random_state,
)
@property
def _mallet_topics(self):
"""Topics of MALLET LDA model.
"""
maximum = len(self._vocabulary)
num_words = 200 if maximum > 200 else maximum
index = [f"topic{n}" for n in range(self.num_topics)]
columns = [f"word{n}" for n in range(num_words)]
topics = utils.read_mallet_topics(self._topics_file, num_words)
return pd.DataFrame(topics, index=index, columns=columns)
@property
def _mallet_topic_word(self):
"""Topic-word distributions of MALLET LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
data = pd.read_csv(self._topic_word_file, sep="\t", header=None).dropna()
data = data.pivot(index=0, columns=1, values=2)
data.columns.name = None
data.index.name = None
data.index = index
return data
@property
def _mallet_topic_document(self):
"""Topic-document distributions of MALLET LDA model.
"""
data = pd.read_csv(self._topic_document_file, sep="\t", header=None)
columns = [f"topic{n}" for n in range(self.num_topics)]
index = data[1]
data = data.drop([0, 1], axis=1)
data.columns = list(columns)
data.index = index
return data.T
def __repr__(self):
return (
f"<Model: LDA, "
f"{self.num_topics} topics, "
f"{self.num_iterations} iterations, "
f"alpha={self.alpha}, "
f"eta={self.eta}>"
)
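# Usage sketch (hedged; `dtm` is an assumed pandas document-term matrix):
#   model = LDA(num_topics=10, num_iterations=500)
#   model.fit(dtm)
#   print(model.topics)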
| null |
dariah/core/modeling.py
|
modeling.py
|
py
| 8,684 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "lda.LDA",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempdir",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "cophi.text.utils.export",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "cophi.text",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "dariah.mallet.MALLET",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "dariah.core.utils.read_mallet_topics",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "dariah.core.utils",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 266,
"usage_type": "call"
}
] |
624178742
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Sorry, but please assign the key maps manually.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Metadata shown in the add-on list in User Preferences
bl_info = {'name':'Silent Key Del',
           'author':'bookyakuno',
           'version':(0,1),
           'category':'Animation',
           'location':'" object.delete_xxx " key map: please assign manually >> 3D View > Object Mode , 3D View > Pose , Timeline',
           'description':'When you delete a keyframe, the confirmation message is not displayed.'}
# Required to access Blender's internal data structures
import bpy
# Actual operator implementation
class DeleteUnmassage_xxx(bpy.types.Operator):
bl_idname = "object.delete_xxx"
bl_label = "Silent_Key_Del"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
if (context.active_object):
self.report(type={"INFO"}, message="Silent_Key_Del") # Message
            bpy.ops.anim.keyframe_delete_v3d()  # This is what actually deletes the keyframe. Running it the normal way (Alt + I) executes "warning + this call", so only this call is executed here.
return {'FINISHED'}
# Actual operator implementation
class DeleteUnmassage_graph_silent_del(bpy.types.Operator):
bl_idname = "graph.silent_del"
bl_label = "silent_graph_Key_Del"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
if (context.active_object):
self.report(type={"INFO"}, message="Silent_Key_Del") # Message
            bpy.ops.graph.delete()
            # This is what actually deletes the keys. Running it the normal way (Alt + I) executes "warning + this call", so only this call is executed here.
return {'FINISHED'}
#
#def menu_func(self, context):
# self.layout.operator(DeleteUnmassage_xxx.bl_idname)
#
#def register():
# bpy.utils.register_class(DeleteUnmassage_xxx)
# # bpy.types.TIMELINE_MT_frame.append(menu_func)
#
#
#
#def unregister():
# bpy.utils.register_class(DeleteUnmassage_xxx)
## bpy.types.TIMELINE_MT_frame.remove(menu_func)
#
# Handler run when the add-on is installed
#def register():
# bpy.utils.register_class(DeleteUnmassage_xxx)
# Handler run when the add-on is uninstalled
#def unregister():
# bpy.utils.unregister_class(DeleteUnmassage_xxx)
# Main function
#if __name__ == "__main__":
# register()
# ===============================================================
#
def register():  # register
bpy.utils.register_class(DeleteUnmassage_xxx)
bpy.utils.register_class(DeleteUnmassage_graph_silent_del)
# bpy.utils.register_class(DeleteUnmassage_xxx)
# kc = bpy.context.window_manager.keyconfigs.addon
# if kc:
# km = kc.keymaps.new(name='WINDOW', space_type='VIEW_3D' , region_type='WINDOW')
    # Register the shortcut key
# kmi = km.keymap_items.new('object.delete_xxx', 'BACK_SPACE', 'PRESS', alt=True)
def unregister():  # unregister
bpy.utils.unregister_class(DeleteUnmassage_xxx)
bpy.utils.unregister_class(DeleteUnmassage_graph_silent_del)
# bpy.utils.unregister_class(DeleteUnmassage_xxx)
# kc = bpy.context.window_manager.keyconfigs.addon
# if kc:
# km = kc.keymaps["WINDOW"]
# for kmi in km.keymap_items:
# if kmi.idname == 'object.delete_xxx':
# km.keymap_items.remove(kmi)
# break
if __name__ == "__main__":
register()
# object.delete_xxx
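# --- Editorial addition: hedged key map sketch, not part of the original file ---
# The bl_info above asks users to bind "object.delete_xxx" manually. Based on
# the commented-out keymap code earlier in this file, an automatic binding
# could look roughly like this; the keymap name and space_type are assumptions.
#
#   kc = bpy.context.window_manager.keyconfigs.addon
#   if kc:
#       km = kc.keymaps.new(name='Object Mode', space_type='EMPTY')
#       km.keymap_items.new('object.delete_xxx', 'BACK_SPACE', 'PRESS', alt=True)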
| null |
SilentKeyDel.py
|
SilentKeyDel.py
|
py
| 3,740 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bpy.types",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.anim.keyframe_delete_v3d",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.graph.delete",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 125,
"usage_type": "attribute"
}
] |
587699170
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# This Python file uses the following encoding: utf-8
from setuptools import setup, find_packages
import sys
from easy_contact_setup import version
import os
# Read the version from a project file
VERSION = version.VERSION_str
# Get description from README file
long_description = open(
os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# Build a list with requirements of the app
REQUIRES = ['django-easy-contact']
# Because of the strange update behavior of "pip --upgrade package_name",
# set the requirement only if the package is not available.
try:
import django
except ImportError:
REQUIRES.append('django == 1.3.7')
try:
import django_fields
except ImportError:
REQUIRES.append('django-fields')
if sys.version_info < (2, 4):
REQUIRES.append('python >= 2.4')
setup(name='django-easy-contact-setup',
version=VERSION,
description='Admin set up for django-easy-contact',
long_description=long_description,
author='Andreas Fritz, digital.elements.li',
author_email='[email protected]',
url='http://www.digital.elements.li',
download_url='https://pypi.python.org/pypi/django-easy-contact-setup',
license='BSD',
packages=find_packages(),
include_package_data=True,
keywords='django admin setup configuration django-easy-contact-setup',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Environment :: Console',
'Natural Language :: English',
'Natural Language :: German',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Topic :: Internet',
'Topic :: Utilities',
],
install_requires=REQUIRES,
zip_safe=False,
)
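# --- Editorial addition: hedged usage note, not part of the original file ---
# With this setup.py at the project root, a local install and a source
# distribution would typically be built like so (standard setuptools usage):
#
#   pip install .
#   python setup.py sdist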
| null |
pypi_install_script/django-easy-contact-setup-0.3.9.tar/setup.py
|
setup.py
|
py
| 2,062 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "easy_contact_setup.version.VERSION_str",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "easy_contact_setup.version",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 44,
"usage_type": "call"
}
] |
456710609
|
import os
import json
import requests
import yaml
def connected_to_internet(url='http://www.google.com/', timeout=5):
"""
Check that there is an internet connection
:param url: url to use for testing (Default value = 'http://www.google.com/')
:param timeout: timeout to wait for [in seconds] (Default value = 5)
"""
try:
_ = requests.get(url, timeout=timeout)
return True
except requests.ConnectionError:
print("No internet connection available.")
return False
def send_query(query_string, clean=False):
"""
Send a query/request to a website
:param query_string: string with query content
:param clean: (Default value = False)
"""
response = requests.get(query_string)
if response.ok:
if not clean:
return response.json()['msg']
else:
return response.json()
else:
raise ValueError("Invalide query string: {}".format(query_string))
def listdir(fld):
"""
    List the files in a folder with the complete file path instead of the relative file path, unlike os.listdir.
:param fld: string, folder path
"""
if not os.path.isdir(fld):
raise FileNotFoundError("Could not find directory: {}".format(fld))
return [os.path.join(fld, f) for f in os.listdir(fld)]
def save_json(filepath, content, append=False):
"""
Saves content to a JSON file
:param filepath: path to a file (must include .json)
:param content: dictionary of stuff to save
"""
if not 'json' in filepath:
raise ValueError("filepath is invalid")
if not append:
with open(filepath, 'w') as json_file:
json.dump(content, json_file, indent=4)
else:
with open(filepath, 'w+') as json_file:
json.dump(content, json_file, indent=4)
def save_yaml(filepath, content, append=False, topcomment=None):
"""
Saves content to a yaml file
:param filepath: path to a file (must include .yaml)
:param content: dictionary of stuff to save
"""
if not 'yaml' in filepath:
raise ValueError("filepath is invalid")
if not append:
method = 'w'
else:
method = 'w+'
with open(filepath, method) as yaml_file:
if topcomment is not None:
yaml_file.write(topcomment)
yaml.dump(content,yaml_file, default_flow_style=False, indent=4)
def load_json(filepath):
"""
Load a JSON file
:param filepath: path to a file
"""
if not os.path.isfile(filepath) or not ".json" in filepath.lower(): raise ValueError("unrecognized file path: {}".format(filepath))
with open(filepath) as f:
data = json.load(f)
return data
def load_yaml(filepath):
"""
Load a YAML file
:param filepath: path to yaml file
"""
if filepath is None or not os.path.isfile(filepath): raise ValueError("unrecognized file path: {}".format(filepath))
if not "yml" in filepath and not "yaml" in filepath: raise ValueError("unrecognized file path: {}".format(filepath))
return yaml.load(open(filepath), Loader=yaml.FullLoader)
def load_volume_file(filepath, **kwargs):
"""
Load a volume file (e.g., .nii) and return vtk actor
:param filepath: path to file
:param **kwargs:
"""
from vtkplotter import Volume, load
if not os.path.isfile(filepath): raise FileNotFoundError(filepath)
if ".x3d" in filepath.lower(): raise ValueError("brainrender cannot use .x3d data as they are not supported by vtkplotter")
elif "nii" in filepath.lower() or ".label" in filepath.lower():
import nibabel as nb
data = nb.load(filepath)
d = data.get_fdata()
act = Volume(d, **kwargs)
else:
act = load(filepath, **kwargs)
if act is None:
raise ValueError("Could not load {}".format(filepath))
return act
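# --- Editorial addition: hedged usage example, not part of the original file ---
# Round-tripping a small dict through the YAML helpers defined above; the
# file name and contents are illustrative.
#
#   settings = {"atlas": "allen", "resolution": 25}
#   save_yaml("settings.yaml", settings, topcomment="# brainrender settings\n")
#   assert load_yaml("settings.yaml") == settings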
| null |
brainrender/Utils/data_io.py
|
data_io.py
|
py
| 3,521 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.ConnectionError",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "yaml.dump",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "vtkplotter.Volume",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "vtkplotter.load",
"line_number": 135,
"usage_type": "call"
}
] |
167764124
|
# Import libraries
from config import TELEGRAM_TOKEN, CHAT_ID
from noti import news
import requests
from bs4 import BeautifulSoup
import telegram
bot = telegram.Bot(token=TELEGRAM_TOKEN)
# Search keyword
search_word = '코로나'  # "corona" (COVID-19)
# Lists to hold links that have already been sent
naver_old_links = []
naver_old_titles = []
daum_old_links = []
daum_old_title = []
# Scraping function (Naver)
def naver_extract_links(old_links=[]):
naver_url = f'http://search.naver.com/search.naver?where=news&sm=tab_jum&query={search_word}&nso=p%3Aall%2Cso%3Add'
naver_req = requests.get(naver_url)
naver_html = naver_req.text
naver_soup = BeautifulSoup(naver_html, 'html.parser')
naver_search_result = naver_soup.select_one('.type01')
naver_news_list = naver_search_result.select('li a')
naver_links = []
for naver_news in naver_news_list[:10]:
naver_link = naver_news['href']
naver_links.append(naver_link)
naver_new_links = []
for naver_link in naver_links:
        if naver_link not in old_links:
naver_new_links.append(naver_link)
# naver_titles = []
# for naver_news_title in naver_news_list[:10]:
# naver_title = naver_news_title['title']
# naver_titles.append(naver_title)
# naver_new_titles=[]
# for naver_title in naver_titles:
# if naver_title not in naver_old_links:
# naver_new_titles.append(naver_title)
return naver_new_links
# return naver_new_titles
# Scraping function (Daum)
def daum_extract_links(old_links=[]):
daum_url = f'http://search.daum.net/search?w=news&sort=recency&q={search_word}&cluster=n&DA=STC&dc=STC&pg=1&r=1&p=1&rc=1&at=more&sd=20200326210541&ed=20200327210541&period=d'
daum_req = requests.get(daum_url)
daum_html = daum_req.text
daum_soup = BeautifulSoup(daum_html, 'html.parser')
daum_search_result = daum_soup.select_one('#newsResultUL')
daum_news_list = daum_search_result.select('li a')
daum_links = []
for daum_news in daum_news_list[:10]:
daum_link = daum_news['href']
daum_links.append(daum_link)
daum_new_links = []
for daum_link in daum_links:
        if daum_link not in old_links:
daum_new_links.append(daum_link)
# daum_titles = []
# for daum_news_title in daum_news_list[:10]:
# daum_title = daum_news_title['title']
# daum_titles.append(daum_title)
# daum_new_titles=[]
# for daum_title in daum_titles:
# if daum_title not in daum_old_links:
# daum_new_titles.append(daum_title)
return daum_new_links
# return daum_new_titles
# Take the previous links as an argument, compare, and output only the new links
# Later, change this part to message-sending code and schedule it to run every hour
# If there are no new links, an empty list is returned
for i in range(10):
naver_new_links = naver_extract_links(naver_old_links)
naver_old_links += naver_new_links.copy()
naver_old_links = list(set(naver_old_links))
# naver_new_titles = naver_extract_links(naver_old_links)
# naver_old_titles += naver_new_titles.copy()
# naver_old_titles = list(set(naver_old_links))
# naver_news = naver_new_titles[i] + '\n\n' + naver_new_links[i]
    if i < len(naver_new_links):  # guard: there may be fewer than 10 new links
        bot.sendMessage(CHAT_ID, naver_new_links[i])
for i in range(10):
    daum_new_links = daum_extract_links(daum_old_links)
    daum_old_links += daum_new_links.copy()
    daum_old_links = list(set(daum_old_links))
    # daum_new_titles = daum_extract_links(daum_old_links)
    # daum_old_titles += daum_new_titles.copy()
    # daum_old_titles = list(set(daum_old_links))
    # daum_news = daum_new_titles[i] + '\n\n' + daum_new_links[i]
    if i < len(daum_new_links):  # guard: there may be fewer than 10 new links
        bot.sendMessage(CHAT_ID, daum_new_links[i])
"""
=== Links to send ===
['https://m.news.naver.com/read.nhn?mode=LSD&mid=sec&sid1=101&oid=008&aid=0004349743', 'http://it.chosun.com/site/data/html_dir/2020/01/31/2020013103216.html', 'https://m.news.naver.com/read.nhn?mode=LSD&mid=sec&sid1=101&oid=031&aid=0000523810', 'https://m.news.naver.com/read.nhn?mode=LSD&mid=sec&sid1=102&oid=001&aid=0011371561', 'http://www.fintechpost.co.kr/news/articleView.html?idxno=100097']
=== Links to send ===
[]
=== Links to send ===
[]
"""
| null |
news.py
|
news.py
|
py
| 4,230 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "telegram.Bot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.TELEGRAM_TOKEN",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "config.CHAT_ID",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "config.CHAT_ID",
"line_number": 113,
"usage_type": "argument"
}
] |
46832701
|
import json
import os
from django.core.management import BaseCommand
from authapp.models import ShopUser
from mainapp.models import ClassForMainGallery, MainGallery, MainSlider, AboutUs
from servicesapp.models import ServicesCategories, Services
JSON_PATH = 'mainapp/jsons'
def load_from_json(file_name):
with open(os.path.join(JSON_PATH, file_name + '.json'), mode='r', encoding='UTF-8') as infile:
return json.load(infile)
class Command(BaseCommand):
def handle(self, *args, **options):
class_for_gallery = load_from_json('class_for_gallery')
ClassForMainGallery.objects.all().delete()
for _class in class_for_gallery:
new_class = ClassForMainGallery(**_class)
new_class.save()
gallery = load_from_json('gallery')
MainGallery.objects.all().delete()
for gal in gallery:
gal_class = gal['class_obj']
class_obj = ClassForMainGallery.objects.get(name=gal_class)
gal['class_obj'] = class_obj
new_gal = MainGallery(**gal)
new_gal.save()
slider = load_from_json('slider')
MainSlider.objects.all().delete()
for slide in slider:
new_slide = MainSlider(**slide)
new_slide.save()
about = load_from_json('about')
AboutUs.objects.all().delete()
new_about = AboutUs(**about)
new_about.save()
        # Create categories for services
services_categories = load_from_json('services_categories')
ServicesCategories.objects.all().delete()
for category in services_categories:
new_category = ServicesCategories(**category)
new_category.save()
        # Create services
services = load_from_json('services')
Services.objects.all().delete()
for service in services:
service_cat = service['category']
cat_obj = ServicesCategories.objects.get(name=service_cat)
service['category'] = cat_obj
Services.objects.create(**service)
super_user = ShopUser.objects.create_superuser('admin', '[email protected]', '123', age=42)
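# --- Editorial addition: hedged usage note, not part of the original file ---
# As a Django management command (note the management/commands/ module path),
# this loader would normally be invoked from the project root:
#
#   python manage.py fill_db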
| null |
mainapp/management/commands/fill_db.py
|
fill_db.py
|
py
| 2,198 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.core.management.BaseCommand",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mainapp.models.ClassForMainGallery.objects.all",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "mainapp.models.ClassForMainGallery.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "mainapp.models.ClassForMainGallery",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mainapp.models.ClassForMainGallery",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mainapp.models.MainGallery.objects.all",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mainapp.models.MainGallery.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mainapp.models.MainGallery",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "mainapp.models.ClassForMainGallery.objects.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mainapp.models.ClassForMainGallery.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "mainapp.models.ClassForMainGallery",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "mainapp.models.MainGallery",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "mainapp.models.MainSlider.objects.all",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "mainapp.models.MainSlider.objects",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "mainapp.models.MainSlider",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "mainapp.models.MainSlider",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "mainapp.models.AboutUs.objects.all",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "mainapp.models.AboutUs.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "mainapp.models.AboutUs",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "mainapp.models.AboutUs",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.ServicesCategories.objects.all",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.ServicesCategories.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "servicesapp.models.ServicesCategories",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "servicesapp.models.ServicesCategories",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.Services.objects.all",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.Services.objects",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "servicesapp.models.Services",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "servicesapp.models.ServicesCategories.objects.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.ServicesCategories.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "servicesapp.models.ServicesCategories",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "servicesapp.models.Services.objects.create",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "servicesapp.models.Services.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "servicesapp.models.Services",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "authapp.models.ShopUser.objects.create_superuser",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "authapp.models.ShopUser.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "authapp.models.ShopUser",
"line_number": 65,
"usage_type": "name"
}
] |
2606318
|
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from django.views.generic import RedirectView
from django.conf.urls.static import static
from accounts.views import CustomLoginView, DisclaimerCreateView, \
data_protection, subscribe_view
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^studioadmin/',
include('studioadmin.urls', namespace='studioadmin')),
url(r'^', include('booking.urls', namespace='booking')),
url(
r'^data-protection-statement/$', data_protection,
name='data_protection'
),
url(r'^accounts/profile/', include('accounts.urls', namespace='profile')),
url(r'^accounts/login/$', CustomLoginView.as_view(), name='login'),
url(
r'^accounts/disclaimer/$', DisclaimerCreateView.as_view(),
name='disclaimer_form'
),
url(r'^accounts/mailing-list/$', subscribe_view, name='subscribe'),
url(r'^accounts/', include('allauth.urls')),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
url(r'^payments/ipn-paypal-notify/', include('paypal.standard.ipn.urls')),
    url(r'^payments/', include('payments.urls', namespace='payments')),
    url(r'^favicon\.ico/$',
        RedirectView.as_view(url=settings.STATIC_URL+'favicon.ico',
                             permanent=False)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG: # pragma: no cover
import debug_toolbar
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
| null |
pipsevents/urls.py
|
urls.py
|
py
| 1,564 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "accounts.views.data_protection",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "accounts.views.CustomLoginView.as_view",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "accounts.views.CustomLoginView",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "accounts.views.DisclaimerCreateView.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "accounts.views.DisclaimerCreateView",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "accounts.views.subscribe_view",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.views.generic.RedirectView.as_view",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.views.generic.RedirectView",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.STATIC_URL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "debug_toolbar.urls",
"line_number": 37,
"usage_type": "attribute"
}
] |