seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
75136417147 |
import random
from tgalice.cascade import Pr
from cascade import csc, Turn
from datetime import datetime, timedelta
from uuid import uuid4
from scenarios.exercising import EXERCISES, Exercise
def is_morning_show(turn: Turn) -> bool:
if not turn.ctx.yandex or not turn.ctx.yandex.request:
return False
r = turn.ctx.yandex.request
if r.type != 'Show.Pull':
return False
return r.show_type == 'MORNING'
@csc.add_handler(priority=Pr.CRITICAL, checker=is_morning_show)
def morning_show(turn: Turn):
    ex: Exercise = random.choice(list(EXERCISES.values()))
    # "And now, an exercise from the 'Splits in a Month' skill." followed by the exercise text
    turn.response_text = f'А теперь - упражнение из навыка "Шпагат за месяц".\n{ex.text}'
    now = datetime.utcnow()
    turn.show_item_meta = dict(
        content_id=str(uuid4()),
        title='Упражнение на растяжку',  # "Stretching exercise"
        # title_tts='Упражнение на растяжку',
        publication_date=str(now).replace(' ', 'T') + 'Z',
        # expiration_date=str(now + timedelta(days=7)) + 'Z',
    )
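# --- Editorial sketch: the priority/checker dispatch idea behind @csc.add_handler
# above, as a minimal illustrative re-implementation. This is NOT tgalice's actual
# Cascade internals; dispatch() and the (priority, checker, handler) tuple layout
# are hypothetical.
def dispatch(handlers, turn):
    # handlers: iterable of (priority, checker, handler); highest priority first
    for _, checker, handler in sorted(handlers, key=lambda h: h[0], reverse=True):
        if checker(turn):  # first matching checker wins
            return handler(turn)
    return None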
| avidale/alice-stretching | scenarios/show.py | show.py | py | 1,066 | python | en | code | 1 | github-code | 6 |
[
{
"api_name": "cascade.Turn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "cascade.Turn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "scenarios.exercising.Exercise",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scenarios.exercising.EXERCISES.values",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scenarios.exercising.EXERCISES",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cascade.csc.add_handler",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cascade.csc",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tgalice.cascade.Pr.CRITICAL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tgalice.cascade.Pr",
"line_number": 20,
"usage_type": "name"
}
] |
73894506107 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from appium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
import logging, time, os
class BaseView:
    '''Wrapper (secondary encapsulation) around common Appium driver operations.'''
def __init__(self, driver: webdriver.Remote):
self.driver = driver
self.timeout = 2
self.poll_frequency = 0.5
self.x = self.driver.get_window_size()['width']
self.y = self.driver.get_window_size()['height']
    def findElement(self, locator):
        if not isinstance(locator, tuple):
            logging.error('locator must be a tuple, e.g. loc=("id", "value")')
            return None
        logging.info('Locating element, strategy -> %s, value -> %s' % (locator[0], locator[1]))
        element = WebDriverWait(self.driver, self.timeout, self.poll_frequency).until(
            lambda x: x.find_element(*locator))
        return element
    def findElements(self, locator):
        if not isinstance(locator, tuple):
            logging.error('locator must be a tuple, e.g. loc=("id", "value")')
            return []
        try:
            logging.info('Locating elements, strategy -> %s, value -> %s' % (locator[0], locator[1]))
            elements = WebDriverWait(self.driver, self.timeout, self.poll_frequency).until(
                lambda x: x.find_elements(*locator))
            return elements
        except Exception:
            return []
def sendKeys(self, locator, text):
element = self.findElement(locator)
element.send_keys(text)
def click(self, locator):
element = self.findElement(locator)
element.click()
def clear(self, locator):
element = self.findElement(locator)
element.clear()
def swipe_up(self):
start_x = self.x * 0.5
start_y = self.y * 0.8
end_x = self.x * 0.5
end_y = self.y * 0.2
self.driver.swipe(start_x, start_y, end_x, end_y)
logging.info('==========swipe up==========')
def swipe_down(self):
start_x = self.x * 0.5
start_y = self.y * 0.2
end_x = self.x * 0.5
end_y = self.y * 0.8
self.driver.swipe(start_x, start_y, end_x, end_y)
logging.info('==========swipe down==========')
def swipe_left(self):
start_x = self.x * 0.8
start_y = self.y * 0.5
end_x = self.x * 0.2
end_y = self.y * 0.5
self.driver.swipe(start_x, start_y, end_x, end_y)
logging.info('==========swipe left==========')
def swipe_right(self):
start_x = self.x * 0.2
start_y = self.y * 0.5
end_x = self.x * 0.8
end_y = self.y * 0.5
self.driver.swipe(start_x, start_y, end_x, end_y)
logging.info('==========swipe right==========')
    def getScreenShot(self, module):  # module: the caller's module name, used as the screenshot file name
        now = time.strftime('%Y-%m-%d %H_%M_%S')
        image_dir = os.path.abspath('../screenshots/%s_%s.png' % (module, now))
        self.driver.get_screenshot_as_file(image_dir)
        logging.info('%s screenshot saved to: %s' % (module, image_dir))
        return image_dir
if __name__ == '__main__':
from common.desired_caps import appium_desired
driver = appium_desired()
app = BaseView(driver)
time.sleep(2)
    print(app.getScreenShot('launch_page'))  # originally '启动页' ("launch page")
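# --- Editorial sketch: a hedged page-object usage example on top of BaseView.
# The LoginPage class and its locator values are hypothetical; adjust them to the
# app under test.
class LoginPage(BaseView):
    username_field = ('id', 'com.example:id/username')  # hypothetical resource id
    login_button = ('id', 'com.example:id/login')       # hypothetical resource id

    def login(self, name):
        self.sendKeys(self.username_field, name)
        self.click(self.login_button)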
| inttcc/MyPro | workcoming/baseView/base.py | base.py | py | 3,466 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "appium.webdriver.Remote",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "appium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "common.desired_caps.appium_desired",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 96,
"usage_type": "call"
}
] |
39262377956 |
from collections import namedtuple
from django.conf import settings
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
from eums.elasticsearch.delete_records import DeleteRecords
from eums.elasticsearch.mappings import setup_mappings
from eums.elasticsearch.sync_info import SyncInfo
from eums.models import DistributionPlanNode as DeliveryNode, Consignee, Programme, OrderItem, Item, SalesOrder, \
PurchaseOrder, ReleaseOrder, Question, TextAnswer, MultipleChoiceAnswer, NumericAnswer, Option, Run
ES_SETTINGS = settings.ELASTIC_SEARCH
def list_nodes_to_update():
last_sync = SyncInfo.last_successful_sync()
nodes_to_update = _find_nodes_to_update(last_sync)
new_nodes = _find_new_nodes(last_sync)
return list(nodes_to_update) + list(new_nodes)
def list_nodes_to_delete():
delete_records = DeleteRecords.objects.first()
return delete_records.nodes_to_delete if delete_records else []
def _find_new_nodes(last_sync):
if not last_sync:
setup_mappings()
return DeliveryNode.objects.all()
last_sync_time = last_sync.start_time
return DeliveryNode.objects.filter(created__gte=last_sync_time)
def _find_nodes_to_update(last_sync):
if last_sync:
changed_nodes = DeliveryNode.objects.filter(modified__gte=last_sync.start_time)
es = Elasticsearch([ES_SETTINGS.HOST])
match_terms = _build_match_terms(last_sync)
if not match_terms:
return changed_nodes
query = {
"fields": [],
"filter": {
"bool": {
"should": match_terms
}
}
}
scan_results = scan(es, query=query, index=ES_SETTINGS.INDEX, doc_type=ES_SETTINGS.NODE_TYPE)
node_ids = [hit['_id'] for hit in list(scan_results)]
changed_node_ids = list(changed_nodes.values_list('id', flat=True))
return DeliveryNode.objects.filter(pk__in=node_ids + changed_node_ids)
return []
def _build_match_terms(last_sync):
last_sync_time = last_sync.start_time
consignee_ids = _find_changes_for_model(Consignee, last_sync_time)
programme_ids = _find_changes_for_model(Programme, last_sync_time)
order_item_ids = _find_changes_for_model(OrderItem, last_sync_time)
item_ids = _find_changes_for_model(Item, last_sync_time)
sales_order_ids = _find_changes_for_model(SalesOrder, last_sync_time)
purchase_order_ids = _find_changes_for_model(PurchaseOrder, last_sync_time)
release_order_ids = _find_changes_for_model(ReleaseOrder, last_sync_time)
question_ids = _find_changes_for_model(Question, last_sync_time)
text_answer_ids = _find_changes_for_model(TextAnswer, last_sync_time)
multiple_choice_answer_ids = _find_changes_for_model(MultipleChoiceAnswer, last_sync_time)
numeric_answer_ids = _find_changes_for_model(NumericAnswer, last_sync_time)
option_ids = _find_changes_for_model(Option, last_sync_time)
run_ids = _find_changes_for_model(Run, last_sync_time)
match_term = namedtuple('MatchTerm', ['key', 'value'])
match_terms = [
match_term("consignee.id", consignee_ids),
match_term("ip.id", consignee_ids),
match_term("programme.id", programme_ids),
match_term("order_item.id", order_item_ids),
match_term("order_item.item.id", item_ids),
match_term("order_item.order.sales_order.id", sales_order_ids),
match_term("order_item.order.id", purchase_order_ids + release_order_ids),
match_term("responses.question.id", question_ids),
match_term("responses.id", text_answer_ids + multiple_choice_answer_ids + numeric_answer_ids),
match_term("responses.value_id", option_ids),
match_term("responses.run.id", run_ids),
match_term("id", _find_nodes_affected_by_dependency_deletion()),
]
    # Materialize the filter: under Python 3 a lazy filter object is always
    # truthy, so the emptiness check below would otherwise never fail
    non_empty_match_terms = [term for term in match_terms if len(term.value)]
    if non_empty_match_terms:
        return [{'term': {term.key: term.value}} for term in non_empty_match_terms]
    return None
def _find_changes_for_model(model, last_sync_time):
return list(model.objects.filter(modified__gte=last_sync_time).values_list('id', flat=True))
def _find_nodes_affected_by_dependency_deletion():
delete_records = DeleteRecords.objects.first()
return delete_records.nodes_with_deleted_dependencies or [] if delete_records else []
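# --- Editorial sketch: the query shape that _build_match_terms() feeds into scan()
# above, as a standalone function. MatchTerm/build_should_filter are illustrative
# names, not part of this module; the "term"-with-list shape mirrors the
# (Elasticsearch 1.x era) filter built above.
from collections import namedtuple

MatchTerm = namedtuple('MatchTerm', ['key', 'value'])

def build_should_filter(match_terms):
    terms = [t for t in match_terms if t.value]  # drop terms with empty ID lists
    if not terms:
        return None
    return {"fields": [], "filter": {"bool": {"should": [{'term': {t.key: t.value}} for t in terms]}}}

# e.g. build_should_filter([MatchTerm('consignee.id', [1, 2]), MatchTerm('programme.id', [])])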
| unicefuganda/eums | eums/elasticsearch/sync_data_generators.py | sync_data_generators.py | py | 4,471 | python | en | code | 9 | github-code | 6 |
[
{
"api_name": "django.conf.settings.ELASTIC_SEARCH",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "eums.elasticsearch.sync_info.SyncInfo.last_successful_sync",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "eums.elasticsearch.sync_info.SyncInfo",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords.objects.first",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "eums.elasticsearch.mappings.setup_mappings",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects.all",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "eums.models.DistributionPlanNode",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "eums.models.DistributionPlanNode.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "eums.models.DistributionPlanNode",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "eums.models.DistributionPlanNode.objects.filter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "eums.models.DistributionPlanNode",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers.scan",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects.filter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "eums.models.DistributionPlanNode.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "eums.models.DistributionPlanNode",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "eums.models.Consignee",
"line_number": 62,
"usage_type": "argument"
},
{
"api_name": "eums.models.Programme",
"line_number": 63,
"usage_type": "argument"
},
{
"api_name": "eums.models.OrderItem",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "eums.models.Item",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "eums.models.SalesOrder",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "eums.models.PurchaseOrder",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "eums.models.ReleaseOrder",
"line_number": 68,
"usage_type": "argument"
},
{
"api_name": "eums.models.Question",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "eums.models.TextAnswer",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "eums.models.MultipleChoiceAnswer",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "eums.models.NumericAnswer",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "eums.models.Option",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "eums.models.Run",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "collections.namedtuple",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords.objects.first",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords.objects",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "eums.elasticsearch.delete_records.DeleteRecords",
"line_number": 104,
"usage_type": "name"
}
] |
27483656200 |
import logging
from flask import abort, request, g, Response, make_response, jsonify, current_app
from flask_restplus import Namespace, Resource, fields, marshal_with
from battle.db.dbops import dbops
from battle.db.models import Battle
log = logging.getLogger(__name__)
posts_api = Namespace('postmeta', description='post information about bigbang')
model = posts_api.model('Model', {
"Developer_Issues" : fields.Integer,
"Issues_Resolved" : fields.Integer,
"Issues_Pending": fields.Integer,
"Component_Issues": fields.Integer,
"Component_Failures": fields.List(fields.String),
"Total_Tickets" : fields.List(fields.String),
"Jiras" : fields.List(fields.String),
"Faq_Updated": fields.Integer,
'date_updated': fields.DateTime()
})
#ns = api.namespace('post', description='Operations related to data post')
@posts_api.route('')
@posts_api.response(204, "updated successfully")
class posts(Resource):
@posts_api.expect(model)
def post(self):
"""
Update metadata of bigbang.
"""
data = request.get_json()
dbops.post_meta(data)
return None, 204
def get(self):
data = dbops.get_meta()
return data
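# --- Editorial sketch: a hedged example of mounting this namespace on an app.
# The demo app, path, and payload are illustrative, not taken from this repository.
if __name__ == '__main__':
    from flask import Flask
    from flask_restplus import Api

    demo_app = Flask(__name__)
    demo_api = Api(demo_app)
    demo_api.add_namespace(posts_api, path='/postmeta')
    # demo_app.test_client().post('/postmeta', json={"Developer_Issues": 3})
    demo_app.run(debug=True)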
| mthak/classmojo | battle/api/post.py | post.py | py | 1,214 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_restplus.Namespace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.List",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.String",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields.List",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.String",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields.List",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.String",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask_restplus.fields",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask_restplus.fields.DateTime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask_restplus.Resource",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "battle.db.dbops.dbops.post_meta",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "battle.db.dbops.dbops",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "battle.db.dbops.dbops.get_meta",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "battle.db.dbops.dbops",
"line_number": 41,
"usage_type": "name"
}
] |
73354334268 |
import argparse
import json
import logging
import os
import random
import math
from pprint import pprint
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)-7s - [%(funcName)s] %(message)s')
# uncomment for submission
# logger.disabled = True
ACTIONS = {
-1: 'DoNothing',
1: 'MoveUp',
2: 'MoveLeft',
3: 'MoveRight',
4: 'MoveDown',
5: 'PlaceBomb',
6: 'TriggerBomb',
}
def main(player_key, output_path):
logger.info('Player key: {}'.format(player_key))
logger.info('Output path: {}'.format(output_path))
with open(os.path.join(output_path, 'state.json'), 'r') as f:
state = json.load(f)
# logger.info('State: {}'.format(state))
# Constants for json path
PLAYER_ENTITY = "RegisteredPlayerEntities"
GAME_BLOCKS = "GameBlocks"
# Constants for data about map
MAP_HEIGHT = state["MapHeight"]
MAP_WIDTH = state["MapWidth"]
CURRENT_ROUND = state["CurrentRound"]
# Constants for entity type
WALL = "Domain.Entities.IndestructibleWallEntity, Domain"
OBSTACLE = "Domain.Entities.DestructibleWallEntity, Domain"
PLAYER = "Domain.Entities.PlayerEntity, Domain"
SUPER_POWER_UP = "Domain.Entities.PowerUps.SuperPowerUp, Domain"
POWER_UP_BOMBBAG = "Domain.Entities.PowerUps.BombBagPowerUpEntity, Domain"
    POWER_UP_BOMBRADIUS = "Domain.Entities.PowerUps.BombRaduisPowerUpEntity, Domain"  # the "Raduis" typo comes from the game data itself :(
TOTAL_PLAYER = len(state[PLAYER_ENTITY])
# Class queue
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0,item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
# Functions and Procedures
    def Player_Name(index):
        # Getter for the bot's name
        return state[PLAYER_ENTITY][index]["Name"]
    def Player_Index(key):
        # Getter for the bot's index
        i = 0
        while (i < TOTAL_PLAYER) and (Player_Key(i) != key):
            i += 1
        return i
    def Player_Key(index):
        # Getter for the bot's key
        return state[PLAYER_ENTITY][index]["Key"]
    def Player_Points(index):
        # Getter for the bot's points
        return state[PLAYER_ENTITY][index]["Points"]
    def Player_Killed(index):
        # Getter for a player's killed status
        return state[PLAYER_ENTITY][index]["Killed"]
    def Player_BombBag(index):
        # Getter for the bot's bomb bag count
        return state[PLAYER_ENTITY][index]["BombBag"]
    def Player_BombRadius(index):
        # Getter for the bot's blast radius
        return state[PLAYER_ENTITY][index]["BombRadius"]
    def Player_X(index):
        # Getter for the bot's x-coordinate
        return state[PLAYER_ENTITY][index]["Location"]["X"]
    def Player_Y(index):
        # Getter for the bot's y-coordinate
        return state[PLAYER_ENTITY][index]["Location"]["Y"]
    def Map_Entity(x, y):
        # Getter for the entity on tile (x, y)
        if state[GAME_BLOCKS][x-1][y-1]["Entity"] is None:
            return None
        elif state[GAME_BLOCKS][x-1][y-1]["Entity"]["$type"] == PLAYER:
            return state[GAME_BLOCKS][x-1][y-1]["Entity"]["Key"]
        else:
            return state[GAME_BLOCKS][x-1][y-1]["Entity"]["$type"]
    def Map_Bomb(x, y):
        # True if there is a bomb on tile (x, y)
        return state[GAME_BLOCKS][x-1][y-1]["Bomb"] is not None
    def Map_Bomb_Key(x, y):
        # Getter for the owner of the bomb on tile (x, y)
        return state[GAME_BLOCKS][x-1][y-1]["Bomb"]["Owner"]["Key"]
    def Map_Bomb_Radius(x, y):
        # Getter for the blast radius of the bomb on tile (x, y)
        return state[GAME_BLOCKS][x-1][y-1]["Bomb"]["BombRadius"]
    def Map_Bomb_Timer(x, y):
        # Getter for the timer of the bomb on tile (x, y)
        return state[GAME_BLOCKS][x-1][y-1]["Bomb"]["BombTimer"]
    def Map_PowerUp(x, y):
        # Getter for the power-up on tile (x, y)
        if state[GAME_BLOCKS][x-1][y-1]["PowerUp"] is None:
            return None
        else:
            return state[GAME_BLOCKS][x-1][y-1]["PowerUp"]["$type"]
    def Map_Exploding(x, y):
        # Getter for the exploding status of tile (x, y)
        return state[GAME_BLOCKS][x-1][y-1]["Exploding"]
    def HasPlacedBomb():
        # True if our bot has placed a bomb whose timer is still > 2
        found = False
        y = 0
        while (y < MAP_HEIGHT) and (not found):
            x = 0
            while (x < MAP_WIDTH) and (not found):
                if Map_Bomb(x, y) and (Map_Bomb_Key(x, y) == player_key) and (Map_Bomb_Timer(x, y) > 2):
                    found = True
                x += 1
            y += 1
        return found
    def InDanger (x, y):
        # True if our bot is inside a danger zone
        # (a danger zone is any tile that a bomb blast can reach)
        danger = False
        # Left check
        x_left = x
        while ((x_left >= 0) and (Map_Entity(x_left, y) != WALL) and (Map_Entity(x_left, y) != OBSTACLE) and (not danger)):
            if (Map_Bomb(x_left, y)) and (Map_Bomb_Radius(x_left, y) >= abs(x_left - x)):
                danger = True
            else:
                x_left -= 1
        # Right check
        x_right = x + 1
        while ((x_right <= MAP_WIDTH) and (Map_Entity(x_right, y) != WALL) and (Map_Entity(x_right, y) != OBSTACLE) and (not danger)):
            if (Map_Bomb(x_right, y)) and (Map_Bomb_Radius(x_right, y) >= abs(x_right - x)):
                danger = True
            else:
                x_right += 1
        # Up check
        y_up = y - 1
        while ((y_up >= 0) and (Map_Entity(x, y_up) != WALL) and (Map_Entity(x, y_up) != OBSTACLE) and (not danger)):
            if (Map_Bomb(x, y_up)) and (Map_Bomb_Radius(x, y_up) >= abs(y_up - y)):
                danger = True
            else:
                y_up -= 1
        # Down check
        y_down = y + 1
        while ((y_down <= MAP_HEIGHT) and (Map_Entity(x, y_down) != WALL) and (Map_Entity(x, y_down) != OBSTACLE) and (not danger)):
            if (Map_Bomb(x, y_down)) and (Map_Bomb_Radius(x, y_down) >= abs(y_down - y)):
                danger = True
            else:
                y_down += 1
        # Return
        return danger
    def DangerCounter(x, y):
        # Returns the smallest bomb timer that can threaten the bot at position (x, y)
        most_urgent_timer = 99
        # Left check
        x_left = x
        while ((x_left >= 0) and (Map_Entity(x_left, y) != WALL) and (Map_Entity(x_left, y) != OBSTACLE)):
            if ((Map_Bomb(x_left, y)) and (Map_Bomb_Radius(x_left, y) >= abs(x_left - x)) and (most_urgent_timer > Map_Bomb_Timer(x_left, y))):
                most_urgent_timer = Map_Bomb_Timer(x_left, y)
            x_left -= 1
        # Right check
        x_right = x + 1
        while ((x_right <= MAP_WIDTH) and (Map_Entity(x_right, y) != WALL) and (Map_Entity(x_right, y) != OBSTACLE)):
            if ((Map_Bomb(x_right, y)) and (Map_Bomb_Radius(x_right, y) >= abs(x_right - x)) and (most_urgent_timer > Map_Bomb_Timer(x_right, y))):
                most_urgent_timer = Map_Bomb_Timer(x_right, y)
            x_right += 1
        # Up check
        y_up = y - 1
        while ((y_up >= 0) and (Map_Entity(x, y_up) != WALL) and (Map_Entity(x, y_up) != OBSTACLE)):
            if ((Map_Bomb(x, y_up)) and (Map_Bomb_Radius(x, y_up) >= abs(y_up - y)) and (most_urgent_timer > Map_Bomb_Timer(x, y_up))):
                most_urgent_timer = Map_Bomb_Timer(x, y_up)
            y_up -= 1
        # Down check
        y_down = y + 1
        while ((y_down <= MAP_HEIGHT) and (Map_Entity(x, y_down) != WALL) and (Map_Entity(x, y_down) != OBSTACLE)):
            if ((Map_Bomb(x, y_down)) and (Map_Bomb_Radius(x, y_down) >= abs(y_down - y)) and (most_urgent_timer > Map_Bomb_Timer(x, y_down))):
                most_urgent_timer = Map_Bomb_Timer(x, y_down)
            y_down += 1
        # Return
        return most_urgent_timer
    def Distance (x1, y1, x2, y2):
        # Returns the Manhattan distance (number of tiles to cross) from (x1, y1) to (x2, y2)
        return abs(x1 - x2) + abs(y1 - y2)
    def PythagorasPow (x1, y1, x2, y2):
        # Returns the squared Euclidean distance from (x1, y1) to (x2, y2)
        return (x1-x2)**2 + (y1-y2)**2
    def IsPowerUpInRange (x, y, radius):
        # True if a power-up exists within the given radius of (x, y)
        # Keep x inside the map bounds
        x_end = x + radius
        if x_end > MAP_WIDTH:
            x_end = MAP_WIDTH
        # Keep y inside the map bounds
        y_start = y - radius
        if y_start < 1:
            y_start = 1
        y_end = y + radius
        if y_end > MAP_HEIGHT:
            y_end = MAP_HEIGHT
        found = False  # initial value
        # Scan for power-ups row by row
        while (y_start <= y_end) and (not found):
            # Keep x inside the map bounds
            x_start = x - radius
            if x_start < 1:
                x_start = 1
            # Iterate over each column
            while (x_start <= x_end) and (not found):
                if Map_PowerUp(x_start, y_start) != None:
                    found = True
                else:
                    x_start += 1
            y_start += 1
        # Return
        return found
    def IsEnemyInRange (player_index, radius):
        # Returns the index of a living enemy within the given radius of the bot
        # Returns -1 if no enemy is within the radius
        enemy_index = 0
        found = False
        # Search for an enemy within the radius
        while (enemy_index < TOTAL_PLAYER-1) and (not found):
            if ((enemy_index != player_index) and (not Player_Killed(enemy_index)) and (radius >= Distance(Player_X(player_index), Player_Y(player_index), Player_X(enemy_index), Player_Y(enemy_index)))):
                found = True
            else:
                enemy_index += 1
        if found:
            return enemy_index
        else:
            return -1
    def SOS(index):
        # Produces the action the bot should take to escape the danger zone
        goal = 0
        X = Queue()  # queue of x-coordinates
        Y = Queue()  # queue of y-coordinates
        M = Queue()  # queue of move lists
        X.enqueue(Player_X(index))  # seed with the bot's current x-coordinate
        Y.enqueue(Player_Y(index))  # seed with the bot's current y-coordinate
        M.enqueue([])  # seed with an empty move list
        # Iterate while the queue is not empty and no escape has been found
        while (not X.isEmpty()) and (goal == 0):
            i = X.dequeue()
            j = Y.dequeue()
            move = M.dequeue()
            valid = False  # valid marks whether this tile can be traversed
            if (Map_Entity(i,j) == None) or (Map_Entity(i,j) == player_key):  # empty (no obstruction)
                if Map_Bomb(i,j):  # a bomb is present
                    if (Player_X(index) == i) and (Player_Y(index) == j):  # the bomb is on the bot's own tile
                        valid = True
                else:  # no bomb
                    valid = True
                count = DangerCounter(i,j) - len(move)
                # Check whether there is still time to escape along this path
                if (count == 0) or (count == 1):
                    valid = False
            if valid:
                if not InDanger(i,j):
                    # guard: the start tile may already be safe (empty move list), so fall back to DoNothing
                    goal = move[0] if move else -1
                elif len(move) < 10:
                    temp = TargetPos(i,j,math.floor(MAP_WIDTH/2),1)
                    if temp == -1:
                        temp = TargetPos(i,j,math.floor(MAP_WIDTH/2),2)
                    x_target = GetTargetX(temp)
                    y_target = GetTargetY(temp)
                    dist = []
                    dist.append(Distance(i,j-1,x_target,y_target))  # distance from the top neighbor of (i, j) to the target
                    dist.append(Distance(i-1,j,x_target,y_target))  # distance from the left neighbor of (i, j) to the target
                    dist.append(Distance(i+1,j,x_target,y_target))  # distance from the right neighbor of (i, j) to the target
                    dist.append(Distance(i,j+1,x_target,y_target))  # distance from the bottom neighbor of (i, j) to the target
                    X.enqueue(i)
                    Y.enqueue(j)
                    M.enqueue(move + [-1])
                    for q in range(0,4):
                        shortest = 0
                        for w in range(1,4):
                            if dist[w] < dist[shortest]:
                                shortest = w
                        if shortest == 0:
                            X.enqueue(i)
                            Y.enqueue(j-1)
                            M.enqueue(move + [1])
                        elif shortest == 1:
                            X.enqueue(i-1)
                            Y.enqueue(j)
                            M.enqueue(move + [2])
                        elif shortest == 2:
                            X.enqueue(i+1)
                            Y.enqueue(j)
                            M.enqueue(move + [3])
                        elif shortest == 3:
                            X.enqueue(i)
                            Y.enqueue(j+1)
                            M.enqueue(move + [4])
                        dist[shortest] = 100000  # big number
        if goal == 0:  # no way out
            return -1
        else:
            return goal
    def TargetPos(x,y,radius,search):
        # Supports two search modes
        # Search 1: returns an encoded value containing the target coordinates
        # Search 2: returns a value containing the enemy index
        x_end = x + radius  # keep x inside the map bounds
        if x_end > MAP_WIDTH:
            x_end = MAP_WIDTH
        y_start = y - radius  # keep y inside the map bounds
        if y_start < 1:
            y_start = 1
        y_end = y + radius  # keep y inside the map bounds
        if y_end > MAP_HEIGHT:
            y_end = MAP_HEIGHT
        x_start = x - radius  # keep x inside the map bounds
        if x_start < 1:
            x_start = 1
        # Initial values
        found_x = -1
        found_y = -1
        # Run the search
        for i in range(x_start, x_end):
            for j in range(y_start, y_end):
                # Search 1
                if search == 1:
                    if Map_PowerUp(i, j) != None:
                        if found_x == -1:
                            found_x = i
                            found_y = j
                        else:
                            if Distance(x,y,i,j) < Distance(x,y,found_x,found_y):
                                found_x = i
                                found_y = j
                # Search 2
                elif search == 2:
                    player_index = Player_Index(player_key)
                    enemy_index = IsEnemyInRange(player_index,radius)
                    if ((enemy_index != player_index) and (not Player_Killed(enemy_index)) and (i == Player_X(enemy_index)) and (j == Player_Y(enemy_index)) and (Distance(x, y, i, j) <= radius)):
                        if found_x == -1:
                            found_x = i
                            found_y = j
                        else:
                            if Distance(x,y,i,j) < Distance(x,y,found_x,found_y):
                                found_x = i
                                found_y = j
        if found_x == -1:  # not found
            return -1
        else:
            if search == 1:  # Search 1
                return found_x*(10**(math.floor(math.log(MAP_HEIGHT,10))+1))+found_y  # the return value packs the target coordinates into one int
            elif search == 2:  # Search 2
                # return(enemy_index*(10**(2*math.floor(math.log(MAP_HEIGHT,10))+1))+found_x*(10**(math.floor(math.log(MAP_HEIGHT,10))+1))+found_y)  # packed enemy index plus coordinates
                return enemy_index
    def GetEnemyIndex(val):
        # Extracts the enemy index from the packed value
        # return (math.floor(val/(10**(2*math.floor(math.log(MAP_HEIGHT,10))))))
        return val
    def GetTargetX(val):
        # Extracts the target x-coordinate from the packed value
        return math.floor(val/(10**(math.floor(math.log(MAP_HEIGHT,10))+1)))
    def GetTargetY(val):
        # Extracts the target y-coordinate from the packed value
        return val % (10**(math.floor(math.log(MAP_HEIGHT,10))+1))
    def GoToTarget(x,y,radius,index,search):
        # Produces the action that moves the bot toward a power-up (search 1) or an enemy (searches 2 and 3)
        # Uses greedy best-first search
        if search == 1:  # Search 1: chase a power-up
            temp = TargetPos(x,y,radius,1)  # packed target coordinates
            smin = 9999  # initial value
            move = -1  # initial value
            # Compare the heuristic (squared Euclidean distance) of the four neighbors
            if (Map_Entity(Player_X(index), Player_Y(index)-1) == None) and (not InDanger(Player_X(index), Player_Y(index)-1)):
                sup = PythagorasPow(GetTargetX(temp),GetTargetY(temp),x,y-1)  # up
                if smin > sup:
                    smin = sup
                    move = 1
            if (Map_Entity(Player_X(index)-1, Player_Y(index)) == None) and (not InDanger(Player_X(index)-1, Player_Y(index))):
                sleft = PythagorasPow(GetTargetX(temp),GetTargetY(temp),x-1,y)  # left
                if smin > sleft:
                    smin = sleft
                    move = 2
            if (Map_Entity(Player_X(index)+1, Player_Y(index)) == None) and (not InDanger(Player_X(index)+1, Player_Y(index))):
                sright = PythagorasPow(GetTargetX(temp),GetTargetY(temp),x+1,y)  # right
                if smin > sright:
                    smin = sright
                    move = 3
            if (Map_Entity(Player_X(index), Player_Y(index)+1) == None) and (not InDanger(Player_X(index), Player_Y(index)+1)):
                sdown = PythagorasPow(GetTargetX(temp),GetTargetY(temp),x,y+1)  # down
                if smin > sdown:
                    smin = sdown
                    move = 4
            # Return the best action found
            return move
        else:  # Searches 2 and 3: chase an enemy
            if search == 2:  # within a given radius
                temp = TargetPos(x,y,radius,2)  # packed enemy index
                enemy_index = GetEnemyIndex(temp)  # unpack to the actual enemy index
            else:  # hunt any living enemy wherever they are
                found = False
                searchingindex = 0
                while (searchingindex <= TOTAL_PLAYER-1) and (not found):
                    if (Player_Key(searchingindex) != player_key) and (not Player_Killed(searchingindex)):
                        found = True
                        enemy_index = searchingindex
                    else:
                        searchingindex += 1
            # Line-of-sight check, only when the enemy is alive and within the bot's bomb
            # blast radius (the original condition accidentally built a tuple, which is
            # always truthy; rewritten as the comparison its comment describes)
            time_to_attack = False
            if (not Player_Killed(enemy_index)) and (Distance(Player_X(index),Player_Y(index),Player_X(enemy_index),Player_Y(enemy_index)) <= Player_BombRadius(index)):
                # Horizontal check
                if Player_X(enemy_index) == Player_X(index):
                    # Left check
                    x_left = x
                    while ((x_left >= 0) and (Map_Entity(x_left, y) != WALL) and (Map_Entity(x_left, y) != OBSTACLE) and (not time_to_attack)):
                        if Map_Entity(x_left, y) == Player_Key(enemy_index):
                            time_to_attack = True
                        else:
                            x_left -= 1
                    # Right check
                    x_right = x + 1
                    while ((x_right <= MAP_WIDTH) and (Map_Entity(x_right, y) != WALL) and (Map_Entity(x_right, y) != OBSTACLE) and (not time_to_attack)):
                        if Map_Entity(x_right, y) == Player_Key(enemy_index):
                            time_to_attack = True
                        else:
                            x_right += 1
                # Vertical check
                elif Player_Y(enemy_index) == Player_Y(index):
                    # Up check
                    y_up = y - 1
                    while ((y_up >= 0) and (Map_Entity(x, y_up) != WALL) and (Map_Entity(x, y_up) != OBSTACLE) and (not time_to_attack)):
                        if Map_Entity(x, y_up) == Player_Key(enemy_index):
                            time_to_attack = True
                        else:
                            y_up -= 1
                    # Down check
                    y_down = y + 1
                    while ((y_down <= MAP_HEIGHT) and (Map_Entity(x, y_down) != WALL) and (Map_Entity(x, y_down) != OBSTACLE) and (not time_to_attack)):
                        if Map_Entity(x, y_down) == Player_Key(enemy_index):
                            time_to_attack = True
                        else:
                            y_down += 1
            # There may be a chance to blow up the enemy
            if time_to_attack:
                return 5
            else:  # not time_to_attack: move toward the enemy
                smin = 9999  # initial value
                move = -1  # initial value
                # Compare the heuristic (squared Euclidean distance) of the four neighbors
                if (Map_Entity(Player_X(index), Player_Y(index)-1) == None) and (not InDanger(Player_X(index), Player_Y(index)-1)):
                    sup = PythagorasPow(Player_X(enemy_index),Player_Y(enemy_index),Player_X(index),Player_Y(index)-1)
                    if smin > sup:
                        smin = sup
                        move = 1
                if (Map_Entity(Player_X(index)-1, Player_Y(index)) == None) and (not InDanger(Player_X(index)-1, Player_Y(index))):
                    sleft = PythagorasPow(Player_X(enemy_index),Player_Y(enemy_index),Player_X(index)-1,Player_Y(index))
                    if smin > sleft:
                        smin = sleft
                        move = 2
                if (Map_Entity(Player_X(index)+1, Player_Y(index)) == None) and (not InDanger(Player_X(index)+1, Player_Y(index))):
                    sright = PythagorasPow(Player_X(enemy_index),Player_Y(enemy_index),Player_X(index)+1,Player_Y(index))
                    if smin > sright:
                        smin = sright
                        move = 3
                if (Map_Entity(Player_X(index), Player_Y(index)+1) == None) and (not InDanger(Player_X(index), Player_Y(index)+1)):
                    sdown = PythagorasPow(Player_X(enemy_index),Player_Y(enemy_index),Player_X(index),Player_Y(index)+1)
                    if smin > sdown:
                        smin = sdown
                        move = 4
                # Return the best action found
                return move
    def Choice(index):
        # Chooses the action to take based on the bot's priority order
        # Priority 1: escape the danger zone
        if InDanger(Player_X(index), Player_Y(index)):
            return SOS(index)
        # Priority 2: trigger the bot's own bomb once it is out of the danger zone
        elif HasPlacedBomb():
            return 6
        # Priority 3: bomb an obstacle next to the bot
        elif ((Map_Entity(Player_X(index)-1, Player_Y(index)) == OBSTACLE) or (Map_Entity(Player_X(index)+1, Player_Y(index)) == OBSTACLE) or (Map_Entity(Player_X(index), Player_Y(index)-1) == OBSTACLE) or (Map_Entity(Player_X(index), Player_Y(index)+1) == OBSTACLE)):
            return 5
        # Priority 4: chase a power-up as the target
        elif IsPowerUpInRange(Player_X(index), Player_Y(index), math.floor(MAP_WIDTH/2)):
            return GoToTarget(Player_X(index), Player_Y(index), MAP_WIDTH, index, 1)
        # Priority 5: chase an enemy within a given radius
        elif IsEnemyInRange(index, math.floor(MAP_WIDTH)) != -1:
            enemy_key = Player_Key(IsEnemyInRange(index, math.floor(MAP_WIDTH)))
            if (((Map_Entity(Player_X(index)-1, Player_Y(index)) == enemy_key) or (Map_Entity(Player_X(index)+1, Player_Y(index)) == enemy_key) or (Map_Entity(Player_X(index), Player_Y(index)-1) == enemy_key) or (Map_Entity(Player_X(index), Player_Y(index)+1) == enemy_key)) and SOS(Player_Index(enemy_key))):
                return 5
            else:
                return GoToTarget(Player_X(index), Player_Y(index), MAP_WIDTH, index, 2)
        # Priority 6: stand still when cornered
        # (the original tested InDanger(x, y+1) twice; the right-hand neighbor was presumably intended)
        elif (InDanger(Player_X(index)-1, Player_Y(index)) and InDanger(Player_X(index)+1, Player_Y(index)) and InDanger(Player_X(index), Player_Y(index)-1) and InDanger(Player_X(index), Player_Y(index)+1)):
            return -1
        # Priority 7: hunt down the enemy anywhere on the map
        else:
            return GoToTarget(Player_X(index), Player_Y(index), MAP_WIDTH, index, 3)
action = Choice(Player_Index(player_key))
logger.info('Action: {}'.format(ACTIONS[action]))
with open(os.path.join(output_path, 'move.txt'), 'w') as f:
f.write('{}\n'.format(action))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('player_key', nargs='?')
parser.add_argument('output_path', nargs='?', default=os.getcwd())
args = parser.parse_args()
assert(os.path.isdir(args.output_path))
main(args.player_key, args.output_path)
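# --- Editorial sketch: the coordinate-packing trick that TargetPos/GetTargetX/
# GetTargetY rely on, as a standalone pair of helpers. pack()/unpack() are
# illustrative names, not part of the bot.
import math as _math

def pack(x, y, map_height):
    # Shift x left by the number of decimal digits needed to hold any valid y
    shift = 10 ** (_math.floor(_math.log10(map_height)) + 1)
    return x * shift + y

def unpack(val, map_height):
    shift = 10 ** (_math.floor(_math.log10(map_height)) + 1)
    return val // shift, val % shift

assert unpack(pack(7, 19, 21), 21) == (7, 19)  # e.g. a 21-row map packs (7, 19) as 7*100 + 19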
| luqmanarifin/2016-Bomberman | Kecewa/bot.py | bot.py | py | 21,742 | python | en | code | 1 | github-code | 6 |
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 560,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 570,
"usage_type": "attribute"
}
] |
33574367695 |
import utils
def build(data):
G = dict()
for line in data:
a, b = line.split('-')
if a in G:
G[a].append(b)
else:
G[a] = [b]
if b in G:
G[b].append(a)
else:
G[b] = [a]
return G
def traverse(G, current_cave, current_path=None, can_revisit=False):
    if current_path is None:
current_path = []
current_path.append(current_cave)
for neighbor in G[current_cave]:
if neighbor != 'start' and (neighbor.isupper() or (can_revisit or neighbor not in current_path)):
if neighbor == 'end':
PATHS.append(current_path + [neighbor])
else:
traverse(G,
neighbor,
current_path=current_path.copy(),
can_revisit=can_revisit and not (neighbor.islower() and neighbor in current_path))
if __name__ == '__main__':
timer = utils.Timer()
# Part 1
"""
timer.start()
data = utils.read_str_lines()
G = build(data)
PATHS = []
traverse(G, 'start')
print(len(PATHS))
timer.stop() # 50.94ms
"""
# Part 2
# """
timer.start()
data = utils.read_str_lines()
G = build(data)
PATHS = []
traverse(G, 'start', can_revisit=True)
print(len(PATHS))
timer.stop() # 610.58ms
# """
| 742617000027/advent-of-code-2021 | 12/12.py | 12.py | py | 1,386 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "utils.Timer",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.read_str_lines",
"line_number": 51,
"usage_type": "call"
}
] |
21115802122 |
#!/usr/bin/env python
# Pin the GI bindings before importing from gi.repository (required by newer PyGObject)
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '3.0')
gi.require_version('Vte', '2.91')
from gi.repository import Gtk, Gdk, GtkSource, GObject, Vte, GLib, Pango
from gi.repository.GdkPixbuf import Pixbuf
import os
import stat
import time
import jedi
class Handler:
def onShowCompletion(self, sview):
buffer = sview.get_buffer()
startiter, enditer = buffer.get_bounds()
mark = buffer.get_insert()
cpostiter = buffer.get_iter_at_mark(mark).copy()
source = buffer.get_text(startiter, enditer,
include_hidden_chars=False)
script = jedi.Script(source, cpostiter.get_line() + 1,
cpostiter.get_line_offset(), 'example.py')
completions = script.completions()
if completions != []:
Handler.openCompletions(completions, sview, cpostiter)
def openCompletions(completions, sview, cpostiter):
iter_loc = sview.get_iter_location(cpostiter)
win_loc = sview.buffer_to_window_coords(
Gtk.TextWindowType.WIDGET, iter_loc.x, iter_loc.y)
win = sview.get_window(Gtk.TextWindowType.WIDGET)
view_pos = win.get_toplevel().get_position()
x = win_loc[0] + view_pos[0] + 180
y = win_loc[1] + view_pos[1] + 130
try:
ccwin = Gtk.Window()
ccwin.set_keep_above(True)
ccwin.set_decorated(False)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
swin = Gtk.ScrolledWindow()
title = Gtk.Label("Title")
descr = Gtk.Label("Descr")
vbox2 = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vbox2.pack_start(title, True, True, 0)
vbox2.pack_start(descr, True, True, 0)
for c in completions:
b = Gtk.Button(c.name)
b.connect("clicked", Handler.onComplete,
c, ccwin, sview.get_buffer())
b.connect("focus-in-event", Handler.onFocusCompletion,
c, title, descr)
b.connect("focus-out-event", Handler.onUnFocusCompletion)
vbox.pack_start(b, True, True, 0)
hbox.pack_start(swin, True, True, 0)
swin.add(vbox)
hbox.pack_start(vbox2, True, True, 0)
ccwin.add(hbox)
ccwin.set_size_request(800, 400)
ccwin.move(x, y)
ccwin.connect("focus-out-event", Handler.onCCWinDestroy, ccwin)
ccwin.connect("key-release-event", Handler.onCCWinEsc)
ccwin.show_all()
except Exception as e:
print(e)
def onFocusCompletion(self, evt, completion, title, descr):
title.set_text(completion.description)
descr.set_text(completion.doc)
def onUnFocusCompletion(self, evt, data=None):
print("P")
def onCCWinEsc(self, event, data=None):
if event.keyval == Gdk.KEY_Escape:
self.destroy()
def onComplete(self, completion, win, buf):
buf.insert_at_cursor(completion.complete)
win.destroy()
def onCCWinDestroy(self, evt, window):
window.destroy()
########################################################
def onCopy(self, *args):
Handler.getCurrentBuffer().copy_clipboard(app.clipboard)
def onCut(self, *args):
Handler.getCurrentBuffer().cut_clipboard(app.clipboard, True)
def onPaste(self, *args):
Handler.getCurrentBuffer().paste_clipboard(app.clipboard, None, True)
def onModified(self, label, buffer):
if buffer.get_modified():
label.set_markup("<span foreground='#ff8000'>%s</span>"
% label.get_text())
def onDeleteWindow(self, *args):
for i in app.openfiles:
pos = app.builder.get_object("notebook1").page_num(i[2])
app.builder.get_object("notebook1").set_current_page(pos)
isclosed = Handler.onCloseTab(Handler(), i[0], i[1], i[2])
print(isclosed)
if not isclosed:
return True
Gtk.main_quit(*args)
def onFullscreen(self, *args):
app.builder.get_object("window1").fullscreen()
def onWindow(self, *args):
app.builder.get_object("window1").unfullscreen()
def onOpen(self, *args):
dialog = Gtk.FileChooserDialog("open file",
app.builder.get_object("window1"),
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
response = dialog.run()
if response == Gtk.ResponseType.OK:
Handler.openfile(dialog.get_filename())
dialog.destroy()
def onNew(self, *args):
buffer = GtkSource.Buffer()
lanm = GtkSource.LanguageManager()
lan = lanm.get_language('python')
buffer.set_language(lan)
buffer.set_highlight_syntax(True)
buffer.set_highlight_matching_brackets(True)
buffer.set_text("#!/usr/bin/env python")
buffer.set_modified(False)
swindow = Handler.create_tab("unnamed", buffer)
swindow.get_children()[0].connect("show-completion",
Handler.onShowCompletion, buffer)
app.openfiles.append([None, buffer, swindow])
def create_tab(path, buffer):
hbox = Gtk.HBox(False, 0)
label = Gtk.Label(path)
hbox.pack_start(label, True, True, 0)
close_image = Gtk.IconTheme.get_default().load_icon("exit", 16, 0)
imgw = Gtk.Image()
imgw.set_from_pixbuf(close_image)
btn = Gtk.Button()
btn.set_focus_on_click(False)
btn.add(imgw)
hbox.pack_start(btn, False, False, 0)
hbox.show_all()
sview = GtkSource.View()
sview.set_buffer(buffer)
# make settings
sview.set_show_line_numbers(True)
sview.set_auto_indent(True)
sview.set_tab_width(4)
sview.set_indent_width(4)
sview.set_insert_spaces_instead_of_tabs(True)
sview.set_right_margin_position(80)
sview.set_show_right_margin(True)
sview.modify_font(Pango.FontDescription('Dejavu Sans Mono'))
# try:
# bg_color = Gdk.RGBA()
# Gdk.RGBA.parse(bg_color, "#111111")
# sview.override_background_color(Gtk.StateType.NORMAL, bg_color)
# fg_color = Gdk.RGBA()
# Gdk.RGBA.parse(fg_color, "#DDDDDD")
# sview.override_color(Gtk.StateType.NORMAL, fg_color)
# except Exception as e:
# print(e)
# pass
swindow = Gtk.ScrolledWindow()
swindow.add(sview)
notebook = app.builder.get_object("notebook1")
pos = notebook.append_page(swindow, hbox)
notebook.show_all()
btn.connect("clicked", Handler.onCloseTab, path, buffer, swindow)
buffer.connect("modified-changed", Handler.onModified, label, buffer)
notebook.set_current_page(pos)
return swindow
def openfile(path):
for of in app.openfiles:
if of[0] != None:
if path in of[0]:
return
with open(path, "r") as loadedfile:
buffer = GtkSource.Buffer()
buffer.set_text(loadedfile.read())
buffer.set_modified(False)
# syntax highlighting
lman = GtkSource.LanguageManager()
lan = lman.guess_language(path)
swindow = Handler.create_tab(path, buffer)
if lan:
buffer.set_highlight_syntax(True)
buffer.set_language(lan)
if lan.get_name() == 'Python':
swindow.get_children()[0].connect("show-completion",
Handler.onShowCompletion,
swindow.get_children()[0])
else:
buffer.set_highlight_syntax(False)
buffer.set_highlight_matching_brackets(True)
app.openfiles.append([path, buffer, swindow])
def askForSave(buffer):
dialog = Gtk.Dialog("ask for save dialog", app.builder.get_object("window1"), 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_YES, Gtk.ResponseType.YES,
Gtk.STOCK_NO, Gtk.ResponseType.NO))
        dialog.get_content_area().add(Gtk.Label("File not saved. Do you want to save it now?"))
dialog.set_default_size(150, 100)
dialog.show_all()
response = dialog.run()
if response == Gtk.ResponseType.YES:
Handler.onSaveCurrent(Handler())
dialog.destroy()
if not buffer.get_modified():
return True
else:
return False
elif response == Gtk.ResponseType.NO:
dialog.destroy()
return True
else:
dialog.destroy()
return False
def onCloseTab(self, path, buffer, swindow):
pos = app.builder.get_object("notebook1").page_num(swindow)
window = app.builder.get_object("notebook1").get_nth_page(pos)
buffer = window.get_child().get_buffer()
if buffer.get_modified():
response = Handler.askForSave(buffer)
if response:
app.builder.get_object("notebook1").remove_page(pos)
for i in app.openfiles:
if i[1] == buffer:
path = i[0]
app.openfiles.remove([path, buffer, swindow])
return True
else:
return False
else:
app.builder.get_object("notebook1").remove_page(pos)
for i in app.openfiles:
if i[1] == buffer:
path = i[0]
app.openfiles.remove([path, buffer, swindow])
return True
def savefile(buffer, path, label):
with open(path, 'w') as f:
f.write(buffer.get_text(*buffer.get_bounds(), include_hidden_chars=True))
label.set_markup("<span foreground='#000000'>%s</span>" % label.get_text())
buffer.set_modified(False)
Handler.updateOpenFiles(path, buffer)
def onSaveCurrent(self, *args):
buffer, label = Handler.getCurrentBufferAndLabel()
path = Handler.getPathFromOpenFiles(buffer)
if path == None:
path = Handler.saveAs()
label.set_text(path)
Handler.savefile(buffer, path, label)
def updateOpenFiles(path, buffer):
for i in app.openfiles:
if i[1] == buffer:
i[0] = path
i[1] = buffer
def onSaveAsCurrent(self, *args):
buffer, label = Handler.getCurrentBufferAndLabel()
path = Handler.saveAs()
label.set_text(path)
Handler.savefile(buffer, path, label)
def saveAs():
dialog = Gtk.FileChooserDialog("save file as", app.builder.get_object("window1"),
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
response = dialog.run()
retval = None
if response == Gtk.ResponseType.OK:
retval = dialog.get_filename()
dialog.destroy()
return retval
def getPathFromOpenFiles(buffer):
for i in app.openfiles:
if i[1] == buffer:
return i[0]
def getCurrentBufferAndLabel():
currentpage = app.builder.get_object("notebook1").get_current_page()
window = app.builder.get_object("notebook1").get_nth_page(currentpage)
label = app.builder.get_object("notebook1").get_tab_label(window).get_children()[0]
view = window.get_child()
return view.get_buffer(), label
def onRunApp(self, *args):
f = "/tmp/%i.py" % int(time.time())
with open (f, "w") as loadedfile:
buffer, label = Handler.getCurrentBufferAndLabel()
loadedfile.write(buffer.get_text(*buffer.get_bounds(), include_hidden_chars=True))
label.set_markup("<span foreground='#009000'>%s</span>" % label.get_text())
termwin = Gtk.Window()
termwin.set_default_size(800, 600)
def closeTerm(win, evt, label, buffer):
win.destroy()
os.remove(f)
if buffer.get_modified():
label.set_markup("<span foreground='#FF8000'>%s</span>" % label.get_text())
else:
label.set_markup("<span foreground='#000000'>%s</span>" % label.get_text())
termwin.connect("delete-event", closeTerm, label, buffer)
terminal = Vte.Terminal()
terminal.spawn_sync(
Vte.PtyFlags.DEFAULT,
os.environ['HOME'],
["/bin/bash"],
[],
GLib.SpawnFlags.DO_NOT_REAP_CHILD,
None,
None,
)
termwin.add(terminal)
termwin.show_all()
cmd = "python " + f + "\n"
terminal.feed_child(cmd, len(cmd))
class FsTree:
def populateFileSystemTreeStore(treeStore, path, parent=None):
itemCounter = 0
# iterate over the items in the path
for item in os.listdir(path):
# Get the absolute path of the item
itemFullname = os.path.join(path, item)
# Extract metadata from the item
try:
itemMetaData = os.stat(itemFullname)
except:
pass
# Determine if the item is a folder
itemIsFolder = stat.S_ISDIR(itemMetaData.st_mode)
# Generate an icon from the default icon theme
itemIcon = Gtk.IconTheme.get_default().load_icon("folder" if itemIsFolder else "empty", 22, 0)
# Append the item to the TreeStore
currentIter = treeStore.append(parent, [item, itemIcon, itemFullname])
# add dummy if current item was a folder
if itemIsFolder:
try:
if not os.listdir(itemFullname) == [] :
treeStore.append(currentIter, [None, None, None])
except:
pass
#increment the item counter
itemCounter += 1
# add the dummy node back if nothing was inserted before
if itemCounter < 1: treeStore.append(parent, [None, None, None])
def onFSRowExpanded(treeView, treeIter, treePath):
# get the associated model
treeStore = treeView.get_model()
# get the full path of the position
newPath = treeStore.get_value(treeIter, 2)
# populate the subtree on curent position
FsTree.populateFileSystemTreeStore(treeStore, newPath, treeIter)
# remove the first child (dummy node)
treeStore.remove(treeStore.iter_children(treeIter))
def onFSRowCollapsed(treeView, treeIter, treePath):
# get the associated model
treeStore = treeView.get_model()
# get the iterator of the first child
currentChildIter = treeStore.iter_children(treeIter)
# loop as long as some childern exist
while currentChildIter:
# remove the first child
treeStore.remove(currentChildIter)
# refresh the iterator of the next child
currentChildIter = treeStore.iter_children(treeIter)
# append dummy node
treeStore.append(treeIter, [None, None, None])
def onFSRowActivated(treeView, path, column):
model = treeView.get_model()
curiter = model.get_iter(path)
fspath = model.get_value(curiter, 2)
if not os.path.isdir(str(fspath)):
Handler.openfile(str(fspath))
class Pyide:
openfiles = []
# fs tree store from http://stackoverflow.com/questions/23433819/creating-a-simple-file-browser-using-python-and-gtktreeview
def __init__(self, *args):
self.builder = Gtk.Builder()
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
GObject.type_register(GtkSource.View)
self.builder.add_from_file("pyide.glade")
self.my_accelerators = Gtk.AccelGroup()
fileSystemTreeStore = Gtk.TreeStore(str, Pixbuf, str)
FsTree.populateFileSystemTreeStore(fileSystemTreeStore, os.path.expanduser("~"))
fileSystemTreeView = self.builder.get_object("treeview1")
fileSystemTreeView.set_model(fileSystemTreeStore)
treeViewCol = Gtk.TreeViewColumn("File")
colCellText = Gtk.CellRendererText()
colCellImg = Gtk.CellRendererPixbuf()
treeViewCol.pack_start(colCellImg, False)
treeViewCol.pack_start(colCellText, True)
treeViewCol.add_attribute(colCellText, "text", 0)
treeViewCol.add_attribute(colCellImg, "pixbuf", 1)
fileSystemTreeView.append_column(treeViewCol)
fileSystemTreeView.connect("row-expanded", FsTree.onFSRowExpanded)
fileSystemTreeView.connect("row-collapsed", FsTree.onFSRowCollapsed)
fileSystemTreeView.connect("row-activated", FsTree.onFSRowActivated)
self.builder.connect_signals(Handler())
def add_accelerator(self, widget, accelerator, signal="activate"):
if accelerator is not None:
key, mod = Gtk.accelerator_parse(accelerator)
widget.add_accelerator(signal, self.my_accelerators, key, mod, Gtk.AccelFlags.VISIBLE)
print("The accelerator is well added with the signal " + signal)
def run(self):
window = self.builder.get_object("window1")
window.add_accel_group(self.my_accelerators)
window.show_all()
Handler.openfile("./pyide.py")
Gtk.main()
if __name__ == "__main__":
app = Pyide()
app.run()
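# --- Editorial sketch: the jedi call used in Handler.onShowCompletion, as a
# standalone helper. This is the pre-0.16 jedi API; newer jedi releases use
# jedi.Script(code=source, path=...).complete(line, column) instead. complete_at()
# is an illustrative helper, not part of pyide.
def complete_at(source, line, column):
    script = jedi.Script(source, line, column, 'example.py')
    return [c.name for c in script.completions()]

# complete_at("import os\nos.pa", 2, 5)  -> completion names such as "path"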
| superdachs/pyide | pyide.py | pyide.py | py | 18,021 | python | en | code | 1 | github-code | 6 |
[
{
"api_name": "jedi.Script",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.TextWindowType",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.TextWindowType",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Window",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Box",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Orientation",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.Box",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Orientation",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.ScrolledWindow",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Box",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Orientation",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gdk.KEY_Escape",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gdk",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.main_quit",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.FileChooserDialog",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.FileChooserAction",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.STOCK_CANCEL",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.STOCK_OPEN",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.Buffer",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.LanguageManager",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.HBox",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.IconTheme.get_default",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.IconTheme",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Image",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.View",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "gi.repository.Pango.FontDescription",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "gi.repository.Pango",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ScrolledWindow",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.Buffer",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.LanguageManager",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Dialog",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.STOCK_CANCEL",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.STOCK_YES",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.STOCK_NO",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.FileChooserDialog",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.FileChooserAction",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.STOCK_CANCEL",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.STOCK_SAVE",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.ResponseType",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Window",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "gi.repository.Vte.Terminal",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "gi.repository.Vte",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "gi.repository.Vte.PtyFlags",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Vte",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.GLib.SpawnFlags",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.GLib",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "stat.S_ISDIR",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.IconTheme.get_default",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.IconTheme",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.Builder",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Clipboard.get",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Clipboard",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gdk.SELECTION_CLIPBOARD",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gdk",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "gi.repository.GObject.type_register",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "gi.repository.GObject",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "gi.repository.GtkSource.View",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.GtkSource",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.AccelGroup",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.TreeStore",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "gi.repository.GdkPixbuf.Pixbuf",
"line_number": 441,
"usage_type": "argument"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "os.path.expanduser",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 442,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.TreeViewColumn",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.CellRendererText",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.CellRendererPixbuf",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.accelerator_parse",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.AccelFlags",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.main",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 473,
"usage_type": "name"
}
] |
31249185445
|
import io
import os
import torch
from torch import nn
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from transformers import (set_seed,
TrainingArguments,
Trainer,
GPT2Config,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
GPT2ForSequenceClassification,
PreTrainedTokenizerFast)
from sklearn.metrics import classification_report, accuracy_score
from music_midi_dataset import MidiMusicDataset
def start():
set_seed(123)
epochs = 4
batch_size = 8
max_length = 60
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_name_or_path = 'gpt2'
labels_ids = {'cheerful': 0, 'tense': 1}
n_labels = len(labels_ids)
train_midi_data_dir = '../data/music_midi/emotion_midi_text/train'
test_midi_data_dir = '../data/music_midi/emotion_midi_text/test'
path_tokenizer = 'tokenizer.json'
tokenizer = PreTrainedTokenizerFast(tokenizer_file=path_tokenizer)
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
classes = ['cheerful', 'tense']
pad_length = 128
learning_rate = 0.001
    print('Loading configuration...')
model_config = GPT2Config.from_pretrained(pretrained_model_name_or_path=model_name_or_path, num_labels=n_labels)
print('Loading model...')
model = GPT2ForSequenceClassification.from_pretrained(pretrained_model_name_or_path=model_name_or_path,
config=model_config)
model.resize_token_embeddings(len(tokenizer))
# fix model padding token id
model.config.pad_token_id = model.config.eos_token_id
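    # GPT-2 was pretrained without a pad token, so the EOS token id is reused
    # here; otherwise batched sequence classification raises an error.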
# Load model to defined device.
model.to(device)
training_data = MidiMusicDataset(midi_data_dir=train_midi_data_dir,
classes=classes,
tokenizer=tokenizer,
block_size=pad_length)
test_data = MidiMusicDataset(midi_data_dir=test_midi_data_dir,
classes=classes,
tokenizer=tokenizer,
block_size=pad_length)
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
valid_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
optimizer = AdamW(model.parameters(),
lr=2e-5, # default is 5e-5, our notebook had 2e-5
eps=1e-8 # default is 1e-8.
)
# Total number of training steps is number of batches * number of epochs.
# `train_dataloader` contains batched data so `len(train_dataloader)` gives
# us the number of batches.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps)
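    # Worked example: 1000 training samples with batch_size=8 give 125 batches,
    # so total_steps = 125 * 4 epochs = 500 scheduler steps.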
# Store the average loss after each epoch so we can plot them.
all_loss = {'train_loss': [], 'val_loss': []}
all_acc = {'train_acc': [], 'val_acc': []}
for epoch in range(epochs):
print(f'Epoch {epoch}')
print('Training on batches...')
# Perform one full pass over the training set.
train_labels, train_predict, train_loss = train(train_dataloader, model, optimizer, scheduler, device)
train_acc = accuracy_score(train_labels, train_predict)
# Get prediction form model on validation data.
print('Validation on batches...')
valid_labels, valid_predict, val_loss = validation(valid_dataloader, model, device)
val_acc = accuracy_score(valid_labels, valid_predict)
# Print loss and accuracy values to see how training evolves.
print(" train_loss: %.5f - val_loss: %.5f - train_acc: %.5f - valid_acc: %.5f" % (
train_loss, val_loss, train_acc, val_acc))
print()
# Store the loss value for plotting the learning curve.
all_loss['train_loss'].append(train_loss)
all_loss['val_loss'].append(val_loss)
all_acc['train_acc'].append(train_acc)
all_acc['val_acc'].append(val_acc)
def train(dataloader, model, optimizer_, scheduler_, device_):
predictions_labels = []
true_labels = []
total_loss = 0
model.train()
for batch in tqdm(dataloader):
# print(batch)
true_labels += batch['labels'].numpy().flatten().tolist()
batch = {k: v.type(torch.long).to(device_) for k, v in batch.items()}
model.zero_grad()
outputs = model(**batch)
loss, logits = outputs[:2]
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
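        # Clip the global gradient norm to 1.0, the usual guard against
        # exploding gradients when fine-tuning transformers.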
optimizer_.step()
scheduler_.step()
logits = logits.detach().cpu().numpy()
predictions_labels += logits.argmax(axis=-1).flatten().tolist()
avg_epoch_loss = total_loss / len(dataloader)
return true_labels, predictions_labels, avg_epoch_loss
def validation(dataloader, model, device_):
predictions_labels = []
true_labels = []
total_loss = 0
model.eval()
for batch in tqdm(dataloader):
true_labels += batch['labels'].numpy().flatten().tolist()
batch = {k: v.type(torch.long).to(device_) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
loss, logits = outputs[:2]
logits = logits.detach().cpu().numpy()
total_loss += loss.item()
predict_content = logits.argmax(axis=-1).flatten().tolist()
predictions_labels += predict_content
avg_epoch_loss = total_loss / len(dataloader)
return true_labels, predictions_labels, avg_epoch_loss
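def save_model(model, tokenizer, out_dir='gpt2_emotion_classifier'):
    # Hypothetical helper, not part of the original script: persists the
    # fine-tuned weights and tokenizer so the classifier can be reloaded
    # later via .from_pretrained(out_dir). The directory name is an
    # illustrative assumption.
    model.save_pretrained(out_dir)
    tokenizer.save_pretrained(out_dir)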
if __name__ == '__main__':
start()
|
Vitaliy1234/music_generation
|
emotion_classification/gpt2_classifier.py
|
gpt2_classifier.py
|
py
| 6,088 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "transformers.set_seed",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "transformers.PreTrainedTokenizerFast",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Config.from_pretrained",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Config",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "transformers.GPT2ForSequenceClassification.from_pretrained",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2ForSequenceClassification",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "music_midi_dataset.MidiMusicDataset",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "music_midi_dataset.MidiMusicDataset",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "transformers.AdamW",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "transformers.get_linear_schedule_with_warmup",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 145,
"usage_type": "call"
}
] |
74794654268
|
# import urllib library
from urllib.request import urlopen
import json
import random
score = 0
import string
NUMBER_OF_ATTEMPTS = 2
ENTER_ANSWER = 'Hit %s for your answer\n'
TRY_AGAIN = 'Incorrect!!! Try again.'
CORRECT = 'Correct'
NO_MORE_ATTEMPTS = 'Incorrect!!! You ran out of your attempts'
def question(message, options, correct, attempts=NUMBER_OF_ATTEMPTS):
print (message)
while attempts > 0:
response = input(ENTER_ANSWER % ', '.join(options))
if response == correct:
print (CORRECT)
return True
else:
attempts -= 1
print (TRY_AGAIN)
print (NO_MORE_ATTEMPTS)
return False
urlQuestion = "https://d-wwts.ext.hp.com/qna/questions.json"
urlAnswers = "https://d-wwts.ext.hp.com/qna/answers.json"
responseQuestionsAndAnswers = urlopen(urlQuestion)
responseQandA_json = json.loads(responseQuestionsAndAnswers.read())
responseCorrectAnswers = urlopen(urlAnswers)
responseCorrectAnswers_json = json.loads(responseCorrectAnswers.read())
random_item = random.choice(responseQandA_json)
questionId = random_item['id']
filterAnswer = [f for f in responseCorrectAnswers_json if f["id"] == questionId]
ans = filterAnswer[0]['a']
question2 = question(random_item['q'], random_item['a'], ans)
|
RoiAtias/Devops_test
|
test/test.py
|
test.py
|
py
| 1,321 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 45,
"usage_type": "call"
}
] |
64085434
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 20:30:54 2019
@author: Sunanda
"""
import argparse, re, decimal
parser = argparse.ArgumentParser(
description='''The purpose of this application is to check
the COMP 472/6721 Winter 2019 Projects'''
)
parser.add_argument('-c',
required = False,
action='store_true',
help="optional argument to check the format")
parser.add_argument( '-m',
required = False,
action='store_true',
help="check the trace files for minimax implementation")
parser.add_argument('-a',
required = False,
action='store_true',
help="check the trace files for alpha beta implementation")
parser.add_argument("-f", dest="filename", required=True,
help="output file from demos", metavar="FILE",
type=argparse.FileType('r'))
args = parser.parse_args()
content = args.filename.read().strip()
groups = re.split('(?:\r\n\r\n|\n\n)',content)
if args.m or args.a:
    print("\n\x1b[1;31mACCESS DENIED\x1b[0m")
else:
print("Checking format.. ")
error = 0
traceNo = 0
for i,bunch in enumerate(groups,1):
if bunch.startswith('\r') or bunch.startswith('\n'):
error = 5
break
rows = bunch.split()
if i % 2 == 1:
traceNo += 1
if len(rows) > 2:
error = 1
break
elif len(rows) < 2:
error = 2
break
for val in rows:
try:
float(val)
except:
error = 3
break
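            # Decimal('3.1').as_tuple().exponent == -1 (one decimal place, OK),
            # while Decimal('3.14') gives -2 and is rejected below.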
if decimal.Decimal(val).as_tuple().exponent < -1:
error = 4
break
if error != 0 :
break
# print("done")
    if error == 1:
        print("\x1b[1;31mERROR:\x1b[0m Too many values in the beginning (Trace No. "+ str(traceNo) +")")
    elif error == 2:
        print("\x1b[1;31mERROR:\x1b[0m Not enough values in the beginning (Trace No. "+ str(traceNo) +")")
    elif error == 3:
        print("\x1b[1;31mERROR:\x1b[0m Number expected (Trace No. "+ str(traceNo) +")")
    elif error == 4:
        print("\x1b[1;31mERROR:\x1b[0m Up to one decimal place expected (Trace No. "+ str(traceNo) +")")
    elif error == 5:
        print("\x1b[1;31mERROR:\x1b[0m Too many new lines (Trace No. "+ str(traceNo) +")")
    else:
        print("\x1b[1;32mCORRECT FORMAT\x1b[0m")
|
lqw1111/COMP6721-AI-Project
|
check.py
|
check.py
|
py
| 2,781 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 64,
"usage_type": "call"
}
] |
41169055403
|
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
import datetime
import time
import matplotlib.pyplot as plt
from torchinfo import summary
import yaml
import json
import sys
sys.path.append("..")
from lib.utils import (
MaskedMAELoss,
print_log,
seed_everything,
set_cpu_num,
CustomJSONEncoder,
)
from lib.metrics import MAE_RMSE, RMSE_MAE_MAPE
from lib.data_prepare import get_dataloaders_from_tvt
from models import model_select
@torch.no_grad()
def eval_model(model, valset_loader, criterion):
model.eval()
batch_loss_list = []
for x_batch, y_batch in valset_loader:
x_batch = x_batch.to(DEVICE)
y_batch = y_batch.to(DEVICE)
out_batch = model(x_batch)
# out_batch = SCALER.inverse_transform(out_batch)
loss = criterion(out_batch, y_batch)
batch_loss_list.append(loss.item())
return np.mean(batch_loss_list)
@torch.no_grad()
def predict(model, loader):
model.eval()
y = []
out = []
for x_batch, y_batch in loader:
x_batch = x_batch.to(DEVICE)
y_batch = y_batch.to(DEVICE)
out_batch = model(x_batch)
# out_batch = SCALER.inverse_transform(out_batch)
out_batch = out_batch.cpu().numpy()
y_batch = y_batch.cpu().numpy()
out.append(out_batch)
y.append(y_batch)
out = np.vstack(out).squeeze() # (samples, out_steps, num_nodes)
y = np.vstack(y).squeeze()
return y, out
def train_one_epoch(
model, trainset_loader, optimizer, scheduler, criterion, clip_grad, log=None
):
model.train()
batch_loss_list = []
for x_batch, y_batch in trainset_loader:
x_batch = x_batch.to(DEVICE)
y_batch = y_batch.to(DEVICE)
out_batch = model(x_batch)
# out_batch = SCALER.inverse_transform(out_batch)
loss = criterion(out_batch, y_batch)
batch_loss_list.append(loss.item())
optimizer.zero_grad()
loss.backward()
if clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
optimizer.step()
epoch_loss = np.mean(batch_loss_list)
scheduler.step()
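    # The scheduler is stepped once per epoch, so MultiStepLR milestones in
    # the config are expressed in epochs rather than batches.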
return epoch_loss
def train(
model,
trainset_loader,
valset_loader,
optimizer,
scheduler,
criterion,
clip_grad=0,
max_epochs=200,
early_stop=10,
compile_model=False,
verbose=1,
plot=False,
log=None,
save=None,
):
if torch.__version__ >= "2.0.0" and compile_model:
model = torch.compile(model)
model = model.to(DEVICE)
wait = 0
min_val_loss = np.inf
train_loss_list = []
val_loss_list = []
for epoch in range(max_epochs):
train_loss = train_one_epoch(
model, trainset_loader, optimizer, scheduler, criterion, clip_grad, log=log
)
train_loss_list.append(train_loss)
val_loss = eval_model(model, valset_loader, criterion)
val_loss_list.append(val_loss)
if (epoch + 1) % verbose == 0:
print_log(
datetime.datetime.now(),
"Epoch",
epoch + 1,
" \tTrain Loss = %.5f" % train_loss,
"Val Loss = %.5f" % val_loss,
log=log,
)
if val_loss < min_val_loss:
wait = 0
min_val_loss = val_loss
best_epoch = epoch
best_state_dict = model.state_dict()
else:
wait += 1
if wait >= early_stop:
break
model.load_state_dict(best_state_dict)
train_mae, train_rmse = MAE_RMSE(*predict(model, trainset_loader))
val_mae, val_rmse = MAE_RMSE(*predict(model, valset_loader))
out_str = f"Early stopping at epoch: {epoch+1}\n"
out_str += f"Best at epoch {best_epoch+1}:\n"
out_str += "Train Loss = %.5f\n" % train_loss_list[best_epoch]
out_str += "Train MAE = %.5f, RMSE = %.5f\n" % (
train_mae,
train_rmse,
)
out_str += "Val Loss = %.5f\n" % val_loss_list[best_epoch]
out_str += "Val MAE = %.5f, RMSE = %.5f" % (
val_mae,
val_rmse,
)
print_log(out_str, log=log)
if plot:
plt.plot(range(0, epoch + 1), train_loss_list, "-", label="Train Loss")
plt.plot(range(0, epoch + 1), val_loss_list, "-", label="Val Loss")
plt.title("Epoch-Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
if save:
torch.save(best_state_dict, save)
return model
@torch.no_grad()
def test_model(model, testset_loader, log=None):
model.eval()
print_log("--------- Test ---------", log=log)
start = time.time()
y_true, y_pred = predict(model, testset_loader)
end = time.time()
(
mae_all,
rmse_all,
) = MAE_RMSE(y_true, y_pred)
out_str = "Test MAE = %.5f, RMSE = %.5f\n" % (
mae_all,
rmse_all,
)
# (rmse_all, mae_all, mape_all) = RMSE_MAE_MAPE(y_true, y_pred)
# out_str = "Test MAE = %.5f, RMSE = %.5f, MAPE = %.5f\n" % (
# rmse_all,
# mae_all,
# mape_all,
# )
print_log(out_str, log=log, end="")
print_log("Inference time: %.2f s" % (end - start), log=log)
if __name__ == "__main__":
# -------------------------- set running environment ------------------------- #
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default="500")
parser.add_argument("-p", type=int, default="20")
parser.add_argument("-m", "--model", type=str, default="gridgcn")
parser.add_argument("-g", "--gpu_num", type=int, default=0)
parser.add_argument("-c", "--compile", action="store_true")
parser.add_argument("--seed", type=int, default=233)
parser.add_argument("--cpus", type=int, default=1)
args = parser.parse_args()
seed_everything(args.seed)
set_cpu_num(args.cpus)
GPU_ID = args.gpu_num
os.environ["CUDA_VISIBLE_DEVICES"] = f"{GPU_ID}"
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n = args.n
p = args.p
dataset = f"n_{n}_p_{p}"
data_path = f"../data/{dataset}"
model_name = args.model.upper()
model_class = model_select(model_name)
model_name = model_class.__name__
with open(f"../configs/{model_name}.yaml", "r") as f:
cfg = yaml.safe_load(f)
cfg = cfg[dataset]
# -------------------------------- load model -------------------------------- #
# cfg.get(key, default_value=None): no need to write in the config if not used
# cfg[key]: must be assigned in the config, else KeyError
if cfg.get("pass_device"):
cfg["model_args"]["device"] = DEVICE
model = model_class(**cfg["model_args"])
# ------------------------------- make log file ------------------------------ #
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
log_path = f"../logs/{model_name}"
if not os.path.exists(log_path):
os.makedirs(log_path)
log = os.path.join(log_path, f"{model_name}-{dataset}-{now}.log")
log = open(log, "a")
log.seek(0)
log.truncate()
# ------------------------------- load dataset ------------------------------- #
print_log(dataset, log=log)
(
trainset_loader,
valset_loader,
testset_loader,
) = get_dataloaders_from_tvt(
n,
p,
batch_size=cfg.get("batch_size", 32),
log=log,
)
print_log(log=log)
# --------------------------- set model saving path -------------------------- #
save_path = f"../saved_models/{model_name}"
if not os.path.exists(save_path):
os.makedirs(save_path)
save = os.path.join(save_path, f"{model_name}-{dataset}-{now}.pt")
# ---------------------- set loss, optimizer, scheduler ---------------------- #
# criterion = nn.SmoothL1Loss()
# criterion = MaskedMAELoss()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(
model.parameters(),
lr=cfg["lr"],
weight_decay=cfg.get("weight_decay", 0),
eps=cfg.get("eps", 1e-8),
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=cfg.get("milestones", []),
gamma=cfg.get("lr_decay_rate", 0.1),
verbose=False,
)
# --------------------------- print model structure -------------------------- #
print_log("---------", model_name, "---------", log=log)
print_log(
json.dumps(cfg, ensure_ascii=False, indent=4, cls=CustomJSONEncoder), log=log
)
print_log(
summary(
model,
[
cfg["batch_size"],
cfg["num_grids_width"],
cfg["num_grids_height"],
cfg["model_args"]["input_dim"],
],
verbose=0,
),
log=log,
)
print_log(log=log)
# --------------------------- train and test model --------------------------- #
print_log(f"Loss: {criterion._get_name()}", log=log)
print_log(log=log)
model = train(
model,
trainset_loader,
valset_loader,
optimizer,
scheduler,
criterion,
clip_grad=cfg.get("clip_grad"),
max_epochs=cfg.get("max_epochs", 200),
early_stop=cfg.get("early_stop", 10),
compile_model=args.compile,
verbose=1,
log=log,
save=save,
)
test_model(model, testset_loader, log=log)
log.close()
|
XDZhelheim/GN-RRT
|
scripts/train.py
|
train.py
|
py
| 9,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.__version__",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "torch.compile",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "lib.utils.print_log",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "lib.metrics.MAE_RMSE",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "lib.metrics.MAE_RMSE",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "lib.metrics.MAE_RMSE",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "lib.utils.seed_everything",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "lib.utils.set_cpu_num",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "models.model_select",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "lib.utils.print_log",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "lib.data_prepare.get_dataloaders_from_tvt",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.MultiStepLR",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "lib.utils.print_log",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "lib.utils.CustomJSONEncoder",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "lib.utils.print_log",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torchinfo.summary",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "lib.utils.print_log",
"line_number": 328,
"usage_type": "call"
}
] |
12019045259
|
from PyQt5.QtWidgets import QTableView, QPushButton, QHeaderView, QDialog, QVBoxLayout
from PyQt5.QtCore import Qt
from PyQt5.QtSql import QSqlDatabase, QSqlTableModel, QSqlQuery
class HistoryWindow(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("History")
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
self.db = None
self.table_model = None
self.table_view = QTableView()
self.table_view.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.table_view.horizontalHeader().setStretchLastSection(1)
self.table_view.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.load_data()
self.btn_clear_history = QPushButton("Clear")
self.layout = QVBoxLayout()
self.layout.addWidget(self.table_view)
self.layout.addWidget(self.btn_clear_history)
self.setLayout(self.layout)
self.setGeometry(400, 200, 400, 600)
self.btn_clear_history.clicked.connect(self.clear_history)
    def load_data(self):
        # Creating the default connection more than once makes Qt warn about
        # a duplicate connection, so only set up the database on the first call.
        if self.db is None:
            self.db = QSqlDatabase.addDatabase('QSQLITE')
            self.db.setDatabaseName('history.db')
            self.db.open()
self.table_model = QSqlTableModel()
self.table_model.setTable("history")
self.table_model.select()
self.table_view.setModel(self.table_model)
def clear_history(self):
query = QSqlQuery()
query.exec_("DELETE FROM history")
if query.isActive():
print("Records deleted successfully")
else:
print("Error deleting records: ", query.lastError().text())
self.load_data()
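if __name__ == '__main__':
    # Minimal manual-test sketch, an assumption rather than part of the
    # original module: expects a history.db with a `history` table in the
    # working directory.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = HistoryWindow()
    window.exec_()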
|
umraan-xm/Image-Based-Equation-Solver
|
HistoryWindow.py
|
HistoryWindow.py
|
py
| 1,767 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.WindowContextHelpButtonHint",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableView",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeToContents",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeToContents",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtSql.QSqlDatabase.addDatabase",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtSql.QSqlDatabase",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtSql.QSqlTableModel",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtSql.QSqlQuery",
"line_number": 44,
"usage_type": "call"
}
] |
17399515937
|
from sys import argv
from special.mode import Mode
from special.settings import Settings
class BadMode(Exception):
pass
class BadCommandLineArguments(Exception):
pass
class Run:
def __init__(self, mode: Mode):
if len(argv) > 1:
self.parse_arguments(argv[1:])
else:
self.parse_mode(mode)
def parse_arguments(self, args):
nicks_path = 'nicks_test'
play_path = 'play_test'
games_path = 'games_test'
chat_path = 'chat_test'
testing_data_path = 'testing'
backup_testing_path = 'backup testing'
network_name = 'neural network'
if args[0] == '--unit-tests':
Settings.game_mode = Mode.UnitTest
self.start_unit_tests()
elif args[0] == '--evolution-tests':
from holdem.name_manager import NameManager
from holdem.play.play_manager import PlayManager
Settings.game_mode = Mode.Evolution
NameManager.NicksPath = nicks_path
PlayManager.PlayPath = play_path
PlayManager.GenCount = 30
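            # start_evolution args: games, seats per table, total players, starting money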
self.start_evolution(100, 9, 27, 1000)
NameManager.remove_folder()
elif args[0] == '--parsing-tests':
from data.game_parser import GameParser, PokerGame
Settings.game_mode = Mode.Parse
PokerGame.converted_games_folder = games_path
PokerGame.converted_chat_folder = chat_path
games = GameParser.parse_dir(testing_data_path, True, True)
assert len(games) == 6
GameParser.copy_dir(backup_testing_path, testing_data_path)
PokerGame.load_dir(testing_data_path)
elif args[0] == '--learning-tests':
from learning.learning import Learning
from learning.data_sets.decision_model.poker_decision import PokerDecision
Settings.game_mode = Mode.Learning
learn = Learning()
learn.create_data_set(PokerDecision)
learn.add_data_set(testing_data_path)
learn.save_data_set(network_name)
learn.load_data_set(network_name)
learn.learning(network_name)
elif args[0] == '--network-play-tests':
from holdem.game.game import Game
from holdem.play.play_manager import PlayManager
from holdem.player.neural_network.net1_net2_player import Net1Net2Player
Settings.game_mode = Mode.Testing
PlayManager.PlayPath = play_path
game = Game()
for _ in range(8):
game.add_bot_player()
game.add_nn_player(network_name, Net1Net2Player)
PlayManager.remove_folder()
else:
raise BadCommandLineArguments(str(args))
def parse_mode(self, mode):
Settings.game_mode = mode
if mode == Mode.GameEngine:
from holdem.game.game_manager import GameManager
# PlayManager.standings()
GameManager().run()
elif mode == Mode.Parse:
from data.game_parser import GameParser, PokerGame
# GameParser.parse_dir('pack0')
GameParser.parse_dir('pack1', False, False)
# game.save()
# game.convert()
# print(game)
# PokerGame.load('hh.txt')
elif mode == Mode.Evolution:
self.start_evolution(100000, 9, 999, 10000)
elif mode == Mode.Testing:
# from learning.neural_network import NeuralNetwork
# NeuralNetwork.PokerDecision.Bubble(100, 9).show()
from time import sleep
from datetime import datetime
from pickle import load
from statistics import mean
from holdem.game.game import Game
from holdem.player.neural_network.net1_net2_player import Net1Net2Player
from holdem.player.neural_network.net3_player import Net3Player
from holdem.player.neural_network.net4_player import Net4Player
from holdem.player.neural_network.net5_player import Net5Player
from holdem.player.neural_network.net6_player import Net6Player
from holdem.player.neural_network.net7_player import Net7Player
from holdem.player.neural_network.net8_player import Net8Player
from holdem.player.neural_network.net9_player import Net9Player
from holdem.play.play_manager import PlayManager
start_time = datetime.now()
if 1:
for _id in range(400):
game = Game(players=100)
for _ in range(92):
game.add_bot_player()
game.add_nn_player('nn2', Net1Net2Player)
game.add_nn_player('nn3', Net3Player)
game.add_nn_player('nn4', Net4Player)
game.add_nn_player('nn5', Net5Player)
game.add_nn_player('nn6', Net6Player)
game.add_nn_player('nn7', Net7Player)
game.add_nn_player('nn8', Net8Player)
game.add_nn_player('nn9', Net9Player)
print('Start game #', _id + 1)
while not game.game_finished:
sleep(0.01)
                with open('networks/plays', 'rb') as f:
                    plays = load(f)
plays = sorted([(k, v) for k, v in plays.items()], key=lambda k: mean(k[1]))
for i, play in enumerate(plays):
pl = PlayManager.get_play_by_name(play[0])
print(f'{i+1:>4}. {round(mean(play[1]), 2):>6} {play[0]:>10} '
f'(ex {pl.exemplar:>6}) {"*" * pl.wins}')
print('It took', datetime.now() - start_time)
elif mode == Mode.UnitTest:
self.start_unit_tests()
elif mode == Mode.Learning:
from learning.learning import Learning
from learning.data_sets.decision_model.poker_decision_10 import PokerDecision10
from data.game_parser import GameParser
from datetime import datetime
learn = Learning()
learn.create_data_set(PokerDecision10)
start = datetime.now()
# GameParser.parse_dir('pack1', False, False)
# learn.add_data_set('pack1')
# learn.save_data_set('nn11 common cards.txt')
learn.load_data_set('nn11 common cards.txt')
learn.learning('nn11 200x100x100')
end = datetime.now()
print('Learning took', end - start)
elif mode == Mode.Search:
from data.game_parser import GameParser
GameParser.search_in_dir('pack1', 'Seat 10')
else:
raise BadMode('Bad mode')
@staticmethod
def start_unit_tests():
from unit_tests.testing import UnitTesting
UnitTesting.test_all()
@staticmethod
def start_evolution(games: int, seats_on_table: int, players: int, start_money: int):
from holdem.play.play_manager import PlayManager
from learning.evolution import Evolution
from core.blinds.scheme.schemes import Schemes
PlayManager.standings()
Evolution(games, seats_on_table, players, start_money, Schemes.Rapid.value).run()
|
aaaaaa2493/poker-engine
|
src/special/run.py
|
run.py
|
py
| 7,276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "special.mode.Mode",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.UnitTest",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Evolution",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "holdem.name_manager.NameManager.NicksPath",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "holdem.name_manager.NameManager",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "holdem.play.play_manager.PlayManager.PlayPath",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "holdem.play.play_manager.PlayManager.GenCount",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "holdem.name_manager.NameManager.remove_folder",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "holdem.name_manager.NameManager",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Parse",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "data.game_parser.PokerGame.converted_games_folder",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "data.game_parser.PokerGame",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "data.game_parser.PokerGame.converted_chat_folder",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "data.game_parser.PokerGame",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "data.game_parser.GameParser.parse_dir",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "data.game_parser.GameParser",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "data.game_parser.GameParser.copy_dir",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "data.game_parser.GameParser",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "data.game_parser.PokerGame.load_dir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "data.game_parser.PokerGame",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Learning",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "learning.learning.Learning",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "learning.data_sets.decision_model.poker_decision.PokerDecision",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Testing",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "holdem.play.play_manager.PlayManager.PlayPath",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "holdem.game.game.Game",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "holdem.player.neural_network.net1_net2_player.Net1Net2Player",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "holdem.play.play_manager.PlayManager.remove_folder",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "special.settings.Settings.game_mode",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "special.settings.Settings",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.GameEngine",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "holdem.game.game_manager.GameManager",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "special.mode.Mode.Parse",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "data.game_parser.GameParser.parse_dir",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "data.game_parser.GameParser",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Evolution",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Testing",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "holdem.game.game.Game",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "holdem.player.neural_network.net1_net2_player.Net1Net2Player",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net3_player.Net3Player",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net4_player.Net4Player",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net5_player.Net5Player",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net6_player.Net6Player",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net7_player.Net7Player",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net8_player.Net8Player",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "holdem.player.neural_network.net9_player.Net9Player",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "holdem.play.play_manager.PlayManager.get_play_by_name",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "statistics.mean",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.UnitTest",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Learning",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "learning.learning.Learning",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "learning.data_sets.decision_model.poker_decision_10.PokerDecision10",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "special.mode.Mode.Search",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "special.mode.Mode",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "data.game_parser.GameParser.search_in_dir",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "data.game_parser.GameParser",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "unit_tests.testing.UnitTesting.test_all",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "unit_tests.testing.UnitTesting",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "holdem.play.play_manager.PlayManager.standings",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "holdem.play.play_manager.PlayManager",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "learning.evolution.Evolution",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "core.blinds.scheme.schemes.Schemes.Rapid",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "core.blinds.scheme.schemes.Schemes",
"line_number": 182,
"usage_type": "name"
}
] |
71000611389
|
import mob
import room
from time import sleep
import pygame
pygame.init()
size = (800, 600)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Bran's Cool Game")
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
BLUE = ( 0, 0, 255)
# Player Initialization
player_state = 0 # none (0), inv (1), menu (2), event (3), movement (4), choice (5)
mc = mob.Mob("Bran") # Initialize Player character
img = pygame.image.load("base.bmp").convert() #Import image
img.set_colorkey(WHITE) # bg transparency
mc_rect = img.get_rect() # Use this rect to do collision detection!
mc_rect.x = mc.x
mc_rect.y = mc.y
inv_open = 0
menu_wait = 0
# Move Rect first, check for collision, then move if safe
#Room Initialization
curr_room = 0 # Index of current room
rooms = []
rooms.append(room.Room("test_room"))
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
pygame.key.set_repeat(1, 1)
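# set_repeat(1, 1) makes held keys fire repeated KEYDOWN events; note the
# movement below polls pygame.key.get_pressed() each frame, which works
# regardless of the repeat setting.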
while not done:
# --- Main event loop
if pygame.event.get(pygame.QUIT):
done = True
blocked = [0, 0, 0, 0] # Down, Up, Left, and Right block states
keys = pygame.key.get_pressed() # Returns a list of key statuses
if keys[pygame.K_SPACE]:
print("Rect", mc_rect.x, " ", mc_rect.y)
print("MC", mc.x, " ", mc.y)
if keys[pygame.K_x]:
if inv_open == 1:
inv_open = 0
sleep(1)
else:
inv_open = 1
sleep(1)
if keys[pygame.K_DOWN]:
        # Earlier plan, superseded by the rect-based check below: compare the
        # shifted y coordinate against the wall bounds, set blocked[...] on a
        # match, gate the move on it, then reset blocked each frame.
mc_rect = mc_rect.move(0, 5)
if mc_rect.collidelist(rooms[curr_room].rects) != -1:
print("COLLISION D")
mc_rect = mc_rect.move(0, -5)
else:
mc.y += 5
if keys[pygame.K_UP]:
mc_rect = mc_rect.move(0, -5)
if mc_rect.collidelist(rooms[curr_room].rects) != -1:
print("COLLISION U")
mc_rect = mc_rect.move(0, 5)
else:
mc.y -= 5
if keys[pygame.K_LEFT]:
mc_rect = mc_rect.move(-5, 0)
if mc_rect.collidelist(rooms[curr_room].rects) != -1:
print("COLLISION L")
mc_rect = mc_rect.move(5, 0)
else:
mc.x -= 5
if keys[pygame.K_RIGHT]:
mc_rect = mc_rect.move(5, 0)
if mc_rect.collidelist(rooms[curr_room].rects) != -1:
print("COLLISION R")
mc_rect = mc_rect.move(-5, 0)
else:
mc.x += 5
# --- Game logic should go here
# Wall collision test
# --- Drawing code should go here
# bottom layer
screen.fill(WHITE)
rooms[curr_room].build_walls(screen)
'''Draw room function'''
'''Mobs and items draw functions'''
'''MC draw function'''
screen.blit(img, [mc.x, mc.y], [0, 0, 100, 100]) # x1, y1, w, h (of image)
if inv_open == 1:
pygame.draw.rect(screen, BLACK, [0, 400, 800, 750]) # Dialog/Inventory BlkBox
pygame.draw.rect(screen, WHITE, [25, 425, 125, 150]) # Dialog/Inventory Pic
pygame.draw.rect(screen, WHITE, [400, 450, 100, 100]) # Dialog/Inventory Box1
pygame.draw.rect(screen, WHITE, [525, 450, 100, 100]) # Dialog/Inventory Box2
pygame.draw.rect(screen, WHITE, [650, 450, 100, 100]) # Dialog/Inventory Box3
pygame.draw.rect(screen, WHITE, [275, 450, 100, 100]) # Dialog/Inventory Box4
# topmost layer
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
pygame.quit()
|
heroicbran/games
|
Bran_s Pygame Engine/main.py
|
main.py
|
py
| 4,050 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mob.Mob",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "room.Room",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.set_repeat",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_x",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 137,
"usage_type": "call"
}
] |
69958425789
|
"""Test delete model
@Author: NguyenKhacThanh
"""
import pytest
from voluptuous import Schema, All, Required
from tests.api import APITestCase
@pytest.mark.usefixtures("inject_client", "inject_params_model_regression")
class DeleteModelTestCase(APITestCase):
def url(self):
return "/regression"
def method(self):
return "DELETE"
def test_success(self):
# push data
res = self.client.post(
"/api/v1/regression/huber",
json=self.params["huber"]
)
code, body = self.call_api(
url=f"/regression/{res.get_json()['id']}",
)
schema = Schema({
Required("message"): str
})
schema(body)
assert 200 == code, body["message"]
def test_id_model_not_found(self):
code, body = self.call_api(
url="/regression/123456781234567812345678",
)
schema = Schema({
Required("message"): str
})
schema(body)
assert 400 == code, body["message"]
|
magiskboy/wipm
|
tests/api/test_delete_model_regression.py
|
test_delete_model_regression.py
|
py
| 1,047 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tests.api.APITestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "voluptuous.Schema",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "voluptuous.Required",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "voluptuous.Required",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.mark.usefixtures",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 9,
"usage_type": "attribute"
}
] |
41168902007
|
from utils import get_input_lines
from numpy import median
from collections import Counter
lines = get_input_lines("input7.txt")
input_list = [int(x) for x in lines[0].split(",")]
# pt1
print(sum(abs(x - median(input_list)) for x in input_list))
# pt2
mi, mx = min(input_list), max(input_list)
fuel_required = Counter({ elem:0 for elem in range(mi, mx+1) })
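# sum_one_to_n(n) is the n-th triangular number: the total fuel to move distance n
# when each extra step costs one more unit than the last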
sum_one_to_n = lambda n: (n * (n + 1)) // 2
for i in range(mi, mx+1):
for e in input_list:
fuel_required[i] += sum_one_to_n(abs(e - i))
print(fuel_required.most_common()[-1])
|
sakshaat/aoc
|
solution7.py
|
solution7.py
|
py
| 551 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.get_input_lines",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
}
] |
6117900800
|
import os
import sys
import numpy as np
import pandas as pd
import argparse
import pybedtools
import re
import pyBigWig as pbw
import time
import urllib.request
def main():
start = time.time()
print("Generating consensus peak file...")
args = parse_args()
Input_peaks=pd.read_csv(args.Peaks, sep="\t")
# Current peaks =
for i in range(0, len(Input_peaks.index)):
urllib.request.urlretrieve(Input_peaks.iloc[i]["URL"], "../tmp/tmp_peak.bed.gz")
Current_peaks=pybedtools.BedTool("../tmp/tmp_peak.bed.gz")
Current_peaks_pd=pd.read_table(Current_peaks.fn)
Current_peaks_pd.to_csv("../tmp/Concat_peaks.bed", mode='a', header=False, sep="\t", index=False)
# Formatted peaks =
print(str(len(Input_peaks.index))+" peak files read in "+str(time.time()-start)+" seconds...")
Concat_peaks=pybedtools.BedTool("../tmp/Concat_peaks.bed")
Concat_peaks_sorted=Concat_peaks.sort()
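    # merge(d=0) collapses overlapping and book-ended intervals into single consensus peaks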
Concat_peaks_merged=Concat_peaks_sorted.merge(d=0)
Concat_peaks_merged.saveas(args.outdir+args.prefix+"_Consensus_peaks.bed")
def parse_args():
""" Load command line args """
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', metavar="<str>", help=("Output prefix"), type=str, required=True)
parser.add_argument('--Peaks', metavar="<str>", help=("Input peak URLs"), type=str, required=True)
parser.add_argument('--outdir', metavar="<str>", help=("Output directory"), type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
# python Generate_consensus_peaks.py --prefix BLUEPRINT --Peaks ~/BLUEPRINT_peak_URLs.tsv --outdir ~/BLUEPRINT_peaks/
#
|
xyg123/SNP_enrich_preprocess
|
scripts/CHEERS_preprocessing/Generate_consensus_peaks.py
|
Generate_consensus_peaks.py
|
py
| 1,717 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pybedtools.BedTool",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_table",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pybedtools.BedTool",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 39,
"usage_type": "call"
}
] |
10420803323
|
from __future__ import annotations
from typing import TYPE_CHECKING
from randovania.game_description.pickup import pickup_category
from randovania.game_description.pickup.pickup_entry import PickupEntry, PickupGeneratorParams, PickupModel
from randovania.game_description.resources.location_category import LocationCategory
from randovania.games.prime3.patcher import corruption_items
from randovania.generator.pickup_pool import PoolResults
if TYPE_CHECKING:
from randovania.game_description.resources.resource_database import ResourceDatabase
ENERGY_CELL_CATEGORY = pickup_category.PickupCategory(
name="energy_cell", long_name="Energy Cell", hint_details=("an ", "energy cell"), hinted_as_major=True, is_key=True
)
def add_energy_cells(
resource_database: ResourceDatabase,
) -> PoolResults:
"""
:param resource_database:
:return:
"""
item_pool: list[PickupEntry] = []
for i in range(9):
item_pool.append(create_energy_cell(i, resource_database))
return PoolResults(item_pool, {}, [])
def create_energy_cell(
cell_index: int,
resource_database: ResourceDatabase,
) -> PickupEntry:
return PickupEntry(
name=f"Energy Cell {cell_index + 1}",
progression=((resource_database.get_item(corruption_items.ENERGY_CELL_ITEMS[cell_index]), 1),),
extra_resources=(
(resource_database.get_item(corruption_items.ENERGY_CELL_TOTAL_ITEM), 1),
(resource_database.get_item(corruption_items.PERCENTAGE), 1),
),
model=PickupModel(
game=resource_database.game_enum,
name=corruption_items.ENERGY_CELL_MODEL,
),
pickup_category=ENERGY_CELL_CATEGORY,
broad_category=pickup_category.GENERIC_KEY_CATEGORY,
generator_params=PickupGeneratorParams(
preferred_location_category=LocationCategory.MAJOR,
probability_offset=0.25,
),
)
|
randovania/randovania
|
randovania/games/prime3/generator/pickup_pool/energy_cells.py
|
energy_cells.py
|
py
| 1,931 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_category.PickupCategory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.pickup.pickup_category",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.resource_database.ResourceDatabase",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_entry.PickupEntry",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "randovania.generator.pickup_pool.PoolResults",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "randovania.generator.pickup_pool.PoolResults",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.resources.resource_database.ResourceDatabase",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_entry.PickupEntry",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items.ENERGY_CELL_ITEMS",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items.ENERGY_CELL_TOTAL_ITEM",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items.PERCENTAGE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_entry.PickupModel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items.ENERGY_CELL_MODEL",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime3.patcher.corruption_items",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_category.GENERIC_KEY_CATEGORY",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.pickup.pickup_category",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_entry.PickupGeneratorParams",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.resources.location_category.LocationCategory.MAJOR",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "randovania.game_description.resources.location_category.LocationCategory",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.pickup.pickup_entry.PickupEntry",
"line_number": 37,
"usage_type": "name"
}
] |
19716184286
|
import os, sys, subprocess, datetime, re, shlex
class TortoiseSVNManager:
def __init__(self, tortoisesvn=None):
        if tortoisesvn is None:
print("\n\n None Path - TortoiseProc.exe")
os.system("Pause")
sys.exit()
else:
self.tortoisesvn = tortoisesvn
def makecommitmsg(self, buildversion, commitmsg):
# Make Commit Message
try:
with open(r'./commitmsg.txt', "w") as f:
buildversion = re.sub(",", ", ", buildversion)
f.write(commitmsg + "\n\n" + buildversion)
except FileNotFoundError:
return False
return True
def commit(self, projectlist):
# Ensure TortoiseProc exists
if not os.path.isfile(self.tortoisesvn + '\\TortoiseProc.exe'):
raise Exception('TortoiseProc.exe not found. path=' + self.tortoisesvn + '\\TortoiseProc.exe')
commitmsgpath = os.getcwd()
os.chdir(self.tortoisesvn)
for project in projectlist:
if project["isuse"] == "1":
print("PROGRESSING COMMIT - " + project["project_path"] + "\n")
command = 'TortoiseProc.exe'
command += ' /command:commit'
command += (' /path:' + project["project_path"])
command += (' /logmsgfile:"' + commitmsgpath + '\\commitmsg.txt"')
command += ' /closeonend:0'
os.system(command)
print("\n")
return True
def run(self, buildversion=None, projectlist=None, commitmsg=None):
summary = ''
# File header
start = datetime.datetime.now()
print('\n' * 3)
summary += self.log('STARTED SVN COMMIT - ' + start.strftime("%Y-%m-%d %H:%M:%S"))
# Make Commit Message
if (buildversion is not None) and (commitmsg is not None):
makeOk = self.makecommitmsg(buildversion, commitmsg)
if not makeOk:
self.log('COMMIT: FAILED - FILE NOT FOUND', start)
sys.exit(100)
summary += self.log('COMMIT: SUCCEEDED - MAKE COMMIT MESSAGE', start)
else:
summary += self.log('COMMIT: NOT SPECIFIED')
# Commit
if projectlist is not None:
commitOK = self.commit(projectlist)
if not commitOK:
self.log('COMMIT: FAILED', start)
sys.exit(100)
summary += self.log('COMMIT: SUCCEEDED', start)
else:
summary += self.log('COMMIT: NOT SPECIFIED - PROJECT LIST')
summary += self.log('COMMIT: *** FINISH ***', start)
# Build summary
print('\n\n' + '-' * 80)
print(summary)
print('-' * 80)
def log(self, message, start=None):
timestamp = ''
numsecs = ''
if start is not None:
split = datetime.datetime.now()
diff = split - start
timestamp = split.strftime("%Y-%m-%d %H:%M:%S") + '\t'
numsecs = ' (' + str(diff.seconds) + ' seconds)'
msg = timestamp + message + numsecs + '\n\n'
print('=' * 10 + '> ' + msg)
return msg
|
Nohhhhhh/PUBLIC
|
Project/AutoDeployment/AutoDeployment/tortoisesvnmanager.py
|
tortoisesvnmanager.py
|
py
| 3,153 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "attribute"
}
] |
29216462786
|
from enum import Enum, unique
from sqlalchemy import (
Column, Table, MetaData, Integer, String, ForeignKey, Enum as PgEnum, DateTime,
PrimaryKeyConstraint,
UniqueConstraint
)
convention = {
'all_column_names': lambda constraint, table: '_'.join([
column.name for column in constraint.columns.values()
]),
'ix': 'ix__%(table_name)s__%(all_column_names)s',
'uq': 'uq__%(table_name)s__%(all_column_names)s',
'ck': 'ck__%(table_name)s__%(constraint_name)s',
'fk': 'fk__%(table_name)s__%(all_column_names)s__%(referred_table_name)s',
'pk': 'pk__%(table_name)s'
}
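# SQLAlchemy expands these templates to auto-name constraints; e.g. the unique
# constraint on shop_unit_revisions (shop_unit_id, date) below becomes
# uq__shop_unit_revisions__shop_unit_id_date.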
metadata = MetaData(naming_convention=convention)
@unique
class ShopUnitType(Enum):
OFFER = 'OFFER'
CATEGORY = 'CATEGORY'
shop_unit_ids_table = Table(
'shop_unit_ids', metadata,
Column('id', String, primary_key=True),
)
shop_unit_revisions_table = Table(
'shop_unit_revisions', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('date', DateTime, nullable=False),
Column('shop_unit_id', String,
ForeignKey('shop_unit_ids.id', ondelete='CASCADE', onupdate='RESTRICT'), nullable=False),
Column('name', String, nullable=False),
Column('price', Integer, nullable=True),
Column('type', PgEnum(ShopUnitType, name='shop_unit_type'), nullable=False),
UniqueConstraint('shop_unit_id', 'date', name='uq__shop_unit_revisions__shop_unit_id_date'),
)
relations_table = Table(
'relations', metadata,
Column('child_revision_id', Integer,
ForeignKey('shop_unit_revisions.id', ondelete='CASCADE', onupdate='RESTRICT'),
nullable=False),
Column('parent_id', String,
ForeignKey('shop_unit_ids.id', ondelete='RESTRICT', onupdate='CASCADE'), nullable=False),
UniqueConstraint('child_revision_id', 'parent_id'),
PrimaryKeyConstraint('child_revision_id', name='pk__relations'),
)
|
Dest0re/backend-school2022
|
megamarket/db/schema.py
|
schema.py
|
py
| 1,911 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.MetaData",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "enum.unique",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Enum",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.UniqueConstraint",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.UniqueConstraint",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 54,
"usage_type": "call"
}
] |
22079658709
|
import numpy as np
from termcolor import colored
import matplotlib.pyplot as plt
filename = 'scan1.txt'
file = open(filename, 'r')
lines = file.readlines()
inittime=lines[0]
endtime=lines[1]
print('# of lines',len(lines))
ADCout=[]
ADCoutstepsMean=[]
ADCoutstepsStd=[]
i=2
data=len(lines)
while i<data:
ADCout.append(float(lines[i]))
i+=1
xaxis=range(data-2)
plt.plot(xaxis, ADCout, marker='.', linestyle='')
plt.grid(True)
plt.title(filename)
plt.xlabel('sample')
plt.ylabel('ADC output (V)')
plt.tight_layout()
plt.savefig(filename[:-4]+'.png')
plt.show()
|
gpapad14/RPy_CROC
|
18bit_ADC_data/analysis.py
|
analysis.py
|
py
| 568 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
}
] |
5051482507
|
import csv
import sys
import getopt
import numpy as np
import pandas as pd
import nltk
def get_dataframe(filename):
return pd.read_table(filename)
def get_hfw():
word_file = open('./picked/pacifier.txt', 'r')
res = list()
for word in word_file.readlines():
word = word.split(" ")[0]
res.append(word)
return res
def get_hfw_weight():
word_file = open('./picked/pacifier.txt', 'r')
res = list()
for word in word_file.readlines():
for weigth in word.split(" "):
try:
res.append(float(weigth))
except ValueError:
pass
return res
def get_adj():
word_file = open('./picked/pacifier_a.txt', 'r')
res_p = list()
res_n = list()
for word in word_file.readlines():
is_positive = False
if "1" in word.split(" ") or "1\n" in word.split(" "):
is_positive = True
word = word.split(" ")[0]
if is_positive:
res_p.append(word)
else:
res_n.append(word)
return (res_p, res_n)
def get_brands():
table = get_dataframe("/source_file_path/Problem_C_Data/pacifier.tsv")
product_titles = table[table['helpful_votes']!=0].product_title.tolist()
count = {}
for t in product_titles:
count[t] = count.get(t, 0) + 1
res = list()
for title in count:
if count[title] > 5:
res.append(title)
return res
def get_staratings(product_title):
table = get_dataframe("/source_file_path/Problem_C_Data/pacifier.tsv")
product_stars = table[table['product_title']==product_title].star_rating.tolist()
product_votes = table[table['product_title']==product_title].helpful_votes.tolist()
res = 0.0
count = 0
for i in range(len(product_stars)):
res += product_stars[i] * product_votes[i]
count += product_votes[i]
return res/count
def get_sentence(product_title):
table = get_dataframe("/source_file_path/Problem_C_Data/pacifier.tsv")
product_reviews = table[table['product_title']==product_title].review_body.tolist()
product_review_titles = table[table['product_title']==product_title].review_headline.tolist()
'''
product_reviews = table.review_body.tolist()
product_review_titles = table.review_headline.tolist()
'''
product_reviews.extend(product_review_titles)
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = list()
for paragraph in product_reviews:
try:
sentences.extend(tokenizer.tokenize(paragraph))
except:
continue
finally:
pass
return sentences
def get_pairs(product_title):
# print("----------"+product_title+"----------")
hfw = get_hfw()
product_reviews = get_sentence(product_title)
counts = {}
for rw in product_reviews:
tokens = nltk.word_tokenize(rw)
for hf_word in hfw:
if hf_word in tokens:
pos_tags = nltk.pos_tag(tokens)
last_token = ""
for token, pos in pos_tags:
if pos == "JJ" or pos == "JJS" or pos == "JJR":
tmp_pair=(hf_word.lower(), token.lower())
if last_token != "not" and last_token != "barely" and last_token != "hardly":
counts[tmp_pair] = counts.get(tmp_pair, 0) + 1
last_token = token
return counts
def compute_vector(brandname):
adjs = get_adj()
positive_adj = adjs[0]
negative_adj = adjs[1]
dimension = get_hfw()
pair_counts = get_pairs(brandname)
items = list(pair_counts.items())
items.sort(key=lambda x:x[1], reverse=True)
vector = []
# each dimension
for d in dimension:
val = 0
adj_count = 0
dimension_score = 0
        # iterate over (word, adjective) pairs to accumulate this dimension's sentiment
for pairs_ct in items:
pairs, count = pairs_ct
count = int(count)
if pairs[0] == d:
if pairs[1] in positive_adj:
val += 1 * count
elif pairs[1] in negative_adj:
val -= 1 * count
adj_count += count
if adj_count != 0:
dimension_score = val / adj_count
dimension_res = (d, dimension_score)
vector.append(dimension_res)
return vector
def compute_value(brandname):
vector = compute_vector(brandname)
value = 0.0
weights = get_hfw_weight()
total = 0.0
for w in weights:
total += w
st_weight = list()
for w in weights:
st_weight.append(w/total)
for i in range(len(vector)):
value += vector[i][1] * st_weight[i]
return value
def main():
items = get_brands()
score_line = ""
star_line = ""
for i in items:
score = compute_value(i)
star = get_staratings(i)
if True: #star < (20*score+2.5) and star > (1.25*16 / 3)*(score+0.125):
score_line += str(score) + ", "
star_line += str(star) + ", "
print(score_line)
print(star_line)
# main()
# compute_vector()
# get_sentence("samsung smh1816s 1.8 cu. ft. stainless steel over-the-range pacifier")
main()
|
WXM99/DataMiningProject
|
python_scripts/project/script/text_based_analysis/product_analysis/stat_product_score.py
|
stat_product_score.py
|
py
| 5,220 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_table",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nltk.data.load",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "nltk.pos_tag",
"line_number": 97,
"usage_type": "call"
}
] |
23937560939
|
import torch
import torch.nn as nn
# Tuple is structured by (filters, kernel_size, stride)
'''
Information about architecture config:
Tuple is structured by (filters, kernel_size, stride)
Every conv is a same convolution.
List is structured by "B" indicating a residual block followed by the number of repeats
"S" is for scale prediction block and computing the yolo loss
"U" is for upsampling the feature map and concatenating with a previous layer
'''
# ["B", number of repeats]
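# e.g. (32, 3, 1) is a 3x3 same-convolution with 32 filters and stride 1,
# and ["B", 8] repeats the residual block eight times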
config = [
(32, 3, 1),
(64, 3, 2),
["B", 1],
(128, 3, 2),
["B", 2],
(256, 3, 2),
["B", 8],
(512, 3, 2),
["B", 8],
(1024, 3, 2),
["B", 4], # To this point is Darknet-53
(512, 1, 1),
(1024, 3, 1),
"S",
(256, 1, 1),
"U",
(256, 1, 1),
(512, 3, 1),
"S",
(128, 1, 1),
"U",
(128, 1, 1),
(256, 3, 1),
"S",
]
class CNNBlock(nn.Module):
def __init__(self,in_channels,out_channels,bn_act=True,**kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels,out_channels,bias=not bn_act,**kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.leaky = nn.LeakyReLU(0.1)
self.use_bn_act = bn_act
def forward(self,x):
if self.use_bn_act:
            return self.leaky(self.bn(self.conv(x)))
        else:
return self.conv(x)
class ResidualBlock(nn.Module):
def __init__(self,channels,use_residual=True,num_repeats = 1):
super().__init__()
self.layers = nn.ModuleList()
        for _ in range(num_repeats):
self.layers +=[
CNNBlock(channels,channels//2,kernel_size=1),
CNNBlock(channels//2,channels,kernel_size=3,padding=1)
]
self.use_residual = use_residual
self.num_repeats = num_repeats
    def forward(self, x):
for layers in self.layers:
x = layers(x)+x if self.use_residual else layers(x)
return x
class ScalePrediction(nn.Module):
pass
class YOLOv3(nn.Module):
pass
|
1zzc/yolov3_achieve
|
model.py
|
model.py
|
py
| 1,917 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 88,
"usage_type": "name"
}
] |
38165912553
|
import warnings
from typing import List
import numpy as np
from scipy import linalg
class Tensor(np.ndarray):
def __new__(cls, num_modes: int, modes: tuple[int], data: np.ndarray):
obj = np.asarray(data).view(cls)
obj.num_modes = num_modes
obj.modes = np.asarray(modes)
        if np.any(obj.modes <= 0): raise ValueError(
            "'modes' must contain strictly positive values; if any mode is 1, consider a smaller num_modes")
return obj
    # Kept so the data can be accessed via .data, mimicking rTensor. Not really needed; to be deprecated.
@property
def data(self):
return self[...]
def is_zero_tensor(self):
if np.sum(self.data == 0) == np.prod(np.asarray(self.modes)): return True
return False
def astensor(array: np.ndarray) -> Tensor:
modes = array.shape
num_modes = len(modes)
return Tensor(num_modes=num_modes, modes=modes, data=array)
def unfold(tensor: Tensor, row_idx: List[int], col_idx: List[int], order='F') -> Tensor:
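    # e.g. unfolding a (2, 3, 4) tensor with row_idx=[0], col_idx=[1, 2] yields a 2 x 12 matrix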
rs = np.asarray(row_idx)
cs = np.asarray(col_idx)
# if not rs or not cs: raise ValueError("row and column indices must be specified")
    num_modes = tensor.num_modes  # indices start at zero
if len(rs) + len(cs) != num_modes:
raise ValueError("Incorrect number of indices. Number of modes not reached.")
if np.any(rs < 0) or np.any(rs > num_modes - 1) or np.any(cs < 0) or np.any(cs > num_modes - 1):
raise ValueError(
"Illegal indices specified. 'row_idx' and 'col_idx' must be positive and strictly less than 'num_modes'.")
perm = np.array(list(rs) + list(cs))
if np.any(np.sort(perm) != np.arange(num_modes)):
raise ValueError("Missing and/or repeated indices")
modes = tensor.modes
mat = tensor.data
new_modes = np.array([np.prod(modes[rs]), np.prod(modes[cs])])
    mat = np.transpose(tensor.data, perm)  # may need to subtract 1 from perm for the indices
    mat = mat.reshape(new_modes, order=order)  # Fortran-style rearrangement, like dim() <- dim in R:
# https://rstudio.github.io/reticulate/reference/array_reshape.html
return astensor(mat)
def rs_unfold(tensor: Tensor, m: int, order='F') -> Tensor:
assert 0 <= m < tensor.num_modes, f"'m' must be a valid mode of the tensor, not {m}."
rs = np.asarray([m])
cs = np.asarray([i for i in range(tensor.num_modes) if i != m])
return unfold(tensor, row_idx=rs, col_idx=cs, order=order)
# Validated by manual testing
def superdiagonal_tensor2(num_modes, length, elements=1):
modes = [length] * num_modes
arr = np.zeros(modes, dtype=np.float32)
if isinstance(elements, int):
elements = [elements] * length
for i in range(length):
indices = [i] * num_modes
arr[tuple(indices)] = elements[i]
return astensor(arr)
# The original implementation takes a list as argument and multiplies its elements together, element-wise.
# The operation is much simpler with an ndarray.
# Checked by hand.
# def hadamard_list(L: np.ndarray) -> np.ndarray:
# # TODO: Verif forme des tableaux, et de la nature de L``
# # return np.prod(L, axis=-1) # typiquement axis=2 dans notre cas
#
# retmat = L[0]
# for matrice in L[1:]:
# retmat = np.multiply(retmat, matrice)
# return retmat
def hadamard_list(L):
retmat = L[0]
for matrice in L[1:]:
        retmat = retmat * matrice  # avoid mutating the caller's first matrix in place
return retmat
def kronecker_list(L):
result = L[0]
for matrix in L[1:]:
result = np.kron(result, matrix)
return result
def superdiagonal_tensor(num_modes, length, elements=1):
modes = np.repeat(length, num_modes)
arr = np.zeros(modes)
    if isinstance(elements, int):
        elements = np.repeat(elements, length)
    for i in range(length):
        # set the superdiagonal entry directly instead of building an exec() string
        arr[tuple([i] * num_modes)] = elements[i]
return arr
def khatri_rao_list_2(L, reverse=False):
if reverse:
L = L[::-1]
retmat = L[0]
for matrice in L[1:]:
retmat = linalg.khatri_rao(retmat, matrice)
return retmat
def khatri_rao_list(L, reverse=False):
assert all([isinstance(x, np.ndarray) for x in L]), "All elements in L must be matrices"
ncols = [x.shape[1] for x in L]
assert len(set(ncols)) == 1, "All matrices in L must have the same number of columns"
ncols = ncols[0]
nrows = [x.shape[0] for x in L]
retmat = np.zeros((np.prod(nrows), ncols))
if reverse:
L = L[::-1]
for j in range(ncols):
Lj = [x[:, j] for x in L]
retmat[:, j] = kronecker_list(Lj)
return retmat
def khatri_rao_list_bis(L, reverse=False):
    # Check that all elements of L are matrices
    assert all(isinstance(matrix, np.ndarray) for matrix in L), "All elements of L must be matrices"
    # Check that all matrices have the same number of columns
    ncols = [matrix.shape[1] for matrix in L]
    assert len(set(ncols)) == 1, "All matrices must have the same number of columns"
    ncols = ncols[0]
    # Initialize the result matrix
    nrows = [matrix.shape[0] for matrix in L]
    retmat = np.zeros((np.prod(nrows), ncols))
    # Reverse the order of the matrices if reverse=True
    if reverse:
        L = L[::-1]
    # Fill the result matrix column by column using the Kronecker product
    for j in range(ncols):
        Lj = [matrix[:, j] for matrix in L]
        retmat[:, j] = kronecker_list(Lj)
return retmat
def ttl(tnsr, list_mat, ms=None):
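    # Tensor-times-list: m-mode multiply tnsr by each matrix in list_mat in sequence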
if ms is None or not isinstance(ms, (list, np.ndarray)):
raise ValueError("m modes must be specified as a vector")
if len(ms) != len(list_mat):
raise ValueError("m modes length does not match list_mat length")
num_mats = len(list_mat)
if len(set(ms)) != num_mats:
print("Consider pre-multiplying matrices for the same m for speed")
mat_nrows = [mat.shape[0] for mat in list_mat]
mat_ncols = [mat.shape[1] for mat in list_mat]
for i in range(num_mats):
mat = list_mat[i]
m = ms[i]
mat_dims = mat.shape
modes_in = tnsr.modes
if modes_in[m] != mat_dims[1]:
raise ValueError(f"Modes mismatch: tnsr.modes[{m}] != mat.shape[1]")
modes_out = modes_in.copy()
modes_out[m] = mat_dims[0]
tnsr_m = rs_unfold(tnsr, m=m).data
retarr_m = np.dot(mat, tnsr_m)
tnsr = rs_fold(retarr_m, m=m, modes=modes_out)
return tnsr
def fold(mat: Tensor | np.ndarray, row_idx: List[int], col_idx: List[int], modes: List[int], order='F'):
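    # Inverse of unfold: reshape mat back to the given modes and restore the axis order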
rs = row_idx
cs = col_idx
if not isinstance(mat, np.ndarray):
raise ValueError("mat must be of type 'numpy.ndarray'")
if mat.ndim != 2:
raise ValueError("mat must be a 2D matrix")
num_modes = len(modes)
if num_modes != len(rs) + len(cs):
raise ValueError("Number of modes does not match the sum of row and column space indices")
mat_modes = mat.shape
if mat_modes[0] != np.prod([modes[i] for i in rs]) or mat_modes[1] != np.prod([modes[i] for i in cs]):
raise ValueError("Matrix dimensions do not match Tensor modes")
# iperm = [modes.index(mode) + 1 for mode in rs + cs]
modes = list(modes)
iperm = rs + cs
# iperm = [modes.index(x) + 1 if x in modes else None for x in rs + cs]
# iperm = [np.where(np.array(modes) == mode)[0][0] if mode in modes else None for mode in rs + cs]
modes = np.asarray(modes)
mat = mat.reshape([modes[i] for i in rs] + [modes[i] for i in cs], order=order)
# folded_tensor = np.transpose(mat, iperm)
folded_tensor = np.moveaxis(mat, range(len(rs) + len(cs)), rs + cs)
    # mat = mat.reshape(new_modes, order='F')  # Fortran-style rearrangement, like dim() <- dim in R:
# https://rstudio.github.io/reticulate/reference/array_reshape.html return astensor(mat)
return astensor(folded_tensor)
def k_fold(mat: Tensor | np.ndarray, m: int, modes: List[int], order='F') -> Tensor:
num_modes = len(modes)
rs = [m]
    cs = [i for i in range(num_modes) if i != m]  # check whether m itself should move, or the index tied to m
return fold(mat, row_idx=rs, col_idx=cs, modes=modes, order=order)
def rs_fold(mat: Tensor | np.ndarray, m: int, modes: List[int], order='F') -> Tensor:
return k_fold(mat, m, modes, order)
|
lukbrb/pyTensor
|
pyTensor/tensorclass.py
|
tensorclass.py
|
py
| 8,507 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.ndarray",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "numpy.kron",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.khatri_rao",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.khatri_rao",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "numpy.prod",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.moveaxis",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 241,
"usage_type": "name"
}
] |
14012082445
|
import re
import numpy as np
import pickle
import spacy_udpipe
import spacy
import pandas as pd
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from tqdm import tqdm
from bs4 import BeautifulSoup
from collections import Counter
class VectorSpaceModel():
'''
Vector space information retrieval system
Baseline has:
- terms: word forms
- case normalization: no
- removing stopwords: no
- query construction: all word forms from "title"
- term weighting: natural
- document frequency weighting: none
- vector normalization: cosine
- similarity measurement: cosine
- relevance feedback: none
- query expansion: none
Options include:
stopwords, lemmas, stemming, lower-case,
pivoted document length, tf/df weighting
'''
def __init__(self, args, index):
self.run = args.run
self.output = args.output
self.stopwords = args.stopwords
self.lemmas = args.lemmas
self.stemming = args.stemming
self.lowercase = args.lowercase
self.pivoted = args.pivoted
self.tf_weighting = args.tf_weighting
self.df_weighting = args.df_weighting
self.index = index
self.lang = args.documents.split(".")[0][-2:]
self.query_terms = self.get_topic_terms(args.queries)
self.docs = self.get_docs(args.documents)
self.save_results()
def get_topic_terms(self, queries):
''' return dictionary of topic_num: [term for term in title] '''
with open(queries, 'r') as f:
topics = f.read()
soup = BeautifulSoup(topics, 'html.parser')
head = soup.contents[2]
topics = [item for item in head.children][1::2]
nums = [doc.num.contents[0] for doc in topics]
if self.lang == "en":
nlp = spacy.load("en_core_web_sm", exclude=['parser', 'ner'])
else:
nlp = spacy_udpipe.load("cs")
stopword_list = nlp.Defaults.stop_words
tokenizer = RegexpTokenizer(r'\w+')
if self.lemmas:
titles = [nlp(str(doc.title.contents[0])) for doc in topics]
else:
titles = [tokenizer.tokenize(doc.title.contents[0]) for doc in topics]
if self.lemmas:
titles = [[k.lemma_.lower() for k in doc] for doc in titles]
elif self.lowercase:
titles = [[k.lower() for k in doc] for doc in titles]
else:
titles = [[k for k in doc] for doc in titles]
if self.stopwords:
titles = [[k for k in doc if not k in stopword_list and k.isalpha()]
for doc in titles]
# only for English - Czech is pre-stemmed
if self.stemming:
stemmer = PorterStemmer()
titles = [[stemmer.stem(str(k)) for k in doc] for doc in titles]
query_terms = {num: title for num, title in zip(nums, titles)}
return query_terms
def get_tf_weight(self, tf, d, weighting='natural'):
'''
weighting options are as below
natural (default): tf_{t,d}
logarithm: 1 + log(tf_{t,d})
augmented: 0.5 + (0.5*tf_{t,d})/max_t(tf_{t,d})
'''
if self.tf_weighting:
weighting = self.tf_weighting
if weighting == 'natural': tf_weight = tf
elif weighting == 'logarithm': tf_weight = 1 + np.log2(tf)
elif weighting == 'augmented':
tf_weight = 0.5 + ((0.5 * tf)/ max(Counter(d).values()))
return tf_weight
def get_df_weight(self, df, tf, weighting='no'):
'''
weighting options are as below
no (default): 1
idf: log(N/df_{t})
prob_idf: max(0, log((N-df)/df))
'''
if self.df_weighting:
weighting = self.df_weighting
if weighting == 'no': df_weight = 1
elif weighting == 'idf': df_weight = np.log2(len(self.docs)/df)
elif weighting == 'prob_idf':
df_weight = max(0, np.log2((len(self.docs) - df)/df))
return df_weight
def get_docs(self, docs_file):
''' returns list of tuples of (doc_id, [terms]) '''
docs_folder = docs_file.split(".")[0]+self.run+"/"
with open(docs_file, "r") as f:
filenames = [line.split(".")[0] for line in f.readlines()]
docs = []
for fn in filenames:
with open(docs_folder+fn+"_docs.bin", 'rb') as f:
collection = pickle.load(f)
for doc in collection:
docs.append((doc[0], doc[1]))
return docs
def similarity(self, query, length, k=1000):
'''
fast cosine score (IIR fig 7.1)
returns top k docs for query
'''
docs = self.docs
doc_dict = {doc[0]: doc[1] for doc in docs}
scores = pd.DataFrame(np.zeros((1,len(docs))), columns=[doc[0] for doc in docs])
for t in query:
try:
postings_list = self.index[t]
for d, tf in postings_list.items():
# just storing natural term frequency
scores[d] += self.get_tf_weight(tf, doc_dict[d]) \
* self.get_df_weight(len(postings_list), tf)
except KeyError:
pass
query_length = np.linalg.norm(list(Counter(query).values()))
scores = scores/(query_length*length)
scores = scores.to_numpy().reshape((len(docs),))
inds = np.argpartition(scores, -k)[-k:]
sorted_inds = inds[np.argsort(scores[inds])]
doc_nos = [docs[docid][0] for docid in sorted_inds]
return scores[sorted_inds][::-1], doc_nos[::-1]
def save_results(self):
'''
save results in TREC format, as described in section 5.3
(qid, iter, docno, rank, sim, run_id)
'''
iteration = "0"
run_id = self.run
print("Processing queries")
doc_dict = {doc[0]: list(Counter(doc[1]).values()) for doc in self.docs}
# cosine normalization
length = np.array([np.linalg.norm(counts) for counts in doc_dict.values()])
if self.pivoted: # value of a
if self.lang == "cs":
# values are computed as described in report
piv = 24.6788
else:
piv = 40.7795
length = self.pivoted*length + (1-self.pivoted)*piv
for (qid, query) in tqdm(self.query_terms.items()):
sim_scores, doc_nos = self.similarity(query, length)
results = [qid+"\t"+iteration+"\t"+doc_no+"\t"+str(i)+"\t"+str(sim)
+"\t"+run_id+"\n"
for i, (doc_no, sim) in enumerate(zip(doc_nos, sim_scores))]
with open(self.output, "a+") as f:
f.writelines(results)
|
awmcisaac/charles
|
winter/npfl103/A2/A1/model.py
|
model.py
|
py
| 6,816 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "spacy_udpipe.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.RegexpTokenizer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.argpartition",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 183,
"usage_type": "call"
}
] |
71927421627
|
import pymdicator.timeseries as ts
from pymdicator.indicators import Momentum, MACD, RSI
import numpy as np
import datetime
import pytest
import os
from pandas import read_csv
@pytest.fixture(params=[False, True])
def test_data(request, datadir):
if request.param:
pd = read_csv(datadir.join('stock_data.txt'))
dts = pd['Date'].tolist()
vals = pd["Close"].tolist()
else:
dts = [datetime.date(2018, 1, 1)]
for ii in range(1, 20):
dts.append(dts[0] + datetime.timedelta(ii))
vals = [100.0, 102.0, 99.0, 101.0, 103.0, 101.5, 103.0, 104.0, 103.5, 105,
106.0, 105.5, 108.0, 109.0, 111.0, 109.5, 112.0, 114.0, 113.5, 115]
pd = None
test_ts = ts.Timeseries(dts, vals, ts.TimeseriesType.PRICE, ts.TimeseriesSubType.ABSOLUTE, 1)
return {
'dts': dts,
'vals': vals,
'test_ts': test_ts,
'is_csv': request.param,
'pd' : pd}
@pytest.fixture
def dts(test_data):
return test_data['dts']
@pytest.fixture
def vals(test_data):
return test_data['vals']
@pytest.fixture
def test_ts(test_data):
return test_data['test_ts']
@pytest.fixture
def is_csv(test_data):
return test_data['is_csv']
@pytest.fixture
def pd(test_data):
return test_data['pd']
def test_latest_momentum(test_ts, vals, dts, is_csv, pd):
momIndicator = Momentum(12)
mom = momIndicator.calculate_current_ts(test_ts)
if not is_csv:
assert np.isclose(mom, 100.0 * 115.0 / 104.0)
else:
assert np.isclose(mom, 100.0*vals[-1] / vals[-13])
mom = momIndicator.calculate_current_df(pd)
assert np.isclose(mom, 100.0 * vals[-1] / vals[-13])
def test_latest_momentum_overlong(test_ts, vals):
momIndicator = Momentum(len(test_ts))
mom = momIndicator.calculate_current_ts(test_ts)
assert np.isnan(mom)
momIndicator = Momentum(len(test_ts) - 1)
mom = momIndicator.calculate_current_ts(test_ts)
assert np.isclose(mom, 100.0 * vals[-1] / vals[0])
def test_momentum_timeseries(test_ts, vals, dts, is_csv, pd):
momIndicator = Momentum(10)
mom_ts = momIndicator.calculate_timeseries_ts(test_ts)
if not is_csv:
assert np.isclose(mom_ts.values[0], 106.0)
assert np.isclose(mom_ts.values[1], 105.5/1.02)
assert np.isclose(mom_ts.values[2], 108.0/0.99)
assert np.isclose(mom_ts.values[3], 109.0/1.01)
else:
n_checks = min(20, len(vals)-10)
mom_ts_pd = momIndicator.calculate_timeseries_df(pd)
for ii in range(n_checks):
assert np.isclose(mom_ts.values[ii],
100.0 * vals[ii + 10] / vals[ii])
assert np.isclose(mom_ts_pd.values[ii],
100.0 * vals[ii + 10] / vals[ii])
def test_latest_momentum_outer(test_ts, vals, dts, is_csv, pd):
momIndicator = Momentum(12)
mom = momIndicator.calculate_current(test_ts)
if not is_csv:
assert np.isclose(mom, 100.0 * 115.0 / 104.0)
else:
assert np.isclose(mom, 100.0*vals[-1] / vals[-13])
mom_pd = momIndicator.calculate_current(pd)
assert np.isclose(mom_pd, 100.0*vals[-1] / vals[-13])
mom_dict = momIndicator.calculate_current({"a": pd})
assert np.isclose(mom_dict["a"], 100.0*vals[-1] / vals[-13])
def test_momentum_timeseries_outer(test_ts, vals, dts, is_csv, pd):
momIndicator = Momentum(10)
mom_ts = momIndicator.calculate_timeseries(test_ts)
if not is_csv:
assert np.isclose(mom_ts.values[0], 106.0)
assert np.isclose(mom_ts.values[1], 105.5/1.02)
assert np.isclose(mom_ts.values[2], 108.0/0.99)
assert np.isclose(mom_ts.values[3], 109.0/1.01)
else:
n_checks = min(20, len(vals)-10)
mom_pd = momIndicator.calculate_timeseries(pd)
mom_dict = momIndicator.calculate_timeseries({"a": pd})
for ii in range(n_checks):
assert np.isclose(mom_ts.values[ii],
100.0 * vals[ii + 10] / vals[ii])
assert np.isclose(mom_pd.values[ii],
100.0 * vals[ii + 10] / vals[ii])
assert np.isclose(mom_dict["a"].values[ii],
100.0 * vals[ii + 10] / vals[ii])
def test_latest_macd(test_ts, vals, is_csv, pd):
macd_calc = MACD()
(macd, signal) = macd_calc.calculate_current_ts(test_ts)
if not is_csv:
assert np.isnan(macd) and np.isnan(signal)
else:
# I can't be bothered to do full calculation,
# so make sure the values are sensible
slow_average = sum(vals[-26:]) / 26.0
fast_average = sum(vals[-12:]) / 12.0
assert abs(macd) <= abs(2*(fast_average - slow_average))
assert abs(signal) <= abs(2*(fast_average - slow_average))
(macd_df, signal_df) = macd_calc.calculate_current_df(pd)
assert np.isclose(macd_df, macd)
assert np.isclose(signal_df, signal)
def test_latest_macd_outer(test_ts, vals, is_csv, pd):
macd_calc = MACD()
(macd, signal) = macd_calc.calculate_current(test_ts)
if not is_csv:
assert np.isnan(macd) and np.isnan(signal)
else:
# I can't be bothered to do full calculation,
# so make sure the values are sensible
slow_average = sum(vals[-26:]) / 26.0
fast_average = sum(vals[-12:]) / 12.0
assert abs(macd) <= abs(2*(fast_average - slow_average))
assert abs(signal) <= abs(2*(fast_average - slow_average))
(macd_df, signal_df) = macd_calc.calculate_current(pd)
assert np.isclose(macd_df, macd)
assert np.isclose(signal_df, signal)
(macd_dict, signal_dict) = macd_calc.calculate_current({"a":pd})["a"]
assert np.isclose(macd_dict, macd)
assert np.isclose(signal_dict, signal)
def test_latest_rsi(test_ts, vals, is_csv, pd):
rsi_calc = RSI(10)
rsi = rsi_calc.calculate_current_ts(test_ts)
if not is_csv:
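        # over the last ten deltas of the synthetic series, gains sum to 12.5 and
        # losses to 2.5, so RS = 12.5/2.5 = 5 and RSI = 100 - 100/(1 + 5)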
assert np.isclose(100.0 - 100.0 / (1 + 12.5/2.5), rsi)
else:
rsi_df = rsi_calc.calculate_current_df(pd)
assert np.isclose(rsi, rsi_df)
def test_timeseries_rsi(test_ts, vals, is_csv, pd):
rsi_calc = RSI(10)
rsi = rsi_calc.calculate_current_ts(test_ts)
rsi_ts = rsi_calc.calculate_timeseries_ts(test_ts)
if not is_csv:
assert np.isclose(100.0 - 100.0 / (1 + 12.5/2.5), rsi_ts.values[-1])
assert np.isclose(100.0 - 100.0 / (1 + 12.5/2.5), rsi_ts.values[-2])
assert np.isclose(100.0 - 100.0 / (1 + 12.5/2.5), rsi_ts.values[-3])
assert np.isclose(100.0 - 100.0 / (1 + 11.5/2.5), rsi_ts.values[-4])
assert np.isclose(100.0 - 100.0 / (1 + 10.5/2.5), rsi_ts.values[-5])
assert np.isclose(100.0 - 100.0 / (1 + 10.5/2.5), rsi_ts.values[-6])
else:
assert np.isclose(rsi_ts.values[-1], rsi)
|
Cronan/pymdicator
|
tests/test_indicators.py
|
test_indicators.py
|
py
| 6,858 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pymdicator.timeseries.Timeseries",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pymdicator.timeseries",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pymdicator.timeseries.TimeseriesType",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pymdicator.timeseries.TimeseriesSubType",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.Momentum",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.MACD",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.MACD",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.RSI",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "pymdicator.indicators.RSI",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 191,
"usage_type": "call"
}
] |
13708970511
|
#AREA
from database.config import Conexion
from helper import helper
# AREA TABLE
class Area:
def __init__(self, tipoArea=None):
self.tipoArea = tipoArea
def add_Area(self, area, app):
try:
conn = Conexion()
query = f'''
INSERT INTO area(tipoArea)
VALUES('{area.tipoArea}')
'''
conn.ejecutar_sentencia(query)
conn.commit()
            message = f'''Se agrego el area: {area.tipoArea}'''
return helper.handler_response(app, 201, message)
        except Exception as e:
            print(e)
            raise
finally:
conn.cerrar_conexion()
def listar_Area(self, app):
listado_area = []
diccionario={}
try:
conn = Conexion()
query = f'''
SELECT * FROM area
'''
cursor = conn.ejecutar_sentencia(query)
filas = cursor.fetchall()
for fila in filas:
listado_area.append({'id ':str(fila[0]), 'Cargo ': fila[1]})
diccionario['Area'] = listado_area
print(diccionario)
return helper.handler_response(app, 201, diccionario)
        except Exception as e:
            print(e)
            raise
finally:
conn.cerrar_conexion()
def obtener_Area(self, app, id_area):
listado_area = []
diccionario={}
try:
conn = Conexion()
query = f'''
SELECT * FROM area WHERE id={id_area}
'''
cursor = conn.ejecutar_sentencia(query)
fila = cursor.fetchone()
area = Area(fila[1])
listado_area.append({'id ':str(fila[0]), 'Cargo ': area.tipoArea})
diccionario['Area'] = listado_area
return helper.handler_response(app, 201, diccionario)
        except Exception as e:
            print(e)
            raise
finally:
conn.cerrar_conexion()
def actualizar_Area(self, app, id_area, area):
try:
conn = Conexion()
query = f'''
UPDATE area
            SET tipoArea = '{area.tipoArea}'
WHERE id = {id_area}
'''
conn.ejecutar_sentencia(query)
conn.commit()
proces = 'Procesado'
return helper.handler_response(app, 201, proces)
        except Exception as e:
            print(e)
            raise
finally:
conn.cerrar_conexion()
def eliminar_Area(self, app, id_area):
try:
conn = Conexion()
query = f'''
DELETE FROM area WHERE id={id_area}
'''
cursor = conn.ejecutar_sentencia(query)
conn.commit()
eliminado = 'Eliminado...'
return helper.handler_response(app, 201, eliminado)
        except Exception as e:
            print(e)
            raise
finally:
conn.cerrar_conexion()
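# --- Illustrative sketch, not part of the original file ---
# The queries above interpolate values with f-strings, which leaves them open
# to SQL injection. If the Conexion wrapper accepts bound parameters (an
# assumption; its real signature may differ), a safer form would be:
#   query = 'INSERT INTO area(tipoArea) VALUES(%s)'
#   conn.ejecutar_sentencia(query, (area.tipoArea,))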
|
jesustr20/Reto10_PythonFLASK_Mysql_Empresa
|
apps/classes/area.py
|
area.py
|
py
| 3,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "database.config.Conexion",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "helper.helper.handler_response",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "helper.helper",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "database.config.Conexion",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "helper.helper.handler_response",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "helper.helper",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "database.config.Conexion",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "helper.helper.handler_response",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "helper.helper",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "database.config.Conexion",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "helper.helper.handler_response",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "helper.helper",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "database.config.Conexion",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "helper.helper.handler_response",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "helper.helper",
"line_number": 96,
"usage_type": "name"
}
] |
70316000189
|
from lib.get_endpoint import call_endpoint, endpoints
from lib.utils import format_response, trace
endpoint_inputs = {
"headers": {
"Content-Type": "application/json"
}
}
@trace(text="Delete company tasks")
def delete_tasks(command_args: dict):
"""
    Compiles the list of tasks associated with the supplied companyid and passes it
    to the deleteCompanyTasks endpoint in order to delete all of a client's tasks
:return: endpoint call response object
"""
# Check required argument
assert command_args.get('companyid'), "Required argument 'companyid' is missing"
# Find count
search_args = {"companyid": command_args.get('companyid')}
endpoint_inputs["args"] = search_args
endpoint_inputs['json'] = search_args
count_response = call_endpoint(endpoint_config=endpoints.company_tasks_svc.countCompanyTasks,
command_args=command_args,
**endpoint_inputs)
actual_count = count_response.data.get('content')
if actual_count == 0:
print("Client has no tasks.")
return
# Search task Ids
search_args["pageRequest"] = {"size": actual_count}
search_response = call_endpoint(endpoint_config=endpoints.company_tasks_svc.getCompanyTasks,
command_args=command_args,
**endpoint_inputs)
if search_response.status == 404:
print(search_response.data['errors'][0]['description'])
return
# Delete all task Ids
ids = [company_task['id'] for company_task in search_response.data['content']]
    if ids:
        endpoint_inputs['json'] = ids
        delete_response = call_endpoint(endpoint_config=endpoints.company_tasks_svc.deleteCompanyTasks,
                                        command_args=command_args,
                                        **endpoint_inputs)
        assert delete_response.status == 200, f"Delete operation failed! Error: {format_response(delete_response)}"
    print(f"{actual_count} Company tasks deleted successfully! Now, Client has no tasks.")
|
dattatembare/pytaf
|
utilities/delete_all_company_tasks.py
|
delete_all_company_tasks.py
|
py
| 2,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "lib.get_endpoint.call_endpoint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "lib.get_endpoint.endpoints.company_tasks_svc",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "lib.get_endpoint.endpoints",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "lib.get_endpoint.call_endpoint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lib.get_endpoint.endpoints.company_tasks_svc",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "lib.get_endpoint.endpoints",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "lib.get_endpoint.call_endpoint",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "lib.get_endpoint.endpoints.company_tasks_svc",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "lib.get_endpoint.endpoints",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "lib.utils.format_response",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "lib.utils.trace",
"line_number": 11,
"usage_type": "call"
}
] |
14162889179
|
import pyaudio
import wave
import pydub
import numpy as np
import os
import threading
import random
import time
import sys
freq = 60 # compute concentration every 60 seconds
excert_range = 5 # use posture data spanning 5x that window in the calculation
global position
position = [4] * freq*excert_range # array of postures used to compute concentration (starts at posture 4, i.e. not concentrating)
event = None # playback control flag checked in play_audio; set to "end" to stop (this global was referenced below but never defined)
def get_position_seq():
global position
n = len(position)
i = 0
while True:
        position[i] = 1 # put the measured posture here
        print("posture:", position[i])
i += 1
if(i == n):
i = 0
        time.sleep(1) # 1-second delay (probably unnecessary)
def concentration_rate(sequence): # compute concentration (the formula is ad hoc)
counts = [0, 0, 0, 0]
for num in sequence:
if num == 1:
counts[0] += 1
elif num == 2:
counts[1] += 1
elif num == 3:
counts[2] += 1
elif num == 4:
counts[3] += 1
concentrate_raw = (counts[0]+counts[1]*0.2)/(len(sequence))
    if concentrate_raw >= 0.7: # assume concentration effectively tops out at 0.7
concentrate = 1
else:
concentrate = concentrate_raw/0.7
print("集中力は",concentrate)
return concentrate
def choose_music(concentration,threshold): ##集中力に応じて音楽を選ぶ
folder_path = os.path.dirname(os.path.abspath(sys.argv[0]))
#上がる方
if concentration < threshold:
mp3_folder_path = os.path.join(folder_path, "no_concentrate_music")
mp3_files = [file for file in os.listdir(mp3_folder_path) if file.endswith(".mp3")]
random_file = random.choice(mp3_files)
file_path = os.path.join(mp3_folder_path, random_file)
print("上がる音楽",file_path,"を再生します")
#集中できる方
else:
mp3_folder_path = os.path.join(folder_path, "concentrate_music")
mp3_files = [file for file in os.listdir(mp3_folder_path) if file.endswith(".mp3")]
random_file = random.choice(mp3_files)
file_path = os.path.join(mp3_folder_path, random_file)
print("集中できる音楽",file_path,"を再生します")
return file_path
def volume(raw_volume): ##dBに基づいて適切な音量に変える
min_volume = 0.1
return (10**(raw_volume*-0.5)-10**-0.5+min_volume)/(1-10**-0.5+min_volume)
def play_audio(freq): ##音楽を再生する、音量は1秒おきに少しずつ滑らかに変わるようになってる(中断ボタンに合わせて再生を終了するとかは未実装)
global position
global event
decay = int(freq/2)
    threshold_0 = 0.1 # below this, assume not concentrating
    threshold_1 = 0.5 # above this, assume concentrating
n = 0
concentration_1 = 0
while True:
if event == "end":
break
file_path = choose_music(concentration_1,threshold_0)
        # convert to WAV format
        wav_file = file_path[:-4] + ".wav"
        sound = pydub.AudioSegment.from_mp3(file_path)
        sound.export(wav_file, format="wav")
        # play the WAV file
        wf = wave.open(wav_file, 'rb')
        chunk = wf.getframerate()
        # initialize PyAudio
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate = chunk,
                        output=True)
        # play the audio stream
        data = wf.readframes(chunk)
        concentration_origin = concentration_1
        print("initial concentration:", concentration_origin)
        while data:
            # adjust the volume based on the input value
concentration_0 = concentration_1
concentration_1 = concentration_rate(position)
n += freq
if concentration_origin < threshold_0 and concentration_1 > threshold_1:
break
elif concentration_origin > threshold_1 and concentration_1 < threshold_0:
break
concentration_step = (concentration_1 - concentration_0)/decay
raw_volume = concentration_0
raw_volume += concentration_step
for i in range(decay):
                # convert the binary data to a numpy array
                audio_array = np.frombuffer(data, dtype=np.int16)
                volume_factor = volume(raw_volume)
                print("volume:", volume_factor)
                adjusted_array = (audio_array * volume_factor).astype(np.int16)
                # convert the audio data back to binary
                adjusted_data = adjusted_array.tobytes()
                # play the adjusted audio
                stream.write(adjusted_data)
                # read the next chunk
                data = wf.readframes(chunk)
                raw_volume += concentration_step
                # TODO: break out of the loop here if an interrupt button is pressed?
if event == "end":
break
if event == "end":
break
volume_factor = volume(raw_volume)
for i in range(freq - decay):
            # convert the binary data to a numpy array
            audio_array = np.frombuffer(data, dtype=np.int16)
            print("volume:", volume_factor)
            adjusted_array = (audio_array * volume_factor).astype(np.int16)
            # convert the audio data back to binary
            adjusted_data = adjusted_array.tobytes()
            # play the adjusted audio
            stream.write(adjusted_data)
            # read the next chunk
            data = wf.readframes(chunk)
            # TODO: break out of the loop here if an interrupt button is pressed?
if event == "end":
break
if event == "end":
break
        # close the stream
        stream.stop_stream()
        stream.close()
        # terminate PyAudio
        p.terminate()
        # delete the temporary WAV file
        os.remove(wav_file)
        # TODO: break out of the loop here if an interrupt button is pressed?
        if event == "end":
            break
# main entry point
if __name__ == "__main__":
    # create a lock object
lock = threading.Lock()
    # create the threads
    t1 = threading.Thread(target=get_position_seq)
    t2 = threading.Thread(target=play_audio, args=(freq,))
    # start the threads
t1.start()
t2.start()
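    # Illustrative check of the volume() mapping (sketch, not original code):
    #   volume(0.0) -> 1.0 (full gain while concentration is lowest)
    #   volume(1.0) -> ~0.13, close to min_volume (music fades as focus rises)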
|
agridrama/system-project-1
|
volume_control.py
|
volume_control.py
|
py
| 6,822 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pydub.AudioSegment.from_mp3",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "wave.open",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "numpy.int16",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.frombuffer",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "numpy.int16",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 185,
"usage_type": "call"
}
] |
41431741975
|
from django.forms import ModelForm, TextInput
from django import forms
from .models import List
class ListForm(ModelForm):
class Meta:
model = List
fields = ['list']
        widgets = {'list': TextInput(attrs={
'class': 'form-control',
'name': 'list',
'id': 'list',
'placeholder': 'List'
}),
}
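# Minimal usage sketch in a Django view (hypothetical code, not part of this
# repo):
#   form = ListForm(request.POST or None)
#   if form.is_valid():
#       form.save()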
|
awpogodin/py-CustomField
|
django/listvalid/forms.py
|
forms.py
|
py
| 402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 9,
"usage_type": "call"
}
] |
74911410428
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from qtasync import AsyncTask, coroutine
from PyQt5.QtCore import QCoreApplication, Qt,QThread
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from core import fmaker
import random
from planmaker import make_plan
import numpy as np
import asyncio
class App(QWidget):
fmaker_ =0
def __init__(self):
super().__init__()
self.left = 400
self.top = 400
self.title = 'PyQt5 matplotlib example - pythonspot.com'
self.width = 800
self.height = 600
self.fmaker_ = fmaker()
self.plan=[[],[]]
self.initUI()
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.center()
hbox = QHBoxLayout(self)
topleft = QSplitter(Qt.Vertical)
topleft.setFrameShape(QFrame.StyledPanel)
splitbutton = QSplitter(Qt.Horizontal)
splitbutton.setFrameShape(QFrame.StyledPanel)
splitter1 = QSplitter(Qt.Horizontal)
splitter1.addWidget(topleft)
topleft.addWidget(splitbutton)
parts= []
for i in range(3):
parts.append(QSplitter(Qt.Horizontal))
parts[i].setFrameShape(QFrame.StyledPanel)
topleft.addWidget(parts[i])
hbox.addWidget(splitter1)
self.setLayout(hbox)
self.setWindowTitle('Синтез непрерывных D-планов для нечетких моделей с тремя подобластями')
self.m = PlotCanvas(splitter1, width=5, height=4)
self.m.move(0,0)
self.radiobutton = []
dic = ["в первом","во втором", "в третьем"]
for i in range(3):
grid = QGridLayout()
group_box = QGroupBox("Модель "+dic[i]+" нечетком множестве")
group_box.setLayout(grid)
self.radiobutton.append(QRadioButton("Квадратичная"))
self.radiobutton[i * 2].setChecked(True)
self.radiobutton[i * 2].type = "quad"
self.radiobutton[i * 2].toggled.connect(self.on_radio_button_toggled1)
self.radiobutton.append(QRadioButton("Линейная"))
self.radiobutton[i * 2 + 1].type = "lin"
self.radiobutton[i * 2 + 1].toggled.connect(self.on_radio_button_toggled1)
parts[i].addWidget(group_box)
grid.addWidget(self.radiobutton[i * 2],1,1)
grid.addWidget(self.radiobutton[i * 2 + 1],2,1)
button = QPushButton('Сформировать план')
button.clicked.connect(self.start_calculations)
splitbutton.addWidget(button)
self.show()
def on_radio_button_toggled1(self):
radiobutton = self.sender()
if radiobutton.isChecked():
self.fmaker_.change_model((self.radiobutton.index(radiobutton)+1)//3, radiobutton.type)
@coroutine
def start_calculations(self,arg):
button = self.sender()
button.setText('Производятся вычисления')
button.setEnabled(False)
for rb in self.radiobutton:
rb.setEnabled(False)
self.plan = yield AsyncTask(make_plan,self.fmaker_,button)
self.m.plot(self.plan)
file = open('plan.txt','w')
for i in range(len(self.plan[0])):
file.write(str(self.plan[0][i]) + '\t' + str(self.plan[1][i]) + '\n')
file.close()
button.setText('Сформировать план')
button.setEnabled(True)
for rb in self.radiobutton:
rb.setEnabled(True)
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def plot(self, plan):
data = [random.random() for i in range(25)]
self.axes.cla()
self.axes.scatter( x = plan[0], y = [0 for _ in plan[0]],
s = 5e3 * np.array(plan[1]), c = np.random.rand(len(plan[0])),
alpha = 0.5,
label = 'Веса точек в плане')
self.axes.scatter( x = [0], y = [0],
s = 0,
alpha = 0.0,
label = '|M| = ' + str(plan[2]))
plt.ylim(-1,1)
self.axes.legend()
for i, num in enumerate(plan[1]):
self.axes.annotate(round(num,3), (plan[0][i]-0.05,plan[1][i]/40))
self.axes.set_title('План эксперимента')
self.axes.grid()
self.draw()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
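# Shape of the `plan` structure consumed by PlotCanvas.plot, inferred from the
# code above (stated as an assumption): plan[0] holds the design points,
# plan[1] their weights, plan[2] the determinant |M| shown in the legend, e.g.
#   plan = [[-1.0, 0.0, 1.0], [0.3, 0.4, 0.3], 0.0123]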
|
lupusomniator/paae_kurs
|
main v2.0.py
|
main v2.0.py
|
py
| 5,611 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "core.fmaker",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.Vertical",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Horizontal",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Horizontal",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Horizontal",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "qtasync.AsyncTask",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "planmaker.make_plan",
"line_number": 102,
"usage_type": "argument"
},
{
"api_name": "qtasync.coroutine",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.figure.Figure",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 155,
"usage_type": "call"
}
] |
5259672483
|
import csv
import io
import math
import os
import pickle
import re
import subprocess
import sys
import time
import common_nouns
import location
import proper_noun
import CMUTweetTagger
import nltk
import numpy as np
import spacy
from collections import defaultdict
from itertools import product
from gensim.models import *
from nltk import Tree
from nltk.corpus import wordnet as wn
from nltk.stem import *
from nltk.tokenize import TweetTokenizer
from spacy.symbols import *
from stop_words import get_stop_words
nlp=spacy.load('en')
stop_words=get_stop_words('en')
stop_words_2=['i','me','we','us','you','u','she','her','his','he','him','it','they','them','who','which','whom','whose','that','this','these','those','anyone','someone','some','all','most','himself','herself','myself','itself','hers','ours','yours','theirs','to','in','at','for','from','etc',' ',',']
for i in stop_words_2:
if i not in stop_words:
stop_words.append(i)
nepal_stop_list=['nepal','earthquake','quake','nepalese','italy']
nepal_re="(nepal|quake|earthquake|nepalese|Earthquake|Nepal|NEPAL|Quake|Earth|Italy|italy)+"
web_url="http[s]?:[a-zA-Z._0-9/]+[a-zA-Z0-9]"
replacables="RT\s|-\s|\s-|#|@"
prop_name="([A-Z][a-z]+)"
num="([0-9]+)"
name="([A-Za-z]+)"
and_rate="([&][a][m][p][;])"
ellipses="([A-Za-z0-9]+[…])"
mentions="([a-zA-z\s0-9]+[:])"
model=KeyedVectors.load_word2vec_format('/media/hdd/hdd/crisisNLP_word2vec_model/crisisNLP_word_vector.bin',binary=True)
dict_file=open('built_dict_italy.txt','r')
prannay_dict={}
for line in dict_file:
line=line.rstrip().split(',')
prannay_dict[line[0]]=line[1]
from nltk.stem.lancaster import LancasterStemmer
stem2=LancasterStemmer()
import numpy as np
wc2_vector_array=np.load('/media/hdd/hdd/data_backup/results/nepal/Need/wc2_nepal_2_word_embeddings.npy')
global_offer_resource_list=[]
global_need_resource_list=[]
id_need_list=[]
offer_text=[]
need_text=[]
id_offer_list=[]
import pickle
with open('nepal_global_offer_resource_list.p','rb') as handle:
global_offer_resource_list=pickle.load(handle)
with open('nepal_global_need_resource_list.p','rb') as handle:
global_need_resource_list= pickle.load(handle)
with open('nepal_need_text.p','rb') as handle:
need_text=pickle.load(handle)
with open('nepal_offer_text.p','rb') as handle:
offer_text= pickle.load(handle)
with open('nepal_id_need_list.p','rb') as handle:
id_need_list= pickle.load(handle)
with open('nepal_id_offer_list.p','rb')as handle:
id_offer_list= pickle.load(handle)
# print(len(global_need_resource_list))
# print(len(global_offer_resource_list))
# print(len(offer_text))
# print(len(need_text))
# print(len(nepal_id_need_list))
# print(len(nepal_id_offer_list))
need_send_verb_list=['need','require','want','lack','send','give','donate','transfer','distribute','aid','help','earthquake','victims']
stemmer=PorterStemmer()
out_stem_list=[stemmer.stem(i.lower()) for i in need_send_verb_list]
lanc_stem_list=[stem2.stem(i.lower()) for i in need_send_verb_list]
def euclidean_norm(u):
prod=0
for i in range(0,len(u)):
prod=prod+u[i]*u[i]
return math.sqrt(prod)
def cosine_similarity(u,v):
if len(u)==len(v):
e1=euclidean_norm(u)
e2=euclidean_norm(v)
if e1==0 or e2==0:
return 0
length=len(u)
scalar_product=0
for i in range(length):
scalar_product=scalar_product+u[i]*v[i]
return scalar_product/(e1*e2)
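# Equivalent numpy formulation of the two helpers above, shown for clarity
# only (an illustrative sketch; the hand-rolled versions remain the ones
# actually used below):
def cosine_similarity_np(u, v):
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    n1 = np.linalg.norm(u)
    n2 = np.linalg.norm(v)
    if n1 == 0 or n2 == 0:
        return 0
    return float(np.dot(u, v) / (n1 * n2))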
def get_list_1(need_tweet_list):
need_res_set=[]
for i in need_tweet_list:
for j in i.split():
if stemmer.stem(j.lower()) not in out_stem_list:
need_res_set.append(j.lower())
return list(set(need_res_set))
def get_list_2(need_tweet_list):
need_res_set=[]
for i in need_tweet_list:
for j in i.split():
if stem2.stem(j.lower()) not in lanc_stem_list:
need_res_set.append(j.lower())
return list(set(need_res_set))
def get_set_1(need_tweet_list):
need_res_set=set()
for i in need_tweet_list:
for j in i.split():
if stemmer.stem(j.lower()) not in out_stem_list:
need_res_set.add(stemmer.stem(j.lower()))
return need_res_set
def resource_similarity_score_via_exact_word_match_1(need_res_set,offer_tweet_list):
if len(need_res_set)==0:
return 0
offer_res_set=set()
for i in offer_tweet_list:
for j in i.split():
if j not in out_stem_list:
offer_res_set.add(stemmer.stem(j.lower()))
return(len(offer_res_set&need_res_set)/len(need_res_set))
def get_similarity_score_1(word,given_list):
max_similarity=0
if word.lower() in given_list:
max_similarity=1
else:
current_verb_list=wn.synsets(word.lower())
for verb in given_list:
related_verbs=wn.synsets(verb)
for a,b in product(related_verbs,current_verb_list):
d=wn.wup_similarity(a,b)
try:
if d> max_similarity:
max_similarity=d
except:
continue
return max_similarity
def get_similarity_score_2(word,given_list):
max_similarity=0
flag1=0
flag2=0
if word.lower() in given_list:
max_similarity=1
else:
try:
u=model[word]
except:
u=model['unk']
flag1=1
        for item in given_list:
            flag2=0  # reset per item; otherwise one out-of-vocabulary item zeroes every later comparison
try:
v=model[item]
except:
v=model['unk']
flag2=1
if flag1==1 and flag2==1:
d=0
else:
d=cosine_similarity(u,v)
if d >max_similarity:
max_similarity=d
return max_similarity
def get_similarity_score_3(word,given_list):
max_similarity=0
flag1=0
flag2=0
if word.lower() in given_list:
max_similarity=1
else:
try:
u=wc2_vector_array[int(prannay_dict[word])]
except:
u=wc2_vector_array[0]
flag1=1
        for item in given_list:
            flag2=0  # reset per item; otherwise one out-of-vocabulary item zeroes every later comparison
try:
v=wc2_vector_array[int(prannay_dict[item])]
except:
v=wc2_vector_array[0]
flag2=1
if flag1==1 and flag2==1:
d=0
else:
d=cosine_similarity(u,v)
if d>max_similarity:
max_similarity=d
return max_similarity
def resource_similarity_score_via_wc2_2(input_need_res_list,offer_tweet_list):
offer_tweet_list=get_list_2(offer_tweet_list)
l1=len(input_need_res_list)
value=0
for item in input_need_res_list:
temp=get_similarity_score_3(item,offer_tweet_list)
value=value+temp
return value/l1
def resource_similarity_score_via_wc2_1(need_vector,offer_tweet_list):
offer_tweet_list_2=get_list_2(offer_tweet_list)
l2=len(offer_tweet_list)
offer_vector=np.zeros(256)
if l2 ==0:
return 0
for i in offer_tweet_list_2:
try:
v2=wc2_vector_array[int(prannay_dict[i.lower()])]
except:
v2=wc2_vector_array[0]
for j in range(len(offer_vector)):
offer_vector[j]+=v2[j]
offer_vector=[i/l2 for i in offer_vector]
return cosine_similarity(need_vector,offer_vector)
def resource_similarity_score_via_word_net_1(need_res_set,offer_tweet_list):
if len(need_res_set)==0:
return 0
value=0
offer_res_list=[]
for i in offer_tweet_list:
for j in i.split():
if stemmer.stem(j.lower()) not in out_stem_list:
offer_res_list.append(stemmer.stem(j.lower()))
for word in need_res_set:
temp= get_similarity_score_1(word,offer_res_list)
if temp > 0.6:
value=value+temp
return value/len(need_res_set)
def resource_similarity_score_via_word_vec_1(need_vector,offer_tweet_list):
offer_tweet_list_2=get_list_1(offer_tweet_list)
l2=len(offer_tweet_list)
offer_vector=np.zeros(300)
if l2 ==0:
return 0
for i in offer_tweet_list_2:
try:
v2=model[i.lower()]
except:
v2=model['unk']
for j in range(len(offer_vector)):
offer_vector[j]+=v2[j]
offer_vector=[i/l2 for i in offer_vector]
return cosine_similarity(need_vector,offer_vector)
def resource_similarity_score_via_word_vec_2(input_need_res_list,offer_tweet_list):
offer_tweet_list=get_list_1(offer_tweet_list)
l1=len(input_need_res_list)
#print(offer_tweet_list)
value=0
for item in input_need_res_list:
temp=get_similarity_score_2(item,offer_tweet_list)
value=value+temp
return value/l1
def get_top_k_searches_1(input_id,k,method,outfile,idfile):
outfile.write('\n'+need_text[id_need_list.index(input_id)]+'\n')
#print(need_text[id_need_list.index(input_id)])
input_need_res_set=get_set_1(global_need_resource_list[id_need_list.index(input_id)])
score_array={}
if method==1:
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_exact_word_match_1(input_need_res_set,global_offer_resource_list[id_offer_list.index(item)])
if method==2:
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_word_net_1(input_need_res_set,global_offer_resource_list[id_offer_list.index(item)])
if method==3:
input_need_res_list=get_list_1(global_need_resource_list[id_need_list.index(input_id)])
l1=len(input_need_res_list)
if l1==0:
for item in id_offer_list:
score_array[item]=0
else:
need_vector=np.zeros(300)
for i in input_need_res_list:
try:
v1=model[i.lower()]
except:
v1=model['unk']
for j in range(300):
need_vector[j]+=v1[j]
need_vector=[i/l1 for i in need_vector]
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_word_vec_1(need_vector,global_offer_resource_list[id_offer_list.index(item)])
if method ==4:
input_need_res_list=get_list_1(global_need_resource_list[id_need_list.index(input_id)])
l1=len(input_need_res_list)
if l1==0:
for item in id_offer_list:
score_array[item]=0
else:
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_word_vec_2(input_need_res_list,global_offer_resource_list[id_offer_list.index(item)])
if method==5:
input_need_res_list=get_list_2(global_need_resource_list[id_need_list.index(input_id)])
l1=len(input_need_res_list)
if l1==0:
for item in id_offer_list:
score_array[item]=0
else:
need_vector=np.zeros(256)
for i in input_need_res_list:
try:
v1=wc2_vector_array[int(prannay_dict[i])]
except:
v1=wc2_vector_array[0]
for j in range(256):
need_vector[j]+=v1[j]
need_vector=[i/l1 for i in need_vector]
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_wc2_1(need_vector,global_offer_resource_list[id_offer_list.index(item)])
if method==6:
input_need_res_list=get_list_2(global_need_resource_list[id_need_list.index(input_id)])
l1=len(input_need_res_list)
if l1==0:
for item in id_offer_list:
score_array[item]=0
else:
for item in id_offer_list:
score_array[item]=resource_similarity_score_via_wc2_2(input_need_res_list,global_offer_resource_list[id_offer_list.index(item)])
score_array_sorted_keys=sorted(score_array,key=score_array.get,reverse=True)
count=0
for r in score_array_sorted_keys:
outfile.write(str(score_array[r])+'\t'+offer_text[id_offer_list.index(r)]+'\n')
# if method==5 or method ==6:
print(str(score_array[r])+'\t'+offer_text[id_offer_list.index(r)])
idfile.write(str(input_id)+'\t'+str(r)+'\n')
if count==k:
return
count+=1
def get_top_k_searches_2(resource_list,k,method,need_offer_flag):
print('HERE I AM IN TOP SEARCHES')
input_need_res_set=get_set_1(resource_list)
score_array={}
print(need_offer_flag)
print(k)
print(method)
if need_offer_flag==1:
id_need_offer_list=id_offer_list
global_need_offer_resource_list=global_offer_resource_list
need_offer_text=offer_text
else:
id_need_offer_list=id_need_list
global_need_offer_resource_list=global_need_resource_list
need_offer_text=need_text
if method==1:
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_exact_word_match_1(input_need_res_set,global_need_offer_resource_list[id_need_offer_list.index(item)])
if method==2:
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_word_net_1(input_need_res_set,global_need_offer_resource_list[id_need_offer_list.index(item)])
if method==3:
input_need_res_list=get_list_1(resource_list)
l1=len(input_need_res_list)
if l1==0:
for item in id_need_offer_list:
score_array[item]=0
else:
need_vector=np.zeros(300)
for i in input_need_res_list:
try:
v1=model[i.lower()]
except:
v1=model['unk']
for j in range(300):
need_vector[j]+=v1[j]
need_vector=[i/l1 for i in need_vector]
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_word_vec_1(need_vector,global_need_offer_resource_list[id_need_offer_list.index(item)])
if method ==4:
input_need_res_list=get_list_1(resource_list)
l1=len(input_need_res_list)
if l1==0:
for item in id_need_offer_list:
score_array[item]=0
else:
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_word_vec_2(input_need_res_list,global_need_offer_resource_list[id_need_offer_list.index(item)])
if method==5:
input_need_res_list=get_list_2(resource_list)
l1=len(input_need_res_list)
if l1==0:
for item in id_need_offer_list:
score_array[item]=0
else:
need_vector=np.zeros(256)
for i in input_need_res_list:
try:
v1=wc2_vector_array[int(prannay_dict[i])]
except:
v1=wc2_vector_array[0]
for j in range(256):
need_vector[j]+=v1[j]
need_vector=[i/l1 for i in need_vector]
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_wc2_1(need_vector,global_need_offer_resource_list[id_need_offer_list.index(item)])
if method==6:
input_need_res_list=get_list_2(resource_list)
l1=len(input_need_res_list)
if l1==0:
for item in id_need_offer_list:
score_array[item]=0
else:
for item in id_need_offer_list:
score_array[item]=resource_similarity_score_via_wc2_2(input_need_res_list,global_need_offer_resource_list[id_need_offer_list.index(item)])
score_array_sorted_keys=sorted(score_array,key=score_array.get,reverse=True)
count=0
for r in score_array_sorted_keys:
print(str(score_array[r])+'\t'+need_offer_text[id_need_offer_list.index(r)])
if count==k:
return
count+=1
tknzr=TweetTokenizer(strip_handles=True,reduce_len=True)
def tweet_preprocess(text):
#text=" ".join(tknzr.tokenize(text))
text=re.sub(web_url,'',text)
text=re.sub(mentions,'',text)
text=re.sub(ellipses,'',text)
text=re.sub(and_rate,'and',text)
text=re.sub(str(num)+''+name,"\\1 \\2",text)
text=re.sub(name+''+str(num),"\\1 \\2",text)
text=re.sub(prop_name+''+prop_name,"\\1 \\2",text)
return text.lstrip().rstrip()
def tweet_preprocess2(text):
#text=" ".join(tknzr.tokenize(text))
text=re.sub(web_url,'',text)
text=re.sub(mentions,'',text)
text=re.sub(ellipses,'',text)
text=re.sub(and_rate,'and',text)
text=re.sub(replacables,'',text)
#text=re.sub(mentions,'',text)
text=" ".join(tknzr.tokenize(text))
text=re.sub(str(num)+''+name,"\\1 \\2",text)
text=re.sub(name+''+str(num),"\\1 \\2",text)
text=re.sub(prop_name+''+prop_name,"\\1 \\2",text)
return text.lstrip().rstrip()
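# Example of the cleaning above on a made-up tweet (illustrative; the exact
# output depends on the regex order):
#   tweet_preprocess2('RT @user: 500people need water http://t.co/abc')
#   -> roughly '500 people need water'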
verb_dict={}
common_resource=['food','water','medicine','tent','clothes','communication','transport','infrastructure','shelter','internet','sanitation','hospital','donations','blood']
def post_preprocess(text,final_resource_keys,quantity_dict,loc_list,source_list,which_k,which_method,need_offer_flag):
########## Remove the nepal stop list terns ###############
final_resource_keys_2=[]
for i in final_resource_keys:
final_resource_keys_2.append(re.sub(nepal_re,'',i))
source_list_2=[]
for i in source_list:
source_list_2.append(re.sub(nepal_re,'',i))
loc_list_2=[]
for i in loc_list:
loc_list_2.append(re.sub(nepal_re,'',i))
source_list=list(source_list_2)
loc_list=list(loc_list_2)
final_resource_keys=list(final_resource_keys_2)
#########################################################
for i in source_list_2:
if i.lower() in stop_words:
try:
source_list.remove(i)
except:
continue
for j in loc_list:
for i in source_list_2:
if i in j:
try:
source_list.remove(i)
except:
continue
######### Remove the terms duplicates #############
source_list_2=list(source_list)
for i in final_resource_keys_2:
length=len(final_resource_keys)
for j in range(length):
if i in final_resource_keys[j] and len(i) < len(final_resource_keys[j]):
try:
final_resource_keys.remove(i)
break
except:
continue
final_resource_keys_2=list(final_resource_keys)
for i in source_list_2:
length=len(source_list)
for j in range(length):
if i in source_list[j] and len(i) < len(source_list[j]):
try:
source_list.remove(i)
break
except:
continue
source_list_2=list(source_list)
for i in loc_list_2:
length=len(loc_list)
for j in range(length):
if i in loc_list[j] and len(i)< len(loc_list[j]):
try:
loc_list.remove(i)
break
except:
continue
loc_list_2=list(loc_list)
######################################################
source_list_2=list(source_list)
for j in loc_list:
for i in source_list_2:
if j in i:
try:
source_list.remove(j)
except:
continue
for i in final_resource_keys_2:
for j in loc_list:
if i in j:
try:
final_resource_keys.remove(i)
except:
continue
final_resource_keys_2=list(final_resource_keys)
loc_list_2=list(loc_list)
source_list_2=list(source_list)
##################################################
for i in final_resource_keys_2:
if i.lower().rstrip().lstrip() in stop_words:
try:
final_resource_keys.remove(i)
except:
continue
for i in loc_list_2:
i=re.sub('#','',i)
if i.lower().rstrip().lstrip() in stop_words:
try:
loc_list.remove(i)
except:
continue
for i in source_list_2:
if i.lower().rstrip().lstrip() in stop_words:
try:
source_list.remove(i)
except:
continue
if len(final_resource_keys)==0:
doc=nlp(text)
for word in doc:
if word.pos_=='NOUN':
final_resource_keys.append(word.orth_)
#global_need_resource_list.append(final_resource_keys)
print("Resource_list")
print(final_resource_keys)
print()
print("Quantity dictionary")
print(quantity_dict)
print()
print("Location")
print(loc_list)
print()
common_nouns.get_contact(text)
print()
print("Source list")
print(source_list)
get_top_k_searches_2(final_resource_keys,which_k,which_method,need_offer_flag)
def create_resource_list(need_text_2,which_k,which_method,need_offer_flag):
count=0
start_time=time.time()
for text in need_text_2:
source_list_3=[]
urls=re.findall(web_url,text)
for i in urls:
if len(i)>len('http://t.co'):
source_list_3.append(i)
text2=tweet_preprocess(text)
need_cmu_tags=CMUTweetTagger.runtagger_parse([text2])
text=tweet_preprocess2(text)
quantity_dict={}
final_resource_keys=[]
source_list=[]
loc_list=[]
poss_places=[]
org_person_list=[]
quantity_dict,final_resource_keys,source_list,poss_places,org_person_list= common_nouns.get_resource(text)
for i in source_list_3:
source_list.append(i)
# print(count)
print(text)
doc=nlp(text)
#need_tag.append(CMUTweetTagger.runtagger_parse([text]))
loc_list=proper_noun.give_location(need_cmu_tags)
for i in org_person_list:
if i in loc_list:
try:
loc_list.remove(i)
except:
continue
if i not in source_list:
source_list.append(i)
for i in loc_list:
if i in source_list:
try:
source_list.remove(i)
except:
continue
for i in poss_places:
if i not in loc_list and location.is_inside_Nepal(i)==1:
loc_list.append(i)
for i in org_person_list:
if i in final_resource_keys:
try:
final_resource_keys.remove(i)
except:
continue
count=count+1
final_resource_lists=[]
for key in final_resource_keys:
if key in quantity_dict:
final_resource_lists.append(key.split(' ')[-1])
continue
if key in text:
final_resource_lists.append(key)
post_preprocess(text,final_resource_lists,quantity_dict,loc_list,source_list,which_k,which_method,need_offer_flag)
print(time.time()-start_time)
start_time=time.time()
need_text_2=[]
need_text_2.append('There are many people stranded in Kathmandu')
which_k=4
which_method=3
need_offer_flag=1
create_resource_list(need_text_2,which_k,which_method,need_offer_flag)
|
varun-manjunath/disaster-mitigation
|
matching/process_both.py
|
process_both.py
|
py
| 20,459 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "spacy.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "stop_words.get_stop_words",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "stop_words.append",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "nltk.stem.lancaster.LancasterStemmer",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.wup_similarity",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.TweetTokenizer",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "common_nouns.get_contact",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 717,
"usage_type": "call"
},
{
"api_name": "CMUTweetTagger.runtagger_parse",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "common_nouns.get_resource",
"line_number": 733,
"usage_type": "call"
},
{
"api_name": "proper_noun.give_location",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "location.is_inside_Nepal",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 785,
"usage_type": "call"
}
] |
6398739491
|
from dash import Dash
from dash.dependencies import Input, Output
from dash_core_components import Dropdown, Graph
from dash_html_components import H1, Div, P
from peewee import fn
from src.database import LastPackage, Package, PackageHistory
dash_app = Dash(__name__)
server = dash_app.server
dash_app.layout = Div(
children=[
Div(
className='header',
children=[
P(children='📈', className='header-emoji'),
H1(children='COCOMO-PYTHON', className='header-title'),
P(
children='''
A Cocomo analysis of the packages available on Pypi.
''',
className='header-description',
),
P(
children='https://github.com/dunossauro/cocomo-python',
className='header-description',
),
],
),
Div(
className='menu',
children=[
Div(
className='dropdown',
children=[
Div(children='Select Group', className='menu-title'),
Dropdown(
id='group',
className='dropdown',
),
],
),
Div(
className='dropdown',
children=[
Div(
children='Select packages', className='menu-title'
),
Dropdown(
id='package',
className='dropdown',
multi=True,
),
],
),
],
),
Div(
className='wrapper',
children=[
Graph(
id='graph_lines_value',
config={'displayModeBar': False},
)
],
),
Div(
className='wrapper',
children=[
Graph(
id='graph_license',
config={'displayModeBar': False},
)
],
),
H1(children='Package History', className='header-title2'),
Div(
className='graph-header',
children=[
Div(
className='menu2',
children=[
Div(
className='dropdown',
children=[
Div(
children='Select package',
className='menu-title',
),
Dropdown(
id='package_history',
className='dropdown',
),
],
),
],
),
Div(
className='wrapper',
children=[
Graph(
id='graph_package_history',
config={'displayModeBar': False},
)
],
),
H1(children='Python versions', className='header-title'),
Div(
className='wrapper',
children=[
Graph(
id='python_history',
config={'displayModeBar': False},
)
],
),
],
),
]
)
@dash_app.callback(
Output('group', 'options'),
Input('group', 'search_value'),
)
def update_groups(search_value):
return [
{'label': p.group.capitalize(), 'value': p.group}
for p in LastPackage.select().group_by(LastPackage.group).execute()
]
@dash_app.callback(
Output('package', 'options'),
Input('group', 'value'),
)
def update_packages(search_value):
return [
{'label': p.name.name.capitalize(), 'value': p.name.name}
for p in LastPackage.select().where(LastPackage.group == search_value)
]
@dash_app.callback(
Output('graph_lines_value', 'figure'),
Input('group', 'value'),
Input('package', 'value'),
)
def lines_price(group, package):
if not package:
query = LastPackage.select().where(LastPackage.group == group)
else:
query = (
LastPackage.select().join(Package).where(Package.name.in_(package))
)
return {
'data': [
{
'y': [d.name.name for d in query],
'x': [d.total_lines for d in query],
'name': 'Code Lines',
'type': 'bar',
'orientation': 'h',
'marker': {
'color': ['#71134C' for x in query],
},
},
{
'y': [d.name.name for d in query],
'x': [d.total_cost for d in query],
'name': 'Cocomo',
'type': 'bar',
'orientation': 'h',
'marker': {
'color': ['#0D7040' for x in query],
},
},
],
'layout': {
'title': {
'text': f'SLOC-package x Cocomo-Value (110.140) - {group}',
'x': 0.05,
'xanchor': 'left',
}
},
}
@dash_app.callback(
Output('package_history', 'options'),
Input('package_history', 'value'),
)
def history(package_history):
return [
{'label': p.name, 'value': p.name}
for p in Package.select().order_by(Package.name)
]
@dash_app.callback(
Output('graph_package_history', 'figure'),
Input('package_history', 'value'),
)
def package_history(package):
query = (
PackageHistory.select()
.join(Package)
.where(Package.name == package)
.order_by(PackageHistory.date)
)
wheel_query = query.where(PackageHistory.package_type == 'wheel')
tar_query = query.where(PackageHistory.package_type == 'tar')
return {
'data': [
{
'y': [d.total_lines for d in wheel_query],
'x': [d.date for d in wheel_query],
'name': 'Wheel',
},
{
'y': [d.total_cost for d in wheel_query],
'x': [d.date for d in wheel_query],
'name': 'Cocomo wheel',
},
{
'y': [d.total_lines for d in tar_query],
'x': [d.date for d in tar_query],
'name': 'Tar',
},
{
'y': [d.total_cost for d in tar_query],
'x': [d.date for d in tar_query],
'name': 'Cocomo tar',
},
],
'layout': {
'title': {
'text': f'Package history - {package}',
'x': 0.05,
'xanchor': 'left',
}
},
}
@dash_app.callback(
Output('graph_license', 'figure'),
Input('group', 'value'),
)
def license(value):
query = (
Package.select(
Package.license, fn.COUNT(Package.id).alias("license_count")
)
.join(LastPackage)
.where(LastPackage.group == value)
.group_by(Package.license)
)
return {
'data': [
{
'y': [x.license_count for x in query],
'x': [x.license for x in query],
'type': 'bar',
'marker': {
'color': ['#71134C' for x in query],
},
},
],
'layout': {
'title': {
'text': 'License type',
'x': 0.05,
'xanchor': 'left',
}
},
}
@dash_app.callback(
Output('python_history', 'figure'),
Input('package_history', 'value'),
)
def python(value):
query = (
PackageHistory.select().join(Package).where(Package.name == 'python')
)
return {
'data': [
{
'x': [x.version for x in query],
'y': [x.total_lines for x in query],
'type': 'bar',
'marker': {
'color': ['#71134C' for x in query],
},
'name': 'code lines',
},
{
'x': [x.version for x in query],
'y': [x.total_cost for x in query],
'type': 'bar',
'name': 'cocomo',
'marker': {
'color': ['#0D7040' for x in query],
},
},
],
'layout': {
'title': {
'text': 'Python versions',
'x': 0.05,
'xanchor': 'left',
}
},
}
# dash_app.run_server(debug=True)
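# To serve this in production, point a WSGI server at the exposed Flask
# `server` object, e.g. (assumed deployment command, not from this repo):
#   gunicorn dashboard:server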
|
dunossauro/cocomo-python
|
dashboard.py
|
dashboard.py
|
py
| 9,281 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "dash.Dash",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H1",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H1",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H1",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage.select",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "src.database.LastPackage.group",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage.select",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "src.database.LastPackage.group",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage.select",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "src.database.LastPackage.group",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "src.database.Package",
"line_number": 156,
"usage_type": "argument"
},
{
"api_name": "src.database.LastPackage.select",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "src.database.Package.name.in_",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "src.database.Package.name",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "src.database.Package.select",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "src.database.Package",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "src.database.Package.name",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "src.database.Package",
"line_number": 209,
"usage_type": "argument"
},
{
"api_name": "src.database.PackageHistory.select",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "src.database.PackageHistory",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "src.database.Package.name",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "src.database.Package",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "src.database.PackageHistory.date",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "src.database.PackageHistory",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "src.database.PackageHistory.package_type",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "src.database.PackageHistory",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "src.database.PackageHistory.package_type",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "src.database.PackageHistory",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "src.database.LastPackage",
"line_number": 258,
"usage_type": "argument"
},
{
"api_name": "src.database.Package.select",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "src.database.Package",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "src.database.Package.license",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "src.database.Package",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "peewee.fn.COUNT",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "peewee.fn",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "src.database.Package.id",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "src.database.LastPackage.group",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "src.database.LastPackage",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "src.database.Package.license",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "src.database.Package",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "src.database.Package",
"line_number": 289,
"usage_type": "argument"
},
{
"api_name": "src.database.PackageHistory.select",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "src.database.PackageHistory",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "src.database.Package.name",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 285,
"usage_type": "call"
}
] |
41603415015
|
from phi.flow import *
from phi.geom import Phi
import matplotlib.pyplot as plt
import time, os, sys, argparse
sys.path.append('../')
from functions import *
parser = argparse.ArgumentParser()
parser.add_argument("-res", "--resolution", type = int, default = 128, choices=[64,128,256,512], help = "set resolution")
parser.add_argument("-v", "--velocity", type=float, required = True, help="set velocity at center line")
parser.add_argument("-dt", "--time_step", type=float, help="set time step")
def main():
################ set parameters ################
args = parser.parse_args()
res = args.resolution
inflow_velocity = args.velocity
    DT = 0.5/inflow_velocity*0.01 if args.time_step is None else args.time_step
radius = 0.3
diffusivity = 0.001
t_end = 10
ep = res/128 #used for force calculation
substeps = 20 if res == 512 else 4 #used for pressure solve
################ set up phiflow domain ################
#set up domain and inflow
DOMAIN = dict(x = 2*res, y = res, bounds=Box[-1:3,-1:1], extrapolation = extrapolation.combine_sides(x = extrapolation.BOUNDARY, y = extrapolation.ZERO))
INFLOW = StaggeredGrid(HardGeometryMask(Box[:-0.98, :]), **DOMAIN)
#define poiseuille inflow velocity profile
def poiseuille_flow(field):
x = field.staggered_direction['y'].vector['x']
y = field.staggered_direction['x'].vector['y']
x_values = inflow_velocity*(1 - y**2)
y_values = 0*x
return math.stack([x_values,y_values], channel('staggered_direction'))
INFLOW_VELO = StaggeredGrid(poiseuille_flow, **DOMAIN)
#set up domain for phi
DOMAIN_PHI = dict(x = 2*res, y = res, bounds=Box[-1:3,-1:1], extrapolation = extrapolation.ZERO)
def phi_func(field):
x,y = field.unstack(dimension = 'vector')
return x**2 + y**2 - radius**2
#instantiate initial phi field
phi_field = CenteredGrid(phi_func, **DOMAIN_PHI)
phi_geom = Phi(phi_field)
phi_obs = Obstacle(phi_geom)
#regularize phi (|gradient of phi|= 1)
phi_field = make_sdf(phi_field)
#initialize field value
pressure = None
velocity = INFLOW_VELO + 10*StaggeredGrid(Noise(vector=2), **DOMAIN) * INFLOW #add noise to accelerate flow evolution
################ create path ################
path = '../prestored_data/unsteady/res{res}/dt{dt:03d}/poiseuille/'.format(res=res, dt=int(DT*1e2))
    try:
        os.makedirs(path)
    except FileExistsError:
        print('Data file already exists.')
        sys.exit()
################ prepare storage ################
pressure_record = np.zeros((int(t_end/DT),2))
viscous_record = np.zeros((int(t_end/DT),2))
velocity_record = np.zeros(int(t_end/DT))
################ start simulation ################
t_start = time.time()
for i, t in enumerate(np.arange(0, t_end, DT)):
velocity = advect.semi_lagrangian(velocity, velocity, DT)
velocity = velocity * (1- INFLOW) + INFLOW * INFLOW_VELO
velocity = diffuse.explicit(velocity, diffusivity, DT, substeps = substeps)
velocity, pressure = fluid.make_incompressible(velocity,
obstacles = (phi_obs,),
solve=math.Solve('auto', 1e-3, 0, x0 = pressure, max_iterations=1e4, gradient_solve=math.Solve('auto', 1e-5, 1e-5)))
velocity_record[i] = np.mean(velocity.at_centers().values.numpy('y,x,vector')[:,10,0])
pressure_force, viscous_force = evaluate_force(phi_field, pressure/DT, velocity, diffusivity, epsilon_factor = ep)
pressure_record[i,:] = pressure_force
viscous_record[i,:] = viscous_force
if i % 100 == 0:
print('Iteration {} finished --- time spent: {}min'.format(i, (time.time() - t_start)/60))
t_start = time.time()
with cwd(path):
with open('velocity_x_rad030_t200_vel{:04d}.txt'.format(int(inflow_velocity*1e3)), 'w') as f:
for elem in velocity.vector[0].values.numpy('x,y'):
np.savetxt(f, elem)
with open('velocity_y_rad030_t200_vel{:04d}.txt'.format(int(inflow_velocity*1e3)), 'w') as f:
for elem in velocity.vector[1].values.numpy('x,y'):
np.savetxt(f, elem)
with open('pressure_rad030_t200_vel{:04d}.txt'.format(int(inflow_velocity*1e3)), 'w') as f:
for elem in pressure.values.numpy('x,y'):
np.savetxt(f, elem)
with open('velocity_record.txt', 'w') as f:
np.savetxt(f, velocity_record)
with open('pressure_record.txt', 'w') as f:
np.savetxt(f, pressure_record)
with open('viscous_record.txt', 'w') as f:
np.savetxt(f, viscous_record)
plt.figure()
plt.plot(pressure_record[:,0])
plt.grid()
plt.savefig('pressure drag evolution')
if __name__ == '__main__':
main()
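# Example invocations (added for illustration; flag values are hypothetical,
# results are written under ../prestored_data/unsteady/...):
#   python generate_data.py -res 128 -v 1.0
#   python generate_data.py --resolution 256 --velocity 0.5 --time_step 0.005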
|
Brian-Hsieh/shapeOptim
|
code/generate_data.py
|
generate_data.py
|
py
| 4,942 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "phi.geom.Phi",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
}
] |
34094783054
|
#!/usr/bin/env python3
from plotter import collection, dataset
from plotter import histo, atlas, presets
import ROOT
import logging
logging.basicConfig(
level=logging.INFO, format="%(levelname)s (%(name)s): %(message)s"
)
log = logging.getLogger(__name__)
atlas.SetAtlasStyle()
cData = collection("Data")
cData.add_dataset(dataset("Data", "test/Nominal2/data.root"))
cBkg = collection("Bkg")
cBkg.add_dataset(dataset("Bkg", "test/Nominal2/background.root"))
cHis = collection("Hists")
cHis.add_dataset(dataset("Hists", "test/Nominal2/hists.root"))
def plot_dm(var: str = "ptll", varTitle: str = "p_{T}^{ll}", suffix: str = ""):
hD = histo("Data", cData.get_th(var+"_data"+suffix), configPath="configs/data.json")
hS = histo("Z", cHis.get_th(var+"_Z"+suffix), fillColor=ROOT.kBlue,
configPath="configs/mc.json")
hNF = histo("nonFid", cHis.get_th(var+"_nonFid"+suffix), fillColor=ROOT.kRed,
configPath="configs/mc.json")
hB = histo("Top+EW", cBkg.get_th(var+"_topew"+suffix), fillColor=ROOT.kGreen,
configPath="configs/mc.json")
dm = presets.dataMC("test"+suffix, xTitle=varTitle)
dm.ratioPad.set_yrange(0.701, 1.199)
dm.add_and_plot(hD, [hS, hNF, hB])
# dm.mainPad.basis.th.GetXaxis().SetRangeUser(0, 100)
# dm.ratioPad.basis.th.GetXaxis().SetRangeUser(0, 100)
dm.canvas.tcan.cd()
atlas.ATLASLabel(0.22, 0.9, "Internal")
extraTitles = []
    if extraTitles:
yPosition = 0.85
for title in extraTitles:
dm.canvas.add_text(title, 0.22, yPosition)
yPosition -= 0.05
plotName = var+"_"+suffix
dm.save("AI/dm/"+plotName+".png")
dm.mainPad.logy()
dm.save("AI/dm/"+plotName+"_log.png")
def plot_frac(var: str = "ptll", varTitle: str = "p_{T}^{ll}", suffix: str = ""):
hS = histo("Z", cHis.get_th(var+"_Z"+suffix), lineColor=ROOT.kBlue,
configPath="configs/mc.json")
hNF = histo("nonFid", cHis.get_th(var+"_nonFid"+suffix), lineColor=ROOT.kRed,
configPath="configs/mc.json")
hB = histo("Top+EW", cBkg.get_th(var+"_topew"+suffix), lineColor=ROOT.kGreen,
configPath="configs/mc.json")
frac = presets.fraction("frac"+suffix, xTitle=varTitle)
frac.add_and_plot([hS, hNF, hB], [hNF, hB])
# frac.mainPad.basis.th.GetXaxis().SetRangeUser(0, 100)
frac.canvas.tcan.cd()
atlas.ATLASLabel(0.22, 0.9, "Internal")
extraTitles = []
    if extraTitles:
yPosition = 0.85
for title in extraTitles:
frac.canvas.add_text(title, 0.22, yPosition)
yPosition -= 0.05
plotName = var+"_"+suffix
frac.save("AI/frac/"+plotName+".png")
frac.mainPad.logy()
frac.save("AI/frac/"+plotName+"_log.png")
plot_dm()
plot_frac()
nPt = 25
nY = 8
for y in range(nY):
suf = f"_M0_Y{y}"
log.info(f"Working on {suf}")
plot_dm(suffix=suf)
plot_frac(suffix=suf)
for pt in range(nPt):
suf = f"_PT{pt}_M0"
log.info(f"Working on {suf}")
plot_dm("yll", "y_{ll}", suffix=suf)
plot_frac("yll", "y_{ll}", suffix=suf)
|
fnechans/plotter
|
test.py
|
test.py
|
py
| 3,117 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "plotter.atlas.SetAtlasStyle",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "plotter.atlas",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "plotter.collection",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "plotter.dataset",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "plotter.collection",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "plotter.dataset",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "plotter.collection",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "plotter.dataset",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "plotter.histo",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "plotter.histo",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ROOT.kBlue",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "plotter.histo",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "ROOT.kRed",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "plotter.histo",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "ROOT.kGreen",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "plotter.presets.dataMC",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "plotter.presets",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "plotter.atlas.ATLASLabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "plotter.atlas",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "plotter.histo",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ROOT.kBlue",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "plotter.histo",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ROOT.kRed",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "plotter.histo",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "ROOT.kGreen",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "plotter.presets.fraction",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "plotter.presets",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "plotter.atlas.ATLASLabel",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "plotter.atlas",
"line_number": 74,
"usage_type": "name"
}
] |
4467004756
|
import requests
urls = dict()
urls['http'] = ['gilgil.net']
urls['https'] = [
'google.com',
'naver.com',
'daum.net',
'github.com',
'gitlab.com',
'portal.korea.ac.kr',
'yonsei.ac.kr',
'snu.ac.kr',
'kaist.ac.kr',
'kisa.or.kr',
'kitribob.kr',
'twitter.com',
'youtube.com',
'instagram.com',
'netflix.com',
'facebook.com',
'qt.io',
'programmers.co.kr',
'tistory.com',
'arxiv.org',
]
for url in urls['http']:
r = requests.get(f'http://{url}')
print(f'{r.url} status code={r.status_code}')
for url in urls['https']:
r = requests.get(f'https://{url}')
print(f'{r.url} status code={r.status_code}')
|
ugonfor/suricata-rule
|
request_url.py
|
request_url.py
|
py
| 694 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
}
] |
17789517633
|
from pygost.gost3412 import GOST3412Kuznechik as Kuz
from pygost.utils import hexdec, hexenc
from rich import print
REPLACES = {
",": "ЗПТ",
".": "ТЧК",
"-": "ТИРЕ",
";": "ТЧКИЗПТ",
}
def print_header(text):
print(header(text))
def print_kv(k, v):
print(kv(k, v))
def header(text):
return f"[bold black on bright_white] { text } [/bold black on bright_white]"
def kv(k, v):
return f"[bold cyan] { k } :[/bold cyan] { v } "
default_alph = "абвгдеёжзийклмнопрстуфхцчшщъыьэюя"
key = "ffeeddccbbaa99887766554433221100f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"
def to_indexes(text, alph=default_alph):
return [alph.index(symbol) for symbol in text]
def to_symbols(nums, alph=default_alph):
return "".join([alph[num] for num in nums])
def clear_text(text, alph=default_alph):
import re
text = replace_special(text)
text = text.lower()
text = re.sub(f"[^{alph}]", "", text)
return text
def replace_special(text, replaces=REPLACES):
for key, value in replaces.items():
text = text.replace(key, value)
return text
def is_hex(s):
import string
try:
return all(c in string.hexdigits for c in s)
except:
return False
def get_key(key: str) -> bytes:
if is_hex(key):
key = hexdec(key)
else:
key = bytes(key, "utf-8")
return key
def get_text(text: str) -> bytes:
if type(text) == str:
if is_hex(text):
text = hexdec(text)
else:
text = bytes(text, "utf-8")
return text
def get_cipher(key: str) -> Kuz:
    key = get_key(key)
    return Kuz(key)
def enc(text: str, key: str = key):
    cipher = get_cipher(key)
    byte_text = get_text(text)
    enc_bytes = cipher.encrypt(byte_text)
    enc_text = hexenc(enc_bytes)
    return enc_text
def dec(text: str, key: str = key, t: str = "str"):
    cipher = get_cipher(key)
    byte_text = get_text(text)
    dec_bytes = cipher.decrypt(byte_text)
    dec_text = ""
    if t == "hex":
        dec_text = hexenc(dec_bytes)
    else:
        dec_text = dec_bytes.decode("utf-8")
    return dec_text
def main():
print_header("Пример из GOST_R_34_12-2015")
text = input("Введите текст-бит: ") #"1122334455667700ffeeddccbbaa9988" , деш_кл = 7f679d90bebc24305a468d42b9d4edcd
key = input("Введите ключ-бит: ") #8899aabbccddeeff0011223344556677fedcba98765432100123456789abcdef
question = input("Выполнить действие (шифровать/дешифоровать): ")
if question == "шифровать":
print_kv("Открытый текст", text)
enc_text = enc(text, key)
print_kv("Результат", enc_text)
elif question == "дешифоровать":
print_kv("Шифр", text)
dec_text = dec(text, key, t="hex")
print_kv("Расшифр.", dec_text)
if __name__ == "__main__":
main()
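# Round-trip sketch (added for illustration) using the GOST R 34.12-2015
# test vector quoted in the comments inside main():
#
#   k = "8899aabbccddeeff0011223344556677fedcba98765432100123456789abcdef"
#   ct = enc("1122334455667700ffeeddccbbaa9988", k)
#   assert ct == "7f679d90bebc24305a468d42b9d4edcd"
#   assert dec(ct, k, t="hex") == "1122334455667700ffeeddccbbaa9988"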
|
VasiliiSkrypnik/PKA_2023
|
files/new_lab/lab7/Kuznyechik.py
|
Kuznyechik.py
|
py
| 3,152 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rich.print",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "string.hexdigits",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygost.utils.hexdec",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pygost.utils.hexdec",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygost.gost3412.GOST3412Kuznechik",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygost.gost3412.GOST3412Kuznechik",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "pygost.utils.hexenc",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pygost.utils.hexenc",
"line_number": 107,
"usage_type": "call"
}
] |
26331405503
|
import cv2
import glob
class FrameIterator:
"""
An iterator to iterate over multiple files containing either
    images or videos. The files are gathered using pattern matching
and read with the universal cv2.VideoCapture().
...
Attributes
----------
pathpattern : str
input path pattern to be matched
verbose : int
verbosity (0,1) for logging into standard output
Methods
-------
current_frame():
Returns the path and position of the current frame.
"""
def __init__(self, pathpattern, verbose=0):
self.pathpattern = pathpattern
# list all files matching the pathpattern
self.files = glob.glob(self.pathpattern)
if verbose >= 1: print(f'filenames: {self.files}')
# check that at least one file exists
self.n_files = len(self.files)
if self.n_files < 1:
raise ValueError(f'Could not find any filename matching the path pattern: \'{pathpattern}\'')
def __iter__(self):
# initialize counter and current VideoCapture
self.index = 0
try: self.cap = cv2.VideoCapture(self.files[self.index])
        except: raise RuntimeError(f'Opening VideoCapture from \'{self.files[self.index]}\' failed.')
return self
def __next__(self):
# try to read next frame
try: ret, frame = self.cap.read()
        except: raise RuntimeError(f'Reading frame from \'{self.files[self.index]}\' failed.')
        # return frame if read was successful
        if ret: return frame
        # try to open next VideoCapture if read was unsuccessful
        self.index = self.index + 1
        # stop iterating if there are no more files
        if self.index >= self.n_files:
            raise StopIteration
        # initialize next VideoCapture
        try: self.cap = cv2.VideoCapture(self.files[self.index])
        except: raise RuntimeError(f'Opening VideoCapture from \'{self.files[self.index]}\' failed.')
# return first frame of next VideoCapture
return self.__next__()
def current_frame(self):
'''Return path and position of the current frame.'''
path = self.files[self.index]
pos = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES)) - 1
return f'{path}::{pos}'
def first(self):
'''Return first frame from first file'''
# open new video capture
try: cap = cv2.VideoCapture(self.files[0])
        except: raise RuntimeError(f'Opening VideoCapture from \'{self.files[0]}\' failed.')
        # read next frame
        try: ret, frame = cap.read()
        except: raise RuntimeError(f'Reading frame from \'{self.files[0]}\' failed.')
        # if stream is empty
        if not ret: raise RuntimeError(f'Reading frame from \'{self.files[0]}\' failed.')
return frame
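# Minimal usage sketch (added for illustration; the glob pattern below is
# hypothetical -- any files readable by cv2.VideoCapture will do):
#
#   frames = FrameIterator('videos/*.mp4', verbose=1)
#   for frame in frames:
#       print(frames.current_frame())  # e.g. 'videos/a.mp4::0'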
|
bunjj/Catadioptric-Stereo
|
FrameIterator.py
|
FrameIterator.py
|
py
| 2,894 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 78,
"usage_type": "call"
}
] |
27550279361
|
import random
import urllib.request
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
res = openai.Image.create_variation(
image=open("838.png", "rb"), # kindly replace "838.png" with an image on your local computer
n=2, # no of variations to generate
size="1024x1024",
response_format="url"
)
resp = res["data"]
resp_list = [resp[x]["url"] for x in range(len(resp))] # list of all generated image variations
def download_image(url_list: list):
"""
this method will loop through the url_list,
a list,containing URLS
download the image from the URL, and save it locally
:param url_list:
"""
for url in url_list:
name = random.randrange(1, 100)
full_name = str(name) + '-variations.png'
urllib.request.urlretrieve(url, full_name)
        print(f'image {full_name} downloaded successfully...')
download_image(resp_list)
|
Afeez1131/openAI-image-generation
|
image_variations.py
|
image_variations.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openai.Image.create_variation",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "openai.Image",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 28,
"usage_type": "name"
}
] |
11951628523
|
import plotly.express as px
import streamlit as st
from functions import *
st.set_page_config(
page_title="Time series annotations", page_icon="⬇"
)
# @st.cache(allow_output_mutation=True)
@st.cache_data
def load_data(op_data):
# df_despesa = pd.read_csv('https://raw.githubusercontent.com/jsaj/st_forecasting/master/datasets/despesa.csv')
# df_receita = pd.read_csv('https://raw.githubusercontent.com/jsaj/st_forecasting/master/datasets/receita.csv')
df = pd.read_excel(
'https://onedrive.live.com/download?resid=71AA33284B297464%21422&authkey=!ABm-ikjLePrrS74&excel=2.xslx',
sheet_name='{}'.format(op_data))
return df
# Sidebar selectbox to choose what to forecast: revenues (Receitas) or expenses (Despesas)
op_data = st.sidebar.selectbox('O que deseja prever?', ['Receitas', 'Despesas'])
# Load the data, or reuse the cached copy
df = load_data(op_data)
df_filtrado = processing_columns_values(df, op_data)
# df_receita_despesa = df_receita.merge(df_despesa, how='left', on=['ds', 'Unidade Gestora']).fillna(0)
# df_receita_despesa['ds'] = pd.to_datetime(df_receita_despesa['ds'])
if op_data == 'Despesas':
    # Sidebar filter
list_to_filter = ['TODOS'] + list(df['NATUREZA'].drop_duplicates())
filtro_type_data = st.sidebar.selectbox('Elemento: ',
list_to_filter,
index=list_to_filter.index(
'TODOS'))
else:
list_to_filter = ['TODAS'] + list(df['ESPÉCIE DA RECEITA'].drop_duplicates())
filtro_type_data = st.sidebar.selectbox('Espécie da Receita:', list_to_filter,
index=list_to_filter.index('TODAS'))
df_filtrado = processing_data(df, op_data, filtro_type_data)
st.write(df.head())
st.write(df_filtrado.head())
type_periods = st.sidebar.selectbox('Qual o intervalo da previsão? ', ['Mensal', 'Semestral'])
if type_periods == 'Mensal':
    # Sidebar filter for the number of forecast months
n_periods = st.sidebar.selectbox('Quantos meses?', list(range(1, 13)))
else:
    # Sidebar filter for the number of forecast semesters
n_periods = st.sidebar.selectbox('Quantos semestres? ', list(range(1, 13)))
# Rename the columns so the model can recognize them
df_filtrado.columns = ['ds', 'y']
# Selectbox to choose the predictive model
model_name = st.sidebar.selectbox('Modelo preditivo:', ['ARIMA', 'Prophet'])
if filtro_type_data in ['TODAS', 'TODOS']:
    # Selectbox to choose whether to use an exogenous variable
op_exog = st.sidebar.selectbox('Usar variável exógena?:', ['Sim', 'Não'], index=list(['Sim', 'Não']).index('Não'))
if op_exog == 'Sim':
        # Selectbox to choose the exogenous variable
if op_data == 'Receitas':
exog_var = st.sidebar.selectbox('Variável exógena:', list(df['ESPÉCIE DA RECEITA'].drop_duplicates()))
else:
exog_var = st.sidebar.selectbox('Variável exógena:', list(df['NATUREZA'].drop_duplicates()))
df_to_predict = create_exog_table(df, df_filtrado, op_data, exog_var)
        # Slider to select the percentage
porcentagem = st.sidebar.slider('% vs. Var. Exógena:', min_value=0, max_value=100, value=100, step=1)
        # Apply the percentage to the DataFrame, scaling the exogenous column
df_to_predict[exog_var] = df_to_predict[exog_var] * (porcentagem / 100)
st.write(df_to_predict)
        # Build the forecasting model
if model_name == 'ARIMA':
predictions = predict_ARIMA(df=df_to_predict, n_periods=n_periods, type_periods=type_periods, exog_var=exog_var)
df_to_predict = df_to_predict.reset_index()
elif model_name == 'Prophet':
predictions = predict_prophet(df=df_to_predict, n_periods=n_periods, type_periods=type_periods, exog_var=exog_var)
else:
        # Build the forecasting model
if model_name == 'ARIMA':
predictions = predict_ARIMA(df=df_filtrado, n_periods=n_periods, type_periods=type_periods, exog_var=None)
df_filtrado = df_filtrado.reset_index()
elif model_name == 'Prophet':
predictions = predict_prophet(df=df_filtrado, n_periods=n_periods, type_periods=type_periods, exog_var=None)
else:
    # Build the forecasting model
if model_name == 'ARIMA':
predictions = predict_ARIMA(df=df_filtrado, n_periods=n_periods, type_periods=type_periods, exog_var=None)
df_filtrado = df_filtrado.reset_index()
elif model_name == 'Prophet':
predictions = predict_prophet(df=df_filtrado, n_periods=n_periods, type_periods=type_periods, exog_var=None)
# st.write(df_filtrado)
# Convert values to millions (M) or thousands (K)
def format_value(value):
if abs(value) >= 1e6:
return '{:.2f}M'.format(value / 1e6)
elif abs(value) >= 1e3:
return '{:.2f}K'.format(value / 1e3)
else:
return '{:.2f}'.format(value)
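# Worked examples for format_value (added for illustration):
#   format_value(1_250_000) -> '1.25M'
#   format_value(12_500)    -> '12.50K'
#   format_value(950)       -> '950.00'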
# Build the line chart with Plotly Express
fig = px.line(df_filtrado, x='ds', y='y', text=[format_value(val) for val in df_filtrado['y']],
labels={'y': '{} atuais'.format(op_data)},
title='{} atuais vs. {} preditas'.format(op_data, op_data))
# Add the forecast series
fig.add_scatter(x=predictions['ds'], y=predictions['yhat'], mode='lines+text', text=[format_value(val) for val in predictions['yhat']],
name='{} preditas'.format(op_data))
# Customize the chart layout
fig.update_traces(textposition='top center')
fig.update_layout(xaxis_title='Mês-Ano', yaxis_title='{}'.format(op_data), showlegend=True)
# Display the chart with Streamlit
st.plotly_chart(fig)
# Compute the mean of the forecast
mean_prediction = predictions['yhat'].mean()
df_filtrado = df_filtrado.loc[df_filtrado['ds'] >= '2023-01-01']
# Build the bar chart with Plotly Express
fig = px.bar(df_filtrado, x='ds', y='y', text=[format_value(val) for val in df_filtrado['y']],
labels={'y': '{} atuais'.format(op_data)},
title='{} atuais vs. {} preditas - Média de previsão: {}'.format(op_data, op_data, format_value(mean_prediction)))
# Add the forecast series
fig.add_bar(x=predictions['ds'], y=predictions['yhat'], text=[format_value(val) for val in predictions['yhat']],
name='{} preditas'.format(op_data))
# Customize the chart layout
fig.update_traces(textposition='outside')
fig.update_layout(xaxis_title='Mês-Ano', yaxis_title='{}'.format(op_data), showlegend=True)
# Display the chart with Streamlit
st.plotly_chart(fig)
# m = Prophet()
#
# future = m.make_future_dataframe(periods=periods_input)
# @st.experimental_memo(ttl=60 * 60 * 24)
# def get_chart(data):
# hover = alt.selection_single(
# fields=["date"],
# nearest=True,
# on="mouseover",
# empty="none",
# )
#
# lines = (
# alt.Chart(data, height=500, title="Evolution of stock prices")
# .mark_line()
# .encode(
# x=alt.X("date", title="Date"),
# y=alt.Y("price", title="Price"),
# color="symbol",
# )
# )
#
# # Draw points on the line, and highlight based on selection
# points = lines.transform_filter(hover).mark_circle(size=65)
#
# # Draw a rule at the location of the selection
# tooltips = (
# alt.Chart(data)
# .mark_rule()
# .encode(
# x="yearmonthdate(date)",
# y="price",
# opacity=alt.condition(hover, alt.value(0.3), alt.value(0)),
# tooltip=[
# alt.Tooltip("date", title="Date"),
# alt.Tooltip("price", title="Price (USD)"),
# ],
# )
# .add_selection(hover)
# )
#
# return (lines + points + tooltips).interactive()
#
#
# st.title("⬇ Time series annotations")
#
# st.write("Give more context to your time series using annotations!")
#
# col1, col2, col3 = st.columns(3)
# with col1:
# ticker = st.text_input("Choose a ticker (⬇💬👇ℹ️ ...)", value="⬇")
# with col2:
# ticker_dx = st.slider(
# "Horizontal offset", min_value=-30, max_value=30, step=1, value=0
# )
# with col3:
# ticker_dy = st.slider(
# "Vertical offset", min_value=-30, max_value=30, step=1, value=-10
# )
#
# # Original time series chart. Omitted `get_chart` for clarity
# source = get_data()
# chart = get_chart(source)
#
# # Input annotations
# ANNOTATIONS = [
# ("Mar 01, 2008", "Pretty good day for GOOG"),
# ("Dec 01, 2007", "Something's going wrong for GOOG & AAPL"),
# ("Nov 01, 2008", "Market starts again thanks to..."),
# ("Dec 01, 2009", "Small crash for GOOG after..."),
# ]
#
# # Create a chart with annotations
# annotations_df = pd.DataFrame(ANNOTATIONS, columns=["date", "event"])
# annotations_df.date = pd.to_datetime(annotations_df.date)
# annotations_df["y"] = 0
# annotation_layer = (
# alt.Chart(annotations_df)
# .mark_text(size=15, text=ticker, dx=ticker_dx, dy=ticker_dy, align="center")
# .encode(
# x="date:T",
# y=alt.Y("y:Q"),
# tooltip=["event"],
# )
# .interactive()
# )
#
# # Display both charts together
# st.altair_chart((chart + annotation_layer).interactive(), use_container_width=True)
#
# st.write("## Code")
#
# st.write(
# "See more in our public [GitHub"
# " repository](https://github.com/streamlit/example-app-time-series-annotation)"
# )
#
# st.code(
# f"""
# import altair as alt
# import pandas as pd
# import streamlit as st
# from vega_datasets import data
#
# @st.experimental_memo
# def get_data():
# source = data.stocks()
# source = source[source.date.gt("2004-01-01")]
# return source
#
# source = get_data()
#
# # Original time series chart. Omitted `get_chart` for clarity
# chart = get_chart(source)
#
# # Input annotations
# ANNOTATIONS = [
# ("Mar 01, 2008", "Pretty good day for GOOG"),
# ("Dec 01, 2007", "Something's going wrong for GOOG & AAPL"),
# ("Nov 01, 2008", "Market starts again thanks to..."),
# ("Dec 01, 2009", "Small crash for GOOG after..."),
# ]
#
# # Create a chart with annotations
# annotations_df = pd.DataFrame(ANNOTATIONS, columns=["date", "event"])
# annotations_df.date = pd.to_datetime(annotations_df.date)
# annotations_df["y"] = 0
# annotation_layer = (
# alt.Chart(annotations_df)
# .mark_text(size=15, text="{ticker}", dx={ticker_dx}, dy={ticker_dy}, align="center")
# .encode(
# x="date:T",
# y=alt.Y("y:Q"),
# tooltip=["event"],
# )
# .interactive()
# )
#
# # Display both charts together
# st.altair_chart((chart + annotation_layer).interactive(), use_container_width=True)
#
# """,
# "python",
# )
|
jsaj/st_forecasting
|
st_forecasting.py
|
st_forecasting.py
|
py
| 10,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "streamlit.set_page_config",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.cache_data",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "streamlit.write",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.slider",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "streamlit.write",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "plotly.express.line",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "plotly.express.bar",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 152,
"usage_type": "call"
}
] |
3864065470
|
from collections import OrderedDict
""" https://www.scribbr.fr/elements-linguistiques/determinant/
Le déterminant permet de présenter le nom.
Il le précède et compose avec lui le groupe nominal.
Un adjectif ou un autre déterminant peuvent se placer entre le déterminant et le nom.
"""
déterminants = OrderedDict({
"articles": {
"indéfinis": ["un", "une", "des"],
"définis": ["le", "l’", "la", "les"],
"définis contractés": ["au", "du", "à la", "de la", "aux", "des"],
"partitifs": ["du", "de l’", "de la", "des"],
},
"démonstratifs": ["ce", "cet", "cette", "ces"],
"possessifs": ["mon", "ton", "son", "ma", "ta", "sa", "mes", "tes", "ses", "notre", "votre", "leur", "nos", "vos", "leurs"],
"exclamatifs et interrogatifs": ["quel", "quelle", "quels", "quelles"],
"numéraux": ["un", "deux", "trois", "quatre", "premier", "deuxième", "troisième", "quatrième"],
"relatifs": ["lequel", "laquelle", "lesquels", "lesquelles", "duquel", "de laquelle", "desquels", "desquelles", "auquel", "à laquelle", "auxquels", "auxquelles"],
"indéfinis": ["certain", "quelque", "aucun", "nul", "chaque", "différent", "plusieurs"],
})
""" https://www.scribbr.fr/elements-linguistiques/les-adjectifs/
Les adjectifs en français sont qualifiés de « qualificatifs », car ils permettent de donner des informations sur le nom auquel ils se rapportent.
Ils s’accordent en genre et en nombre avec le nom qu’ils qualifient.
"""
|
Fushy/PythonLib
|
Francais.py
|
Francais.py
|
py
| 1,495 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.OrderedDict",
"line_number": 8,
"usage_type": "call"
}
] |
38254722790
|
from django.shortcuts import render
from django.contrib.auth.models import User
from hknweb.utils import login_and_permission
from hknweb.candidate.utils_candportal import CandidatePortalData
@login_and_permission("candidate.change_offchallenge")
def summary(request):
cands = User.objects.filter(groups__name="candidate")
headers, rows = [], []
for cand in cands:
data = CandidatePortalData(cand).get_user_cand_data()
if not headers:
headers = [
"Name",
"Forms",
"Payments",
"Project",
"BitByte",
"Hangouts",
]
for event in data["events"]:
event_title = event["title"]
if len(event_title) > 15:
event_title = event_title.split()[0]
headers.append(event_title)
headers.append("Overall")
status = [
data["candidate_forms"]["all_done"],
data["due_payments"]["all_done"],
data["committee_project"]["all_done"],
data["bitbyte"]["status"],
data["interactivities"]["status"],
*(e["status"] for e in data["events"]),
]
status.append(all(status))
row = {
"name": f"{cand.first_name} {cand.last_name} ({cand.username})",
"status": status,
"link": f"/cand/portal/{cand.username}",
}
rows.append(row)
context = {
"headers": headers,
"rows": rows,
}
return render(request, "candidate/summary.html", context=context)
|
Gabe-Mitnick/hknweb
|
hknweb/candidate/views/summary.py
|
summary.py
|
py
| 1,635 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "hknweb.candidate.utils_candportal.CandidatePortalData",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "hknweb.utils.login_and_permission",
"line_number": 8,
"usage_type": "call"
}
] |
25228010428
|
import numpy as np
import cv2 as cv
from tkinter import *
from tkinter.filedialog import *
#button1 = Button(window,text="Upload",fg="black",bg="gray",command=upload).pack()
img = []
class eye:
def __init__(self,master):
frame = Frame(master)
frame.pack()
button1 = Button(frame,text="Upload",fg="green",bg="gray",command=self.upload).pack()
#quit_button = Button(frame, text ="Quit",bg="Red",fg="green",command=frame.destroy).pack()
def upload(self):
imgTemp = askopenfilename()
img = cv.imread(imgTemp)
img = np.array(img, dtype=np.uint8)
face_cascade = cv.CascadeClassifier('data/haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier('data/haarcascade_eye.xml')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv.imshow('img',img)
cv.waitKey(0)
        cv.destroyAllWindows()
#def exit(self):
#button1 = Button(window,text="Upload",fg="black",bg="gray",command=obj.upload).pack()
#button1 = Button(window,text="Upload",fg="black",bg="green",command=disp).pack()
def quit(root):
root.destroy()
root = Tk()
root.title("FaceIt___")
root.geometry("500x500")
button2 = Button(root,text="Exit",fg="green",bg="red",command=lambda root=root:quit(root)).pack()
root.configure(background="black")
obj = eye(root)
#button1 = Button(root,text="Exit",fg="green",bg="red",command=lambda root=root:quit(root)).pack()
root.mainloop()
|
gods-mack/face-Detection_project
|
eye.py
|
eye.py
|
py
| 1,816 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindow",
"line_number": 40,
"usage_type": "call"
}
] |
72627145467
|
import inspect
import os
from typing import Any, Dict, List, Optional, Union
import yaml
import pmd
import pmd.core
# These have to be written explicitly for typing
from pmd.core.Builder import Builder
from pmd.core.Job import Job
from pmd.core.Lammps import Lammps
from pmd.core.Procedure import Procedure
from pmd.core.System import System
from pmd.util import Pmdlogging, build_dir
PRIMITIVE_TYPES = (str, int, float, bool)
SUPPORTED_YAML_EXTS = (".yml", ".yaml")
OBJECT_PRFIX = 'pmd.'
# Custom yaml config file dictionary constructor
def to_yaml_dict(cls: Union[System, Builder, Lammps, Procedure, Job]) -> Dict:
return {
# strip off the front underscore and only add to dict
# if value is not None
k.lstrip('_'): custom_class_yaml_dumper(v)
for k, v in cls.__dict__.items() if v is not None
}
# Custom method to dump values of non-primitive type to the yaml config file
def custom_class_yaml_dumper(v: Any) -> Any:
return_value = v
# If value is a list, recursively go through each item in the list
# Specifically, this is for the Lammps procedure list
if isinstance(v, list):
return_value = [custom_class_yaml_dumper(i) for i in v]
# If value is a non-primitive type, expand it to a dict
elif not isinstance(v, PRIMITIVE_TYPES):
return_value = {f"{OBJECT_PRFIX}{v}": to_yaml_dict(v)}
return return_value
def instantiate_from_cls_name(class_name: str, prop_dict: dict):
# first obtain a list of all classes in this module
class_list = inspect.getmembers(pmd.core, inspect.isclass)
class_dict = {k: v for k, v in class_list}
# find the matching class
    filtered_class_name = class_name.removeprefix(OBJECT_PRFIX).split('-')[0]  # removeprefix (3.9+) strips the prefix string, unlike lstrip's char-set semantics
the_class = class_dict.get(filtered_class_name, None)
if the_class is None:
raise NameError(
f'{class_name} type is not found in {pmd.core.__name__} module')
# get the constructor parameter list of the class
sig = inspect.signature(the_class.__init__)
param_keys = list(sig.parameters.keys())
if param_keys[0] == 'self':
param_keys = param_keys[1:]
# remove props not in the parameter list of the class
filtered_prop_dict = {
k: custom_class_yaml_loader(v)
for k, v in prop_dict.items() if k in param_keys
}
Pmdlogging.info(
f'{class_name} object successfully loaded from the YAML file.')
return the_class(**filtered_prop_dict)
# Custom method to load values from the yaml config file
def custom_class_yaml_loader(v: Any) -> Any:
return_value = v
# If value is a list, recursively go through each item in the list
# Specifically, this is for the Lammps procedure list
if isinstance(v, list):
return_value = [custom_class_yaml_loader(i) for i in v]
# If value is a dict, instantiate it to an object
elif isinstance(v, dict):
class_name, props_dict = next(iter(v.items()))
return_value = instantiate_from_cls_name(class_name, props_dict)
# If value is starts with pmd., instantiate it to an object with
# default params
elif isinstance(v, str) and v.startswith(OBJECT_PRFIX):
return_value = instantiate_from_cls_name(v, {})
return return_value
class Pmd:
'''Template object to perform tasks for Systems, Lammps, and Jobs
altogether (e.g. create data files, lammps input files, job scheduler
files, or config files)
Attributes:
system (System): a System object
lammps (Lammps or list[Lammps]): one or a list of Lammps objects
job (Job or list[Job]): one or a list of Job objects
'''
def __init__(
self,
system: Optional[System] = None,
lammps: Optional[Union[Lammps, List[Lammps]]] = None,
job: Optional[Union[Job, List[Job]]] = None,
):
if lammps and not isinstance(lammps, list):
lammps = [lammps]
if job and not isinstance(job, list):
job = [job]
self._system = system
self._lammps = lammps
self._job = job
@build_dir
def create(self,
output_dir: str = '.',
save_config: bool = False,
config_fname: str = 'config.yaml') -> None:
        '''Method to create files from all the pmd objects. This method
        can also automatically generate a config file if `save_config` input
argument is set to True.
Parameters:
output_dir (str): Directory for all the generated files; default:
`"."`
save_config (bool): Whether to save a config file; default: `False`
config_fname (str): Name of the config file; default:
`"config.yaml"`
Returns:
None
'''
if self._system:
self._system.write_data(output_dir)
if self._lammps:
for lmp in self._lammps:
lmp.write_lammps(output_dir)
if self._job:
for job in self._job:
job.write_job(output_dir)
if save_config:
self.save_config(output_dir, config_fname)
@build_dir
def save_config(self, output_dir: str, config_fname: str = 'config.yaml'):
'''Method to create a config file with all the details of the System,
Lammps, or Job settings. This method only creates the config file.
Parameters:
output_dir (str): Directory for all the generated files; default:
`"."`
config_fname (str): Name of the config file; default:
`"config.yaml"`
Returns:
None
'''
config_dict = {'pmd.version': pmd.__version__}
if self._system:
config_dict[f'{OBJECT_PRFIX}{self._system}'] = to_yaml_dict(
self._system)
if self._lammps and len(self._lammps) > 1:
for i, lmp in enumerate(self._lammps):
config_dict[f'{OBJECT_PRFIX}{lmp}-{i}'] = to_yaml_dict(lmp)
elif self._lammps and len(self._lammps) == 1:
config_dict[f'{OBJECT_PRFIX}{self._lammps[0]}'] = to_yaml_dict(
self._lammps[0])
if self._job and len(self._job) > 1:
for i, job in enumerate(self._job):
config_dict[f'{OBJECT_PRFIX}{job}-{i}'] = to_yaml_dict(job)
elif self._job and len(self._job) == 1:
config_dict[f'{OBJECT_PRFIX}{self._job[0]}'] = to_yaml_dict(
self._job[0])
with open(os.path.join(output_dir, config_fname), 'w') as yaml_file:
yaml.safe_dump(config_dict, yaml_file, sort_keys=False)
Pmdlogging.info(f'Config file - {config_fname} successfully '
f'saved to {output_dir}')
@staticmethod
def load_config(config_file: str, output_dir: str = '.'):
'''Method to load a config file and create all the objects listed in
the config file
Parameters:
config_file (str): Config file to load
output_dir (str): Directory for all the generated files; default:
`"."`
Returns:
None
'''
if os.path.splitext(config_file)[1] not in SUPPORTED_YAML_EXTS:
raise ValueError(
f'The file you are loading does not seem to be a yaml file'
f'(file must end with {" ,".join(SUPPORTED_YAML_EXTS)})')
with open(config_file) as yaml_file:
yaml_dict = yaml.safe_load(yaml_file)
for k, v in yaml_dict.items():
# do not instantiate an object if it is the version item
if k == 'pmd.version':
if v != pmd.__version__:
Pmdlogging.warning('Config file version does not '
'match your current PMD version')
continue
obj = instantiate_from_cls_name(k, v)
if isinstance(obj, System):
obj.write_data(output_dir)
elif isinstance(obj, Lammps):
obj.write_lammps(output_dir)
elif isinstance(obj, Job):
obj.write_job(output_dir)
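# Minimal usage sketch (added for illustration; the objects below are
# placeholders -- see System, Lammps and Job for their real constructors):
#
#   pmd_run = Pmd(system=my_system, lammps=my_lammps, job=my_job)
#   pmd_run.create(output_dir='sim', save_config=True)
#   # ...and later, rebuild everything from the saved config:
#   Pmd.load_config('sim/config.yaml', output_dir='sim_rerun')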
|
ritesh001/Polymer-Molecular-Dynamics
|
pmd/core/Pmd.py
|
Pmd.py
|
py
| 8,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pmd.core.System.System",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pmd.core.Builder.Builder",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pmd.core.Lammps.Lammps",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pmd.core.Procedure.Procedure",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pmd.core.Job.Job",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "inspect.getmembers",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pmd.core",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "inspect.isclass",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pmd.core",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "inspect.signature",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pmd.util.Pmdlogging.info",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pmd.util.Pmdlogging",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "pmd.core.System.System",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "pmd.core.Lammps.Lammps",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pmd.core.Job.Job",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pmd.util.build_dir",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "pmd.__version__",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_dump",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pmd.util.Pmdlogging.info",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pmd.util.Pmdlogging",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "pmd.util.build_dir",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "pmd.__version__",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "pmd.util.Pmdlogging.warning",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pmd.util.Pmdlogging",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "pmd.core.System.System",
"line_number": 232,
"usage_type": "argument"
},
{
"api_name": "pmd.core.Lammps.Lammps",
"line_number": 234,
"usage_type": "argument"
},
{
"api_name": "pmd.core.Job.Job",
"line_number": 236,
"usage_type": "argument"
}
] |
73706490427
|
"""Image tools interfaces."""
from nilearn.image import resample_to_img
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
from nipype import logging
from nipype.interfaces.base import (traits, TraitedSpec,
BaseInterfaceInputSpec, SimpleInterface,
File)
LOGGER = logging.getLogger('nipype.interface')
class _ResampleTPMInputSpec(BaseInterfaceInputSpec):
moving_file = File(exists=True,
mandatory=True,
desc='Eroded Tissues probability map file in T1 space')
fixed_file = File(exists=True,
mandatory=True,
desc=' timeseries mask in BOLD space')
class _ResampleTPMOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='output Resampled WM file')
class ResampleTPM(SimpleInterface):
"""
    Resample the white matter tissue probability mask to BOLD space.
"""
input_spec = _ResampleTPMInputSpec
output_spec = _ResampleTPMOutputSpec
# def _run_interface(self,runtime):
# self._results['out_file'] = resample_WM(
# self.inputs.moving_file,
# self.inputs.fixed_file,
# newpath=runtime.cwd
# )
# return runtime
def _run_interface(self, runtime):
out_file = _TPM_2_BOLD(
self.inputs.moving_file,
self.inputs.fixed_file,
newpath=runtime.cwd,
)
self._results['out_file'] = out_file
return runtime
def _TPM_2_BOLD(moving_file, fixed_file, newpath=None):
"""
    Resample the input white matter tissue probability map using resample_to_img from nilearn.
"""
out_file = fname_presuffix(moving_file,
suffix='_resampled',
newpath=newpath)
resample_wm = resample_to_img(source_img=moving_file,
target_img=fixed_file,
interpolation='nearest')
resample_wm.to_filename(out_file)
return out_file
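# Minimal usage sketch (added for illustration; file paths are hypothetical):
#
#   resampler = ResampleTPM(moving_file='wm_tpm_eroded.nii.gz',
#                           fixed_file='bold_mask.nii.gz')
#   result = resampler.run()
#   print(result.outputs.out_file)  # path to the '_resampled' image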
|
jerdra/TIGR_PURR
|
bin/resample.py
|
resample.py
|
py
| 2,104 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nipype.logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "nipype.logging",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "nipype.interfaces.base.BaseInterfaceInputSpec",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "nipype.interfaces.base.File",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.File",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.TraitedSpec",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "nipype.interfaces.base.File",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.SimpleInterface",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "nipype.utils.filemanip.fname_presuffix",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nilearn.image.resample_to_img",
"line_number": 61,
"usage_type": "call"
}
] |
11485777017
|
import random
import subprocess
from difflib import SequenceMatcher
from typing import cast
from smellybot.access_control import Everyone, ModPlus
from smellybot.bot_command import BotCommand, SuperCommand
from smellybot.bot_module import BotModule
from smellybot.config.definition import ListConfigDefinition
from smellybot.config.element import ListConfigElement
from smellybot.config.secure_config import Config
from smellybot.config.types.string import CUsername, CString
from smellybot.context import MessageContext
class Owoifier(BotModule):
def __init__(self, config: Config, bot_channel):
super().__init__(config, bot_channel)
self.command_list()
self.auto_targets: ListConfigElement = cast(ListConfigElement, config.register(
ListConfigDefinition("owoifier.auto_targets", ctype=CUsername(), unique=True),
read_access_control=ModPlus()
))
self.endings: ListConfigElement = cast(ListConfigElement, config.register(
ListConfigDefinition("owoifier.endings", ctype=CString(), unique=True),
read_access_control=ModPlus()
))
@classmethod
def name(cls):
return "owoifier"
def command_list(self):
self.add_command(BotCommand(Config("owoify", self.config), self, self.owoify, name="owoify", access_control=Everyone()))
owoifier_command = SuperCommand(
Config("owoifier", self.config),
self,
access_control=ModPlus(),
name="owoifier"
)
target_command = BotCommand(
Config("target", owoifier_command.config),
self,
self.target,
access_control=ModPlus(),
name="target"
)
untarget_command = BotCommand(
Config("untarget", owoifier_command.config),
self,
self.untarget,
access_control=ModPlus(),
name="untarget"
)
owoifier_command.add_subcommand(target_command)
owoifier_command.add_subcommand(untarget_command)
self.add_command(owoifier_command)
async def _handle_message(self, context: MessageContext):
if context.author.username.lower() not in self.auto_targets.get():
return
if context.message.startswith("!"):
return
self.logger.info(f"{context.author.username}: {context.message}")
owo_message = self.owoify_message(context.message)
if not self.message_differs_significantly(context.message, owo_message):
return
owo_message = self.add_ending(owo_message)
await self.bot_channel.send(owo_message)
async def owoify(self, _context: MessageContext, arguments: str, _command: str, _head: str, **_kwargs):
if arguments:
await self.send_owo_message(arguments)
elif self.bot_channel.context.previous_context.message:
await self.send_owo_message(self.bot_channel.context.previous_context.message)
async def target(self, _context: MessageContext, arguments: str, _command: str, _head: str, **_kwargs):
self.auto_targets.add(arguments)
await self.bot_channel.send("Target acquired")
async def untarget(self, _context: MessageContext, arguments: str, _command: str, _head: str, **_kwargs):
self.auto_targets.remove(arguments)
await self.bot_channel.send("We'll get 'em next time")
async def send_owo_message(self, message: str):
owo_message = self.owoify_message(message)
owo_message = self.add_ending(owo_message)
await self.bot_channel.send(owo_message)
    def message_differs_significantly(self, original_message: str, owo_message: str):
        # Treat the owoified text as meaningfully different only if it differs
        # from the original by more than 4% (1 - SequenceMatcher similarity).
        difference = 1 - SequenceMatcher(None, original_message.strip(), owo_message.strip()).ratio()
        return difference > 0.04
    def owoify_message(self, message: str):
        # Delegates to the external `owoifier` CLI, which must be on the PATH.
        result = subprocess.run(['owoifier', "-t", message], capture_output=True)
        return result.stdout.decode("utf-8")
def add_ending(self, message: str):
separators = [", ", " "]
endings = self.endings.get()
if not endings:
return message
separator = random.choice(separators)
ending = random.choice(endings)
return message.rstrip() + separator + ending
|
schmarcel02/smellybot
|
smellybot/modules/owoifier.py
|
owoifier.py
|
py
| 4,337 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "smellybot.bot_module.BotModule",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "smellybot.config.secure_config.Config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "smellybot.config.element.ListConfigElement",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "smellybot.config.definition.ListConfigDefinition",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "smellybot.config.types.string.CUsername",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.ModPlus",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "smellybot.config.element.ListConfigElement",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "smellybot.config.definition.ListConfigDefinition",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "smellybot.config.types.string.CString",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.ModPlus",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "smellybot.bot_command.BotCommand",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "smellybot.config.secure_config.Config",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.Everyone",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "smellybot.bot_command.SuperCommand",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "smellybot.config.secure_config.Config",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.ModPlus",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "smellybot.bot_command.BotCommand",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "smellybot.config.secure_config.Config",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.ModPlus",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "smellybot.bot_command.BotCommand",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "smellybot.config.secure_config.Config",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "smellybot.access_control.ModPlus",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "smellybot.context.MessageContext",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "smellybot.context.MessageContext",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "smellybot.context.MessageContext",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "smellybot.context.MessageContext",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "difflib.SequenceMatcher",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 114,
"usage_type": "call"
}
] |
9259374186
|
from pca import PCA
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
import scipy.io as scio
from kmeans import KMeans
mat = scio.loadmat('./ExtYaleB10.mat')
Y_test = mat['test']
Y_train = mat['train']
def imageResizing(data):
    resized_dataset = []
    for img in data:
        resized_dataset.append(resize(img, (20, 17), mode='constant'))
    return np.array(resized_dataset)
def imageReshaping(data):
dimension = data.shape[1] * data.shape[2]
return data.reshape(data.shape[0], dimension)
def inputProcessing(data):
    X = []
    Y = []
    for i in range(len(data[0])):
        people_count = data[0][i].T
        for j in range(len(people_count)):
            X.append(people_count[j].T)
            Y.append(i)
    X = np.array(X)
    Y = np.array(Y)
fig, axis = plt.subplots(figsize=(12,8))
axis.imshow(X[1], cmap='gray')
X = imageResizing(X)
X = imageReshaping(X)
return X, Y
X,Y = inputProcessing(Y_train)
Xtst,Ytst = inputProcessing(Y_test)
# apply KMeans with k = 10
centers, predictedLabels = KMeans(X.T, 10, 10)
# Error
err = 0
for i in range(len(predictedLabels)):
if predictedLabels[i] != Y[i]:
err += 1
print("Clustering Error ratio with Kmeans: ", float(err) / len(predictedLabels))
# PCA with d = 2 and d = 100
pcaarray = [2,100]
for i in pcaarray:
print("For pca with dimensions = " , i)
X = PCA(X.T, i)[-1].T
# Plotting the graph
plt.style.use("classic")
colors = ['b', 'lime', 'c', 'r', 'y', 'm', 'k', 'teal', 'silver', 'aqua']
figure, axis = plt.subplots()
for i in range(10):
nodes = np.array([X[j] for j in range(len(X)) if predictedLabels[j] == i])
axis.scatter(nodes[:, 0], nodes[:, 1], s=16, c=colors[i])
plt.show()
|
nancyagrwal/Machine-Learning
|
Feed FOrward NN/testG.py
|
testG.py
|
py
| 1,799 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scipy.io.loadmat",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "skimage.transform.resize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "kmeans.KMeans",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pca.PCA",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
}
] |
21992599596
|
import collections
import numpy as np
from scipy.signal import butter
class ButterFilter(object):
""" Implements butterworth low-pass filter.
Based on https://github.com/google-research/motion_imitation/blob/master/motion_imitation/robots/action_filter.py
"""
def __init__(self, sampling_rate, action_size, highcut = [4.0]):
self.action_size = action_size
self.sampling_rate = sampling_rate
self.highcut = highcut
self.lowcut = [0.0]
self.order = 2
        a_coeffs = []
        b_coeffs = []
        for h in self.highcut:
            b, a = self.butter_filter_coefficients(h, sampling_rate, self.order)
            b_coeffs.append(b)
            a_coeffs.append(a)
        # One (b, a) pair per cutoff; a single pair is broadcast to every
        # action dimension below.
        self.a = a_coeffs
        self.b = b_coeffs
# Normalize by a[0]
for i in range(len(self.a)):
self.b[i] /= self.a[i][0]
self.a[i] /= self.a[i][0]
# Convert single filter to same format as filter per joint
if len(self.a) == 1:
self.a *= action_size
self.b *= action_size
self.a = np.stack(self.a)
self.b = np.stack(self.b)
assert len(self.b[0]) == len(self.a[0]) == self.order + 1
self.hist_len = self.order
self.yhist = collections.deque(maxlen=self.hist_len)
self.xhist = collections.deque(maxlen=self.hist_len)
self.reset()
def reset(self):
self.yhist.clear()
self.xhist.clear()
for _ in range(self.hist_len):
self.yhist.appendleft(np.zeros((self.action_size, 1)))
self.xhist.appendleft(np.zeros((self.action_size, 1)))
def filter(self, x):
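        # Direct-form IIR update:
        #   y[n] = b0 * x[n] + sum_i b_i * x[n-i] - sum_i a_i * y[n-i]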
xs = np.concatenate(list(self.xhist), axis=-1)
ys = np.concatenate(list(self.yhist), axis=-1)
y = np.multiply(x, self.b[:, 0]) + np.sum(
np.multiply(xs, self.b[:, 1:]), axis=-1) - np.sum(
np.multiply(ys, self.a[:, 1:]), axis=-1)
self.xhist.appendleft(x.reshape((self.action_size, 1)).copy())
self.yhist.appendleft(y.reshape((self.action_size, 1)).copy())
return y
def init_history(self, x):
x = np.expand_dims(x, axis=-1)
for i in range(self.hist_len):
self.xhist[i] = x
self.yhist[i] = x
def butter_filter_coefficients(self, highcut, fs, order=5):
nyq = 0.5 * fs
high = highcut / nyq
b, a = butter(order, [high], btype='low')
return b, a
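# Minimal usage sketch (assumption: a 100 Hz control loop smoothing a
# 12-dimensional action vector with the default 4 Hz cutoff):
#
#     f = ButterFilter(sampling_rate=100, action_size=12)
#     f.init_history(np.zeros(12))
#     smoothed = f.filter(np.random.uniform(-1.0, 1.0, size=12))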
|
bit-bots/deep_quintic
|
deep_quintic/butter_filter.py
|
butter_filter.py
|
py
| 2,574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.stack",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 78,
"usage_type": "call"
}
] |
9180027330
|
import argparse
import base64
try:
from http.server import BaseHTTPRequestHandler
except ImportError:
# Python 2.x compatibility hack.
from BaseHTTPServer import BaseHTTPRequestHandler
import os
import os.path
try:
from socketserver import TCPServer
if os.name != 'nt':
from socketserver import UnixStreamServer
except ImportError:
# Python 2.x compatibility hack.
from SocketServer import TCPServer
if os.name != 'nt':
from SocketServer import UnixStreamServer
import random
import socket
import sys
import time
class Handler(BaseHTTPRequestHandler):
"""Handlers for testing HTTP server."""
auth = False
not_found = False
simulate_timeout = False
filename = None
redirect = None
valid_headers = [
b'Basic ' + base64.b64encode('foo:bar'.encode('ascii')), b'Bearer TOKEN'
]
def do_HEAD(self): # pylint: disable=invalid-name
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self): # pylint: disable=invalid-name
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Bazel\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self): # pylint: disable=invalid-name
if not self.client_address:
# Needed for Unix domain connections as the response functions
# fail without this being set.
self.client_address = 'localhost'
if self.simulate_timeout:
while True:
time.sleep(1)
if self.not_found:
self.send_response(404)
self.end_headers()
return
if self.redirect is not None:
self.send_response(301)
self.send_header('Location', self.redirect)
self.end_headers()
return
if not self.auth:
self.do_HEAD()
self.serve_file()
return
auth_header = self.headers.get('Authorization', '').encode('ascii')
if auth_header in self.valid_headers:
self.do_HEAD()
self.serve_file()
else:
self.do_AUTHHEAD()
self.wfile.write(
'Bad authorization header: {}'.format(auth_header).encode('ascii')
)
def serve_file(self):
path_to_serve = self.path[1:]
if self.filename is not None:
path_to_serve = self.filename
to_serve = os.path.join(os.getcwd(), path_to_serve)
with open(to_serve, 'rb') as file_to_serve:
self.wfile.write(file_to_serve.read())
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--unix_socket', action='store')
parser.add_argument('mode', type=str, nargs='?')
parser.add_argument('target', type=str, nargs='?')
args = parser.parse_args(argv)
if args.mode:
if args.mode == 'always' and args.target:
Handler.filename = args.target
elif args.mode == 'redirect' and args.target:
Handler.redirect = args.target
elif args.mode == '404':
Handler.not_found = True
elif args.mode == 'timeout':
Handler.simulate_timeout = True
elif args.mode == 'auth':
Handler.auth = True
if args.target:
Handler.filename = args.target
httpd = None
if args.unix_socket:
httpd = UnixStreamServer(args.unix_socket, Handler)
sys.stderr.write('Serving forever on %s.\n' % args.unix_socket)
else:
port = None
while port is None:
try:
port = random.randrange(32760, 59760)
httpd = TCPServer(('', port), Handler)
except socket.error:
port = None
sys.stdout.write('%d\nstarted\n' % (port,))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.write('Serving forever on %d.\n' % port)
try:
httpd.serve_forever()
finally:
sys.stderr.write('Goodbye.\n')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bazelbuild/bazel
|
src/test/shell/bazel/testing_server.py
|
testing_server.py
|
py
| 3,746 |
python
|
en
|
code
| 21,632 |
github-code
|
6
|
[
{
"api_name": "os.name",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.name",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "BaseHTTPServer.BaseHTTPRequestHandler",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "base64.b64encode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "SocketServer.UnixStreamServer",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "SocketServer.TCPServer",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "socket.error",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.close",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 138,
"usage_type": "attribute"
}
] |
17378765276
|
import cv2
import numpy as np
import argparse
import os
from PIL import Image
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
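# Two-pass Zhang-Suen-style thinning: each pass removes boundary pixels that
# satisfy the neighbor-count and white-to-black transition criteria below.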
# condition 1: check that the pixel is black
def pixel_is_black(arr,x,y):
if arr[x,y] ==1:
return True
return False
# condition 2: does the pixel have 2 to 6 black neighbors?
def pixel_has_2_to_6_black_neighbors(arr,x,y):
if(2<=arr[x, y-1] + arr[x+1, y-1] + arr[x+1, y] + arr[x+1, y+1] +
arr[x, y+1] + arr[x-1, y+1] + arr[x-1, y] + arr[x-1, y-1] <= 6):
return True
return False
# condition 3: check the number of white-to-black transitions around the pixel
def pixel_has_1_white_to_black_neighbor_transition(arr,x,y):
    neighbors = [arr[x, y - 1], arr[x + 1, y - 1], arr[x + 1, y], arr[x + 1, y + 1],
                 arr[x, y + 1], arr[x - 1, y + 1], arr[x - 1, y], arr[x - 1, y - 1],
                 arr[x, y - 1]]
transitions = sum((a, b) == (0, 1) for a, b in zip(neighbors, neighbors[1:]))
if transitions == 1:
return True
return False
# condition 4: at least one of P2, P4, P6 is white
def at_least_one_of_P2_P4_P6_is_white(arr, x, y):
if (arr[x, y - 1] and arr[x + 1, y] and arr[x, y + 1]) == False:
return True
return False
# condition 5: at least one of P4, P6, P8 is white
def at_least_one_of_P4_P6_P8_is_white(arr, x, y):
if (arr[x + 1, y] and arr[x, y + 1] and arr[x - 1, y]) == False:
return True
return False
# step-two variants: at least one of P2, P4, P8 / P2, P6, P8 is white
def at_least_one_of_P2_P4_P8_is_white(arr, x, y):
if (arr[x, y - 1] and arr[x + 1, y] and arr[x - 1, y]) == False:
return True
return False
def at_least_one_of_P2_P6_P8_is_white(arr, x, y):
if (arr[x, y - 1] and arr[x, y + 1] and arr[x - 1, y]) == False:
return True
return False
def main():
dirname = 'C:/Users/oeunju/Downloads/1500-1700'
filenames = os.listdir(dirname)
for i in range(486,1000, 1):
dirname2= dirname +'/'+ str(i)
if not os.path.exists(dirname2):
exit()
filenames =os.listdir(dirname2)
for filename in filenames:
full_filename =os.path.join(dirname2, filename)
print(filename)
img = cv2.imread(full_filename, 0)
retval, orig_thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)
bin_thresh = (orig_thresh == 0).astype(int)
thinned_thresh = bin_thresh.copy()
while 1:
# make a copy of the thinned threshold array to check for changes
thresh_copy = thinned_thresh.copy()
# step one
pixels_meeting_criteria = []
# check all pixels except for border and corner pixels
# if a pixel meets all criteria, add it to pixels_meeting_criteria list
for i in range(1, thinned_thresh.shape[0] - 1):
for j in range(1, thinned_thresh.shape[1] - 1):
if (pixel_is_black(thinned_thresh, i, j) and
pixel_has_2_to_6_black_neighbors(thinned_thresh, i, j) and
pixel_has_1_white_to_black_neighbor_transition(thinned_thresh, i, j) and
at_least_one_of_P2_P4_P6_is_white(thinned_thresh, i, j) and
at_least_one_of_P4_P6_P8_is_white(thinned_thresh, i, j)):
pixels_meeting_criteria.append((i, j))
# change noted pixels in thinned threshold array to 0 (white)
for pixel in pixels_meeting_criteria:
thinned_thresh[pixel] = 0
# step two
pixels_meeting_criteria = []
# check all pixels except for border and corner pixels
# if a pixel meets all criteria, add it to pixels_meeting_criteria list
for i in range(1, thinned_thresh.shape[0] - 1):
for j in range(1, thinned_thresh.shape[1] - 1):
if (pixel_is_black(thinned_thresh, i, j) and
pixel_has_2_to_6_black_neighbors(thinned_thresh, i, j) and
pixel_has_1_white_to_black_neighbor_transition(thinned_thresh, i, j) and
at_least_one_of_P2_P4_P8_is_white(thinned_thresh, i, j) and
at_least_one_of_P2_P6_P8_is_white(thinned_thresh, i, j)):
pixels_meeting_criteria.append((i, j))
# change noted pixels in thinned threshold array to 0 (white)
for pixel in pixels_meeting_criteria:
thinned_thresh[pixel] = 0
# if the latest iteration didn't make any difference, exit loop
                if np.all(thresh_copy == thinned_thresh):
break
# convert all ones (black pixels) to zeroes, and all zeroes (white pixels) to ones
thresh = (thinned_thresh == 0).astype(np.uint8)
# convert ones to 255 (white)
thresh *= 255
dirname_simple = dirname2[-3:]
# display original and thinned images
# cv2.imshow('original image', orig_thresh)
# cv2.imshow('thinned image', thresh)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
cv2.imwrite('C:/Users/oeunju/Desktop/1500-1700/'+dirname_simple+filename, thresh)
if __name__ == '__main__':
main()
|
Leegunmin/RecognizeKorean
|
zaung_shen.py
|
zaung_shen.py
|
py
| 5,571 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.all",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 124,
"usage_type": "call"
}
] |
73036280829
|
import pandas as pd
from numpy.random import RandomState
from sklearn import preprocessing
# Read Data
data = pd.read_csv("/Users/yazen/Desktop/datasets/PimaDiabetes/pima.csv")
# Label columns
data.columns = ["pregnancy", "plasma/glucose concentration", "blood pressure","tricep skin fold thickness", "serum insulin", "body mass index", "diabetes pedigree function", "age", "label"]
# Remove rows with missing data
data = data.loc[data["plasma/glucose concentration"] > 20]
data = data.loc[data["blood pressure"] > 60]
data = data.loc[data["body mass index"] > 20]
# Undersample negative rows
negative = data.loc[data["label"] < 1].sample(frac=0.5, random_state=RandomState())
positive = data.loc[data["label"] > 0]
neutral = positive.append(negative)
# Normalize data
min_max_scaler = preprocessing.MinMaxScaler()
neutral = min_max_scaler.fit_transform(neutral)
neutral = pd.DataFrame(neutral,columns = data.columns)
# Create test and training set
train = neutral.sample(frac = .7, random_state=RandomState())
test = neutral.loc[~neutral.index.isin(train.index)]
train.to_csv('/Users/yazen/Desktop/datasets/PimaDiabetes/train.csv')
test.to_csv('/Users/yazen/Desktop/datasets/PimaDiabetes/test.csv')
|
yazsh/PimaDiabetesPrediction
|
PimaDataCleaning.py
|
PimaDataCleaning.py
|
py
| 1,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 27,
"usage_type": "call"
}
] |
16376172760
|
""" Basic commands and practice of Selenium library."""
import os
import time
from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
load_dotenv()
chrome_driver_path = os.environ.get("DRIVER_PATH")
driver = webdriver.Chrome(executable_path=chrome_driver_path)
# # Scrapping data from wikipedia and searching something
# driver.get("https://en.wikipedia.org/wiki/Main_Page")
# # Getting article count
# article_count = driver.find_element_by_css_selector("#articlecount a")
# print(article_count.text)
# # Visit the link
# article_count.click()
# # Locate search bar and send data
# search_bar = driver.find_element_by_name("search")
# search_bar.send_keys("Python")
# search_bar.send_keys(Keys.ENTER)
# Automatic web page registration
driver.get("http://secure-retreat-92358.herokuapp.com/")
# Get the input boxes
first_box = driver.find_element_by_name("fName")
second_box = driver.find_element_by_name("lName")
third_box = driver.find_element_by_name("email")
# Populate data
first_box.send_keys("Agapito")
second_box.send_keys("Ramirez")
third_box.send_keys("[email protected]")
time.sleep(3)
# Find button and send data
send_button = driver.find_element_by_css_selector(".form-signin button")
send_button.click()
time.sleep(4)
driver.quit()
|
FstRms/selenium-basics
|
example_automation.py
|
example_automation.py
|
py
| 1,316 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 47,
"usage_type": "call"
}
] |
37048825490
|
#!/usr/bin/env python
# coding: utf-8
import itertools
import os
import re
from io import open
import yaml
from jinja2 import Template
HERE = os.path.abspath(os.path.dirname(__file__))
def read(fname):
with open(os.path.join(HERE, "..", fname), "r") as fd:
return fd.read()
def write(content, fname):
with open(os.path.join(HERE, "..", fname), "w") as fd:
fd.write(content)
def generate_pipeline_name(env_value):
images = re.findall(r"\.*IMAGE=(.*?)(?!\S)", env_value, re.DOTALL)
return "_".join(image.replace(":", "") for image in images)
def generate_pipeline_variables(env_value):
variables = {}
for key_value in env_value.split():
key, value = key_value.split("=")
variables[key] = value
return variables
def generate_pipelines():
"""Parse command-line arguments and execute bumpversion command."""
env = yaml.safe_load(read(".ci/env.yml"))
iterables = [[f"{key}={value}" for value in values] for key, values in env.items()]
env_list = [" ".join(t) for t in itertools.product(*iterables)]
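    # e.g. {"PY_IMAGE": ["python:3.7", "python:3.8"], "DB_IMAGE": ["postgres:11"]}
    # expands to ["PY_IMAGE=python:3.7 DB_IMAGE=postgres:11",
    #             "PY_IMAGE=python:3.8 DB_IMAGE=postgres:11"] (keys are assumptions)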
pipelines = [
{
"env": env_value,
"name": generate_pipeline_name(env_value),
"variables": generate_pipeline_variables(env_value),
}
for env_value in env_list
]
write(
Template(read(".ci/travis.yml.j2")).render(pipelines=pipelines), ".travis.yml"
)
if __name__ == "__main__":
generate_pipelines()
|
itsolutionsfactory/dbcut
|
scripts/generate-ci-pipelines.py
|
generate-ci-pipelines.py
|
py
| 1,469 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "io.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "jinja2.Template",
"line_number": 53,
"usage_type": "call"
}
] |
34407258832
|
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv3d(in_channels=1, out_channels=64,
kernel_size=(3, 5, 5), stride=(1, 1, 1), bias=False)
self.max1 = nn.MaxPool3d(kernel_size=(2,2,2), stride=(2,2,2))
self.conv2 = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=(1, 1, 1), bias=False)
self.conv3 = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(1, 3, 3), stride=(1, 1, 1), bias=False)
#self.fc1 = nn.Linear(64,150)
self.fc1 = nn.Conv3d(in_channels=64, out_channels=150, kernel_size=(2, 2, 2), stride=(1, 1, 1), bias=False)
#self.fc2 = nn.Linear(150, 2)
self.fc2 = nn.Conv3d(in_channels=150, out_channels=2, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)
def forward(self, x):
#print('Input: ',x.shape)
x = self.conv1(x)
x = F.relu(x)
print('After conv 1: ',x.shape)
x = self.max1(x)
print('After max pool 1: ', x.shape)
x = self.conv2(x)
x = F.relu(x)
print('After conv 2: ',x.shape)
x = self.conv3(x)
x = F.relu(x)
print('After conv 3: ', x.shape)
# x = x.reshape(x.size(0), -1)
# #print('After reshape :', x.shape)
x = self.fc1(x)
x = F.relu(x)
print('After full conv 1: ', x.shape)
x = self.fc2(x)
x = F.relu(x)
print('After full conv 2: ', x.shape)
return x
import train_val_split
import torch.optim as optim
from torch.autograd import Variable
train_dset, val_dset, test_dset = train_val_split.train_test_split()
model = CNN().cuda()
model.train()
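# Push a single batch through the network as a smoke test of the layer shapes.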
for i, (images, labels) in enumerate(train_dset):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs = model(images)
break
|
kishanbala/BrainLesionSegmentation
|
cmb_3dcnn/build_screening_stage.py
|
build_screening_stage.py
|
py
| 2,008 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool3d",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "train_val_split.train_test_split",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 62,
"usage_type": "call"
}
] |
37126840757
|
import matplotlib.pyplot as plt
import numpy as np
import time
def plot_voxel(voxels, filename):
start = time.time()
colors = np.where(voxels, "blue", "red")
fig = plt.figure()
ax = fig.gca(projection='3d')
template = np.ones(voxels.shape, dtype=object)
ax.voxels(template, facecolors=colors, edgecolor='k')
ax.set(xlabel='x', ylabel='y', zlabel='z')
# plt.show()
plt.savefig(f'processed/mesh_image/{filename}.png')
fig = plt.figure()
ax = fig.gca(projection='3d')
temp = np.where(voxels, False, True)
ax.voxels(temp, facecolors=colors, edgecolor='k')
ax.set(xlabel='x', ylabel='y', zlabel='z')
plt.savefig(f'processed/mesh_image/{filename}_pole.png')
print("ploting time:", time.time()-start)
|
born9507/Prediction-of-E-using-CNN
|
src/data/plot.py
|
plot.py
|
py
| 782 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
}
] |
36062246358
|
from django.urls import path
from .views import addtask, mark_as_done, mark_as_undone, edit, delete_task
urlpatterns = [
# adding a task
path('addtask/', addtask, name='addtask'),
# mark as done task
path('mark_as_done/<int:pk>/', mark_as_done, name='mark_as_done'),
# mark as undone task
path('mark_as_undone/<int:pk>/', mark_as_undone, name='mark_as_undone'),
# edit task
path('edit/<int:pk>/', edit, name='edit'),
# delete task
path('delete/<int:pk>/', delete_task, name='delete'),
]
|
shaikmoinuddin/todo_django
|
todo_app/urls.py
|
urls.py
|
py
| 530 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.addtask",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.mark_as_done",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.mark_as_undone",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.edit",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.delete_task",
"line_number": 15,
"usage_type": "argument"
}
] |
40428611431
|
#!/usr/bin/env python3
"""
Name: cluster_create_update_all.py
Description: Create/update clusters defined in ``--yaml``
"""
import argparse
from netbox_tools.common import netbox, load_yaml
from netbox_tools.cluster import Cluster
OUR_VERSION = 101
def get_parser():
"""
return an argparse parser object
"""
help_yaml = "YAML file containing cluster type information."
ex_prefix = "Example: "
ex_yaml = f"{ex_prefix} --yaml ./clusters.yml"
parser = argparse.ArgumentParser(
description="DESCRIPTION: Create/update Netbox clusters defined in ``--yaml``"
)
mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
mandatory.add_argument(
"--yaml", dest="yaml", required=True, help=help_yaml + ex_yaml
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {OUR_VERSION}"
)
return parser.parse_args()
cfg = get_parser()
netbox_obj = netbox()
info = load_yaml(cfg.yaml)
for key in info["clusters"]:
c = Cluster(netbox_obj, info["clusters"][key])
c.create_or_update()
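# Example invocation (grounded in the --yaml help text above); the inner keys
# of clusters.yml are an assumption about what Cluster() consumes:
#
#   ./cluster_create_update_all.py --yaml ./clusters.yml
#
#   # clusters.yml
#   clusters:
#     fabric_1:
#       name: fabric_1
#       type: vmware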
|
allenrobel/netbox-tools
|
scripts/cluster_create_update_all.py
|
cluster_create_update_all.py
|
py
| 1,091 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "netbox_tools.common.netbox",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "netbox_tools.common.load_yaml",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "netbox_tools.cluster.Cluster",
"line_number": 43,
"usage_type": "call"
}
] |
8097009121
|
"""
Image utility functions.
"""
import PIL.Image
import PIL.ImageChops
import numpy
def equalize(image, levels=256, grayscale=False):
"""
Equalizes an image such that the darkest pixels become black, the lightest become white, and others are based on
their percentile. If a pixel is brighter than 25% of the other pixels, it will be 25% grey in the output.
If the image has multiple channels, they will be processed separately and merged into a new image.
If the image only has one color, the return image will be 50% gray.
:param image: Source image
:param levels: Number of grayscale levels. If this is less than 256, there will be different discrete bands in
the output image.
:param grayscale: If True, the image is forced to grayscale rather than splitting bands.
:return: A new equalized image.
"""
if image.mode != 'L':
if not grayscale:
# merge requires a list (not a generator), so this comprehension produces a list instead of a generator.
return PIL.Image.merge(image.mode, [equalize(band, levels) for band in image.split()])
image = image.convert('L')
histogram = image.histogram()
# Compute divisor
divisor = (
(image.width * image.height) # Total number of pixels
- next(filter(None, reversed(histogram)))
# Minus the last nonzero amount, otherwise it won't turn out white
) / (levels - 1) # Divided by levels, which effectively multiplies them in the rounding phase.
if not divisor:
return PIL.Image.new('L', image.size, 127)
# Multiplier to scale back up after dividing.
multiplier = 255 / (levels - 1)
# Generate remap table
remap = []
pixels = 0
for count in histogram:
remap.append(max(0, min(255, round(round(pixels / divisor) * multiplier))))
pixels += count
# Apply to image.
return PIL.Image.eval(image, remap.__getitem__) # lambda x: remap[x] but faster
def convert(image, mode):
"""
Equivalent to image.convert(mode), except returns the source image if already in that mode.
:param image: Source image
:param mode: Desired mode
:return: Image in desired mode
"""
if image.mode != mode:
image = image.convert(mode)
return image
def score(composite, image, exponent=1):
"""
Determines how a particular image scores against a composite. Lower scores indicate a closer match.
:param composite: The composite reference
:param image: The image being scored.
:param exponent: Arbitrary exponent to make a large difference in a small area more significant than a small
difference in a large one.
:return: An arbitrary score value where 0 is a perfect match and (255**exponent)*numchannels is the theoretical
upper bound.
"""
diff = PIL.ImageChops.difference(composite, image)
if composite.mode != 'L':
return sum(sum(c**exponent for c in x) for x in diff.getdata()) / (diff.width * diff.height)
    return sum(x**exponent for x in diff.getdata(0)) / (diff.width * diff.height)
def numpify(image):
    """
    Converts an image to a numpy array of uint8 values.

    :param image: Source image
    :return: Array of shape (height, width) or (height, width, channels)
    """
    return numpy.array(image, dtype=numpy.uint8)
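# Minimal usage sketch (assumption: 'photo.png' is a hypothetical input file):
#
#     img = PIL.Image.open('photo.png')
#     eq = equalize(img, levels=64)   # 64 discrete gray bands per channel
#     eq.save('photo_equalized.png')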
|
dewiniaid/sigsolve
|
sigsolve/imageutil.py
|
imageutil.py
|
py
| 3,598 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.Image.merge",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image.new",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image.eval",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ImageChops.difference",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ImageChops",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 99,
"usage_type": "attribute"
}
] |
21367680266
|
import serial
import serial.tools.list_ports as lp
class serialPort():
def __init__(self) -> None:
self.ser = serial.Serial()
self.timeout = None # specify timeout when using readline()
self.ports = lp.comports()
def connectPort(self, port_name, baudrate=115200):
self.ser.port = port_name # "/dev/cu.usbmodem14101" # 'COM3' # Arduino serial port
self.ser.baudrate = baudrate
self.ser.timeout = self.timeout # specify timeout when using readline()
self.ser.parity = serial.PARITY_NONE
self.ser.stopbits = serial.STOPBITS_ONE
# self.ser.bytesize = serial.EIGHTBITS
try:
self.ser.open()
return self.ser.is_open
except serial.serialutil.SerialException:
return False
# self.ser.reset_input_buffer()
# self.ser.write(str.encode('1\r\n', 'UTF-8'))
def disconnectPort(self):
self.ser.close()
return
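# Minimal usage sketch (assumption: the port name is platform-specific,
# e.g. 'COM3' on Windows or '/dev/ttyUSB0' on Linux):
#
#     sp = serialPort()
#     print([p.device for p in sp.ports])  # enumerate available ports
#     if sp.connectPort('/dev/ttyUSB0'):
#         data = sp.ser.readline()
#         sp.disconnectPort()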
|
PhysiologicAILab/PhysioKit
|
utils/devices.py
|
devices.py
|
py
| 1,003 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.comports",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "serial.STOPBITS_ONE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "serial.serialutil",
"line_number": 20,
"usage_type": "attribute"
}
] |
73939813946
|
import mira
assert mira.__file__ == '/liulab/alynch/projects/multiomics/BatchEffect/MIRA/mira/__init__.py'
from scipy import sparse
import shutil
import frankencell as fc
import scanpy as sc
from .utils import read_and_process, plot_umaps
import os
import optuna
def run_mira(dynframe, out_h5, plot_file, threads = 1):
shutil.copy(dynframe, out_h5)
data = read_and_process(out_h5)
data.layers['sparse_counts'] = sparse.csr_matrix(data.layers['counts'])
model = mira.topics.TopicModel(
*data.shape,
feature_type='expression',
exogenous_key='highly_variable',
counts_layer='sparse_counts',
categorical_covariates='batch',
cost_beta = 2.
)
model.set_learning_rates(3e-3, 0.25)
def faux_print(*x):
return 'Trial completed.'
mira.topic_model.trainer._print_study = faux_print
train_data = dynframe+'_train'
test_data = dynframe+'_test'
if os.path.isdir(train_data):
shutil.rmtree(train_data)
shutil.rmtree(test_data)
train, test = mira.topics.SpeedyTuner.train_test_split(data,
train_size=0.8,
stratify=data.obs_vector('batch'), seed = 0
)
model.write_ondisk_dataset(train, dirname= train_data)
model.write_ondisk_dataset(test, dirname= test_data)
del train, test
try:
optuna.delete_study(
storage = 'sqlite:///mira-BENCHMARKING.db',
study_name = dynframe
)
except KeyError:
pass
tuner = mira.topics.SpeedyTuner(
model = model,
save_name = dynframe,
min_topics = 3,
max_topics = 10,
seed = 2556,
min_trials = 32,
max_trials = 64,
n_jobs = threads,
stop_condition = 8,
storage = 'sqlite:///mira-BENCHMARKING.db',
)
tuner.fit(train_data, test_data)
model = tuner.fetch_best_weights()
model.predict(data)
model.get_umap_features(data, box_cox=0.33)
sc.pp.neighbors(data, use_rep='X_umap_features', metric='manhattan')
sc.tl.umap(data, min_dist=0.1)
plot_umaps(data, plot_file)
fc.add_dimred_prior(out_h5, data.obsm['X_umap_features'])
def main(args):
run_mira(
args.dynframe,
args.outh5,
args.plotfile,
threads = args.threads,
)
def add_arguments(parser):
parser.add_argument('dynframe', type = str)
parser.add_argument('outh5', type = str)
parser.add_argument('plotfile', type = str)
parser.add_argument('--threads','-t', type = int, default = 1)
|
AllenWLynch/CODA-reproduction
|
disentangler/frankencell/dimred_methods/disentangler.py
|
disentangler.py
|
py
| 2,566 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "mira.__file__",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.read_and_process",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mira.topics.TopicModel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mira.topics",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mira.topic_model",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "mira.topics.SpeedyTuner.train_test_split",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "mira.topics",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "optuna.delete_study",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "mira.topics.SpeedyTuner",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "mira.topics",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "scanpy.pp.neighbors",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "scanpy.pp",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "scanpy.tl.umap",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "scanpy.tl",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "utils.plot_umaps",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "frankencell.add_dimred_prior",
"line_number": 86,
"usage_type": "call"
}
] |
28672716901
|
from trello import TrelloClient, util
from atlassian import Confluence
from os import listdir, path
import pystache
import datetime
import json
from traceback import print_exc
from time import sleep
from re import sub
try:
keys = {}
if path.exists('.keys'):
with open('.keys') as f:
keys = json.load(f)
url = keys.get('url') or input('Confluence URL:').strip()
email = keys.get('email') or input('Email address:').strip()
api_key = keys.get('api_key') or input('API Key for Atlassian (https://id.atlassian.com/manage/api-tokens):').strip()
confluence = Confluence(
url=url,
username=email,
password=api_key
)
parent = keys.get('parent') or int(input("Parent page ID:").strip())
parent_page = confluence.get_page_by_id(parent)
while not isinstance(parent_page, dict):
email = input('Email address:').strip()
api_key = input('API Key for Atlassian (https://id.atlassian.com/manage/api-tokens):').strip()
confluence = Confluence(
url=url,
username=email,
password=api_key
)
parent_page = confluence.get_page_by_id(parent)
while not input(f"Create page under {parent_page['title']}? [y/n]:").strip().lower().startswith('y'):
space = input("Confluence Space ID:").strip()
parent = input("Parent page ID:").strip()
parent_page = confluence.get_page_by_id(parent)
boards = None
while not boards:
trello_api_key = keys.get('trello_api_key') or input("Trello API Key (https://trello.com/app-key):").strip()
trello_api_secret = keys.get('trello_api_secret') or input("Trello API Secret (https://trello.com/app-key):").strip()
if 'oauth_token' not in keys or 'oauth_token_secret' not in keys:
try:
oauth_result = util.create_oauth_token('never', 'read,write', trello_api_key, trello_api_secret)
keys['oauth_token'] = oauth_result['oauth_token']
keys['oauth_token_secret'] = oauth_result['oauth_token_secret']
except:
try:
del keys['trello_api_key']
del keys['trello_api_secret']
except:
pass
oauth_token = keys.get('oauth_token')
oauth_token_secret = keys.get('oauth_token_secret')
trello = TrelloClient(
api_key=trello_api_key,
api_secret=trello_api_secret,
token=oauth_token,
token_secret=oauth_token_secret
)
try:
boards = trello.list_boards()
with open('.keys', 'w') as f:
json.dump({
"url": url,
"email": email,
"api_key": api_key,
"trello_api_key": trello_api_key,
"trello_api_secret": trello_api_secret,
"parent": parent,
"oauth_token": oauth_token,
"oauth_token_secret": oauth_token_secret
}, f)
except:
del keys['oauth_token']
del keys['oauth_token_secret']
print("\n\nPlease select a board:")
for i, board in enumerate(boards):
print(f"{board.name} - {i+1}")
board_index = int(input("id [1]: ").strip() or 1)
board = boards[board_index - 1]
print(f"\nSelected board {board.name}")
columns = board.get_lists(None)
templates = listdir('templates')
print("\n\nPlease select the template for the page")
for i, template in enumerate(templates):
print(f"{template} - {i+1}")
template_index = int(input("\nSelect template to use [1]:").strip() or 1)
template_filename = path.join("templates", templates[template_index - 1])
print("\n\nPlease select relevant columns")
for i, column in enumerate(columns):
print(f"{column.name} - {i+1}")
config = {}
if path.exists('columns.json'):
with open('columns.json') as f:
config = json.load(f)
column_config = config.get(template_filename, {})
done = False
column_index = 0
if column_config:
print("\n\nCurrent column configuration is:")
for name, col in column_config.items():
print(f"{columns[col].name} => {name}")
done = (input("\nKeep this configuration? [y]:").strip() or 'y').lower().startswith('y')
if not done:
column_config = {}
if not done:
print("\n\n")
while not done and column_index < len(columns):
column_or_done = input(f'Select a column or type n to stop [{column_index + 1}]:').strip()
if column_or_done.startswith('n'):
break
column_index = int(column_or_done or (column_index + 1))
if column_index > len(columns):
print(f"Column {column_index} does not exist!")
continue
column_name = sub('[^a-z0-9]+', '_', columns[column_index - 1].name.lower())
column_name = input(f"Select a name for the column [{column_name}]:").strip() or column_name
column_config[column_name] = column_index - 1
config[template_filename] = column_config
with open('columns.json', 'w') as f:
json.dump(config, f)
data = {k: columns[i].list_cards() for k, i in column_config.items()}
with open(template_filename) as f:
body = pystache.render(f.read(), data)
print("\n\n####################################################################\n\n")
print(body)
ok = input("\nDoes this look good? y/n [n]:").strip() or 'n'
if not ok.lower().startswith('y'):
print("\n\nPlease start again")
else:
all_cards = [c for v in data.values() for c in v]
if all_cards:
today = datetime.date.today()
title = f"{today.strftime('%d %B %Y')} - {today.strftime('%A')} Retrospective" #TODO: Make this more generic
title = input(f"\n\nSelect a page title [{title}]: ").strip() or title
confluence.create_page(
space=parent_page['space']['key'],
title=title,
parent_id=parent,
body=body,
representation='wiki'
)
else:
print("\n\nNo cards to add to page")
for card in all_cards:
card.set_closed(True)
except:
print_exc()
sleep(2)
input("\n\nPress enter to close")
|
iain-neirfeno/trello-to-confluence
|
create_confluence_page.py
|
create_confluence_page.py
|
py
| 6,643 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "atlassian.Confluence",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "atlassian.Confluence",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "trello.util.create_oauth_token",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "trello.util",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "trello.TrelloClient",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "trello.list_boards",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pystache.render",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 180,
"usage_type": "call"
}
] |
2235060581
|
import xml.etree.ElementTree as ET
import subprocess
import os
import glob
import time
def clonerep(url):
name = url.split("/")[-1].split(".")[0]
os.system("git"+ " clone " + "https://github.com/" + url + " repos/" + name + "/" )
def insertIntoPom(repdir):
# ET.register_namespace("", "http://maven.apache.org/POM/4.0.0")
# tree = ET.parse("apollo/pom.xml")
# plugs = tree.findall("./{http://maven.apache.org/POM/4.0.0}build/{http://maven.apache.org/POM/4.0.0}plugins")
# cloverplug = ET.fromstring("<plugin> <groupId>org.openclover</groupId> <artifactId>clover-maven-plugin</artifactId> <version>4.2.0</version> <configuration> <generateXml>true</generateXml> </configuration> </plugin>")
# if len(plugs) != 0:
# plugs[0].insert(0, cloverplug)
# tree.write("pom.xml")
# stre = "<plugin> <groupId>org.openclover</groupId> <artifactId>clover-maven-plugin</artifactId> <version>4.2.0</version> <configuration> <generateXml>true</generateXml> </configuration> </plugin>"
stre = "<plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>2.7</version> <configuration> <formats> <format>html</format> <format>xml</format> </formats><aggregate>true</aggregate> </configuration> </plugin>"
fileHandle = open ( repdir + '/pom.xml',"r")
lines = fileHandle.readlines()
fileHandle.close()
lastlineind = len(lines) - 1
idd = 0
i = 0
alreadyHas = False
alreadyI = 0
alreadyIndex = 0
for line in lines:
if (line.strip() == "<artifactId>cobertura-maven-plugin</artifactId>"):
alreadyIndex = alreadyI
break
alreadyI += 1
for line in lines:
if (line.strip() == "<plugin>"):
idd = i
break
i += 1
if alreadyIndex:
lines.insert(alreadyIndex, "<configuration> <formats> <format>html</format> <format>xml</format> </formats><aggregate>true</aggregate> </configuration>")
fileHandle = open(repdir + "/pom.xml", "w")
contents = "".join(lines)
fileHandle.write(contents)
fileHandle.close()
elif idd != 0:
lines.insert(idd, stre)
fileHandle = open(repdir + "/pom.xml", "w")
contents = "".join(lines)
fileHandle.write(contents)
fileHandle.close()
else:
projend = 0
j = 0
        # plugins tag not found, so insert the build/plugins block before </project>
for line in lines:
if (line.strip() == "</project>"):
projend = j
break
j += 1
#projend -= 1
# lines.insert(projend, "<build><plugins><plugin> <groupId>org.openclover</groupId> <artifactId>clover-maven-plugin</artifactId> <version>4.2.0</version> <configuration> <generateXml>true</generateXml> </configuration> </plugin> </plugins> </build>")
lines.insert(projend, "<build><plugins><plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>2.7</version> <configuration> <formats> <format>html</format> <format>xml</format> </formats> <aggregate>true</aggregate></configuration> </plugin> </plugins> </build>")
fileHandle = open(repdir + "/pom.xml", "w")
contents = "".join(lines)
fileHandle.write(contents)
fileHandle.close()
# print(contents)
# runs cobertura
def runcov(repdir):
os.chdir("repos/" + repdir + "/")
subprocess.call(["mvn", "cobertura:cobertura", "-Dlicense.skip=true"])
# subprocess.run(["mvn", "clean" ,"clover:setup" ,"test" ,"clover:aggregate" ,"clover:clover"])
os.chdir("../..")
def getAllCovXML(repdir):
covXMLs = []
for dirpath, dirnames, files in os.walk('repos/' + repdir + '/'):
for name in files:
if name == "coverage.xml":
covXMLs.append(os.path.join(dirpath, name))
# print(covXMLs)
return covXMLs
def getTotalCodeCov(covList):
linesCovered = 0
totalLines = 0
for covFile in covList:
root = ET.parse(covFile)
c = root.find(".")
percent = c.attrib["line-rate"]
print(percent)
linesCovered += int(c.attrib["lines-covered"])
totalLines += int(c.attrib["lines-valid"])
return float(linesCovered/totalLines)
def main():
# repoURL = "https://github.com/ctripcorp/apollo.git"
# repoURL = "https://github.com/shuzheng/zheng.git"
# repoURL = "https://github.com/alibaba/arthas.git"
# repoURL = "https://github.com/openzipkin/zipkin"
"""
'ctripcorp/apollo'
'google/auto'
'low perc dbeaver/dbeaver'
'dropwizard/dropwizard'
'low perc google/guava'
'google/guice'
'failed build hankcs/HanLP'
'apache/incubator-druid'
'apache/incubator-shardingsphere'
'xetorthio/jedis'
'mybatis/mybatis-3'
'naver/pinpoint'
'broken builds proxyee-down-org/proxyee-down'
'broken builds redisson/redisson'
'broken build spring-projects/spring-boot'
'b3log/symphony'
'code4craft/webmagic'
'xuxueli/xxl-job'
'openzipkin/zipkin'
"""
# hardcodedList = ['ctripcorp/apollo', 'google/auto', 'dbeaver/dbeaver', 'dropwizard/dropwizard', 'google/guava', 'google/guice', 'hankcs/HanLP', 'apache/incubator-druid', 'apache/incubator-shardingsphere', 'xetorthio/jedis']
hardcodedList = ['openzipkin/zipkin']
for hardcoded in hardcodedList:
clonerep(hardcoded)
repdir = hardcoded.split("/")[-1].split(".")[0]
# for a single repo...
coms = open("commits/" + repdir + ".csv")
lines = coms.readlines()
csv = open("codecov/" + repdir + ".csv", "w")
csv.write("id,tag_name,covpercent,dayDifference, dayDifferenceHours\n")
for line in lines:
llist = line.split(",")
print(llist)
os.chdir("repos/" + repdir)
subprocess.run(["git", "checkout", "--", "."])
subprocess.run(["git", "checkout", llist[2]])
subprocess.run(["git", "checkout", "--", "."])
os.chdir("../..")
insertIntoPom("repos/" + repdir)
#codecov lines
runcov(repdir)
codeCovFiles = getAllCovXML(repdir)
if (len(codeCovFiles) == 0):
print("NO COV FILES FOUND SKIP")
continue
totalCoveragePercent = getTotalCodeCov(codeCovFiles)
id = llist[0]
tag = llist[1]
daydiff = llist[3].strip()
toWrite = id + "," + tag + "," + str(totalCoveragePercent)+ "," + daydiff
if len(llist) == 5:
daydiffhr = llist[4].strip()
toWrite += "," + daydiffhr
toWrite += "\n"
csv.write(toWrite)
        csv.close()
main()
# codeCovFiles = getAllCovXML("auto")
# totalCoveragePercent = getTotalCodeCov(codeCovFiles)
# print(totalCoveragePercent)
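# Worked check of the pooling in getTotalCodeCov (numbers are hypothetical):
# two coverage.xml files reporting 150/200 and 50/100 covered lines give
# (150 + 50) / (200 + 100) = 0.6667, the pooled line rate across modules,
# which differs from the mean of the per-file rates ((0.75 + 0.5) / 2 = 0.625).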
|
tyheise/402-Course-Project
|
codecov.py
|
codecov.py
|
py
| 6,907 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "os.chdir",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 173,
"usage_type": "call"
}
] |
35704005613
|
from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
path('', views.index, name='home'),
path('gallery', views.gallery, name='gallery'),
path('login', views.login_view, name='login'),
path('pricing', views.price, name='pricing'),
path('signup', views.handleSignup, name='signup'),
path('contact', views.contact, name='contact'),
path('about', views.about, name='about'),
path('logout', views.logout_view, name='logout')
]
|
Shivam-08/gymdesign
|
home/urls.py
|
urls.py
|
py
| 511 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "home.views.index",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "home.views.gallery",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "home.views.login_view",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "home.views.price",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "home.views.handleSignup",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "home.views.contact",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "home.views.about",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "home.views.logout_view",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 13,
"usage_type": "name"
}
] |
71576276027
|
import os
import numpy as np
import torch
from matplotlib import pyplot as plt
from ..experiments.attention.attention import AttentionHookModule, group_by_type, interpolate, low_mem, sum_over_dim, stack_attentions
from .. import util
from ..StableDiffuser import StableDiffuser
def edit_output(activation, name):
activation = interpolate(activation, name)
activation = low_mem(activation, name)
return activation
def to_image(att, title, vmax, vmin):
plt.figure(figsize=(5,5), dpi=200)
plt.imshow(att, cmap='inferno', interpolation='nearest', vmin=vmin, vmax=vmax)
plt.title(title)
plt.axis('off')
plt.tight_layout(pad=0)
image = util.figure_to_image(plt.gcf())
plt.close()
return image
def main(prompt, outpath):
os.makedirs(outpath, exist_ok=True)
diffuser = StableDiffuser(scheduler='EA').to(torch.device('cuda:0')).half()
layers = set([module_name for module_name, module in diffuser.named_modules() if 'attnprobshook' in module_name and 'attn2' in module_name])
images, trace_steps = diffuser(prompt,
generator=torch.manual_seed(50),
n_steps=50,
trace_args={'layers' : layers, 'edit_output' : edit_output}
)
images[0][-1].save(os.path.join(outpath, 'image.png'))
attentions = stack_attentions(trace_steps)
self_attentions, cross_attentions = group_by_type(attentions)
tokens = diffuser.text_tokenize([prompt])['input_ids'][0][1:]
tokens = diffuser.text_detokenize(tokens)
layers = cross_attentions.keys()
cross_attentions = np.stack(list(cross_attentions.values()))
attention_over_time = cross_attentions.mean(axis=0)
attention_over_time = attention_over_time.mean(axis=1)
vmin = attention_over_time[:,1:(len(tokens)+1)].min()
vmax = attention_over_time[:,1:(len(tokens)+1)].max()
aot_images = []
for timestep in range(attention_over_time.shape[0]):
token_images = []
for token_idx in range(len(tokens)):
token_images.append(to_image(attention_over_time[timestep, token_idx+1], tokens[token_idx], vmax, vmin))
aot_images.append(util.image_grid([token_images]))
util.to_gif(aot_images, os.path.join(outpath, 'aot.gif'))
os.makedirs(outpath, exist_ok=True)
for layer_idx, layer in enumerate(layers):
attention_over_time = cross_attentions[layer_idx].mean(axis=1)
vmin = attention_over_time[:,1:(len(tokens)+1)].min()
vmax = attention_over_time[:,1:(len(tokens)+1)].max()
aot_images = []
for timestep in range(attention_over_time.shape[0]):
token_images = []
for token_idx in range(len(tokens)):
token_images.append(to_image(attention_over_time[timestep, token_idx+1], tokens[token_idx], vmax, vmin))
aot_images.append(util.image_grid([token_images]))
util.to_gif(aot_images, os.path.join(outpath, f'{layer}.gif'))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('prompt')
parser.add_argument('outpath')
main(**vars(parser.parse_args()))
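# Example invocation; the module path is inferred from the repo layout and the
# prompt/output arguments are placeholders:
#   python -m thesis.final.cross_attention "a photo of a dog" out/dog_run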
|
JadenFiotto-Kaufman/thesis
|
thesis/final/cross_attention.py
|
cross_attention.py
|
py
| 3,146 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "experiments.attention.attention.interpolate",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "experiments.attention.attention.low_mem",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "StableDiffuser.StableDiffuser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "experiments.attention.attention.stack_attentions",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "experiments.attention.attention.group_by_type",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 108,
"usage_type": "call"
}
] |
34038250008
|
import logging
from datetime import timedelta
from aio_proxy.request.search_type import SearchType
from aio_proxy.search.es_index import StructureMapping
from aio_proxy.search.geo_search import build_es_search_geo_query
from aio_proxy.search.helpers.helpers import (
execute_and_agg_total_results_by_identifiant,
extract_ul_and_etab_from_es_response,
page_through_results,
)
from aio_proxy.search.text_search import build_es_search_text_query
from aio_proxy.utils.cache import cache_strategy
TIME_TO_LIVE = timedelta(days=31)
MIN_EXECUTION_TIME = 400
MAX_TOTAL_RESULTS = 10000
class ElasticSearchRunner:
def __init__(self, search_params, search_type):
self.es_search_client = StructureMapping.search()
self.search_type = search_type
self.search_params = search_params
self.has_full_text_query = False
self.es_search_results = None
self.total_results = None
self.execution_time = None
self.run()
def sort_es_search_query(self):
        # Sorting is very heavy on performance when there are no search
        # terms (only filters). In that case we can skip score-based
        # sorting, because the score is the same for every result
        # document. Beware: nom and prenoms are search fields.
if self.has_full_text_query:
self.es_search_client = self.es_search_client.sort(
{"_score": {"order": "desc"}},
{"unite_legale.etat_administratif_unite_legale": {"order": "asc"}},
{"unite_legale.nombre_etablissements_ouverts": {"order": "desc"}},
)
# If only filters are used, use nombre établissements ouverts to sort the
# results
else:
self.es_search_client = self.es_search_client.sort(
{"unite_legale.nombre_etablissements_ouverts": {"order": "desc"}},
)
def execute_and_format_es_search(self):
self.es_search_client = page_through_results(self)
es_response = self.es_search_client.execute()
self.total_results = es_response.hits.total.value
self.execution_time = es_response.took
# Due to performance issues when aggregating on filter queries, we use
# aggregation on total_results only when total_results is lower than
# 10 000 results. If total_results is higher than 10 000 results,
# the aggregation causes timeouts on API. We return by default 10 000 results.
max_results_exceeded = self.total_results >= MAX_TOTAL_RESULTS
if not max_results_exceeded:
execute_and_agg_total_results_by_identifiant(self)
self.es_search_results = []
for matching_structure in es_response.hits:
matching_structure_dict = extract_ul_and_etab_from_es_response(
matching_structure
)
self.es_search_results.append(matching_structure_dict)
def sort_and_execute_es_search_query(self):
self.es_search_client = self.es_search_client.extra(
track_scores=True, explain=True
)
# Collapse is used to aggregate the results by siren. It is the consequence of
# separating large documents into smaller ones
self.es_search_client = self.es_search_client.update_from_dict(
{"collapse": {"field": "identifiant"}}
)
# Sort results
self.sort_es_search_query()
# Execute search, only called if key not found in cache
# (see cache strategy below)
def get_es_search_response():
self.execute_and_format_es_search()
es_results_to_cache = {
"total_results": self.total_results,
"response": self.es_search_results,
"execution_time": self.execution_time,
}
return es_results_to_cache
# To make sure the page and page size are part of the cache key
cache_key = page_through_results(self)
cached_search_results = cache_strategy(
cache_key,
get_es_search_response,
self.should_cache_search_response,
TIME_TO_LIVE,
)
self.total_results = cached_search_results["total_results"]
self.es_search_results = cached_search_results["response"]
self.execution_time = cached_search_results["execution_time"]
def should_cache_search_response(self):
"""Cache search response if execution time is higher than 400 ms"""
try:
if self.execution_time > MIN_EXECUTION_TIME:
return True
return False
except KeyError as error:
logging.info(f"Error getting search execution time: {error}")
return False
def run(self):
if self.search_type == SearchType.TEXT:
build_es_search_text_query(self)
elif self.search_type == SearchType.GEO:
build_es_search_geo_query(self)
self.sort_and_execute_es_search_query()
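# Illustration of the caching rule above (timings are hypothetical): a query that
# took 612 ms gets cached for TIME_TO_LIVE (31 days), while one that took 180 ms
# is recomputed on every request, since only responses slower than
# MIN_EXECUTION_TIME (400 ms) justify the cache write.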
|
etalab/annuaire-entreprises-search-api
|
aio/aio-proxy/aio_proxy/search/es_search_runner.py
|
es_search_runner.py
|
py
| 5,014 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "aio_proxy.search.es_index.StructureMapping.search",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "aio_proxy.search.es_index.StructureMapping",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "aio_proxy.search.helpers.helpers.page_through_results",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "aio_proxy.search.helpers.helpers.execute_and_agg_total_results_by_identifiant",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "aio_proxy.search.helpers.helpers.extract_ul_and_etab_from_es_response",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "aio_proxy.search.helpers.helpers.page_through_results",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "aio_proxy.utils.cache.cache_strategy",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "aio_proxy.request.search_type.SearchType.TEXT",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "aio_proxy.request.search_type.SearchType",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "aio_proxy.search.text_search.build_es_search_text_query",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "aio_proxy.request.search_type.SearchType.GEO",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "aio_proxy.request.search_type.SearchType",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "aio_proxy.search.geo_search.build_es_search_geo_query",
"line_number": 123,
"usage_type": "call"
}
] |
28521533515
|
from sqlalchemy import Column, Float, ForeignKey, Integer, \
String, Text, and_
from sqlalchemy.orm import contains_eager, load_only, relationship
from sqlalchemy.orm.exc import NoResultFound
from Sugar import Dictifiable
from extensions import celery, db
class SixteenPReport(Dictifiable, db.Model):
__tablename__ = 'sixteen_p_report'
test_attempt_id = Column(Integer,
ForeignKey('test_attempt.id', ondelete="CASCADE"),
primary_key=True)
personality_type = Column(String(512))
role = Column(String(512))
strategy = Column(String(512))
mind_value = Column(Float)
mind_text = Column(Text)
energy_value = Column(Float)
energy_text = Column(Text)
nature_value = Column(Float)
nature_text = Column(Text)
tactics_value = Column(Float)
tactics_text = Column(Text)
identity_value = Column(Float)
identity_text = Column(Text)
test_attempt = relationship("TestAttempt",
back_populates="sixteen_p_report",
uselist=False)
@staticmethod
@celery.task()
def generate_report(test_attempt_id):
from models import Question
from models import QuestionAttempt
from models import SectionAttempt
from models import Choice
from Algos.SixteenP import scraping
question_attempts = (QuestionAttempt.query
.join(QuestionAttempt.question)
.outerjoin(Question.choices)
.join(SectionAttempt,
and_(
SectionAttempt.id == QuestionAttempt.section_attempt_id,
SectionAttempt.test_attempt_id == test_attempt_id))
.options(load_only(QuestionAttempt.choice_id))
.options(contains_eager(QuestionAttempt.question)
.load_only(Question.id)
.contains_eager(Question.choices)
.load_only(Choice.id))
.all()
)
scrapped_info = scraping.scrape(question_attempts)
if scrapped_info is None:
return
try:
report = SixteenPReport.query.filter(
SixteenPReport.test_attempt_id == test_attempt_id).one()
db.session.delete(report)
except NoResultFound:
pass
report = SixteenPReport(test_attempt_id=test_attempt_id,
**scrapped_info)
db.session.add(report)
db.session.commit()
return question_attempts
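# Usage sketch (assumes a configured Celery worker and an existing attempt;
# the id value is a placeholder):
#   SixteenPReport.generate_report.delay(42)   # enqueue asynchronously
#   SixteenPReport.generate_report(42)         # or run inline for debugging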
|
harveyslash/backend-cleaned
|
beatest/models/SixteenPReport.py
|
SixteenPReport.py
|
py
| 2,816 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "Sugar.Dictifiable",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "extensions.db.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "extensions.db",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Float",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "models.QuestionAttempt.query.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.QuestionAttempt.query",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.QuestionAttempt",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "models.QuestionAttempt.question",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "models.QuestionAttempt",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "models.Question.choices",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "models.SectionAttempt",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.and_",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models.SectionAttempt.id",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.SectionAttempt",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "models.QuestionAttempt.section_attempt_id",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.QuestionAttempt",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "models.SectionAttempt.test_attempt_id",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.SectionAttempt",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.load_only",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.QuestionAttempt.choice_id",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "models.QuestionAttempt",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.contains_eager",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.QuestionAttempt.question",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "models.QuestionAttempt",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "models.Question.id",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "models.Question.choices",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "models.Choice.id",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "models.Choice",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "Algos.SixteenP.scraping.scrape",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "Algos.SixteenP.scraping",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "{'Question': 'models.Question', 'QuestionAttempt': 'models.QuestionAttempt', 'SectionAttempt': 'models.SectionAttempt', 'Choice': 'models.Choice', 'scraping': 'Algos.SixteenP.scraping'}.query.filter",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "{'Question': 'models.Question', 'QuestionAttempt': 'models.QuestionAttempt', 'SectionAttempt': 'models.SectionAttempt', 'Choice': 'models.Choice', 'scraping': 'Algos.SixteenP.scraping'}.query",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "{'Question': 'models.Question', 'QuestionAttempt': 'models.QuestionAttempt', 'SectionAttempt': 'models.SectionAttempt', 'Choice': 'models.Choice', 'scraping': 'Algos.SixteenP.scraping'}.test_attempt_id",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "extensions.db.session.delete",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "extensions.db.session",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "extensions.db",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "{'Question': 'models.Question', 'QuestionAttempt': 'models.QuestionAttempt', 'SectionAttempt': 'models.SectionAttempt', 'Choice': 'models.Choice', 'scraping': 'Algos.SixteenP.scraping'}",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "extensions.db.session.add",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "extensions.db.session",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "extensions.db",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "extensions.db.session.commit",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "extensions.db.session",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "extensions.db",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "extensions.celery.task",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "extensions.celery",
"line_number": 41,
"usage_type": "name"
}
] |
12570928349
|
from .strings import Strings
from .actions import Action
from dataclasses import dataclass, field
from telegram import User, InlineKeyboardButton, InlineKeyboardMarkup
import random
from datetime import datetime
REMOVE_ID_INDEX = 13  # the removal callback carries the person's id, which starts at index 13 of the callback data
@dataclass
class Tasting:
chat_id: int
name: str | None = None
tasting_message_id: int | None = None
people: int = 0
users: dict[int, User] = field(default_factory=lambda: {})
initiated_user: User | None = None
shuffled_ids: list[int] = field(default_factory=list)
def __post_init__(self):
self.name = f'{self.chat_id} {datetime.now().strftime("%m/%d/%Y, %H:%M:%S")}'
def clear(self):
self.tasting_message_id = None
self.people = 0
self.users.clear()
def add(self, user: User) -> bool:
if user.id not in self.users.keys():
self.users[user.id] = user
return True
return False
def remove(self, user: User) -> bool:
if user.id in self.users.keys():
del self.users[user.id]
return True
return False
def generate_keyboard(self) -> InlineKeyboardMarkup:
keyboard = [
[InlineKeyboardButton(Strings.KEYBOARD_TITLE, callback_data=Action.ROLL.value)],
[
InlineKeyboardButton(Strings.KEYBOARD_MINUS, callback_data=Action.MINUS.value),
InlineKeyboardButton(Strings.KEYBOARD_PEOPLE.format(self.people), callback_data=Action.NUM.value),
InlineKeyboardButton(Strings.KEYBOARD_PLUS, callback_data=Action.PLUS.value)
],
[InlineKeyboardButton(Strings.KEYBOARD_ADD, callback_data=Action.ADD_ME.value)]
]
if len(self.users) > 0:
for user_id, user in self.users.items():
# use last_name if username is not present
last = f'(@{user.username})' if user.username else user.last_name
single_user = [
InlineKeyboardButton(f'{user.first_name} {last}', callback_data=Action.NAME.value),
InlineKeyboardButton(Strings.KEYBOARD_REMOVE,
callback_data=f'{Action.REMOVE_ME.value} id:{user_id}'),
]
keyboard.append(single_user)
return InlineKeyboardMarkup(keyboard)
def roll(self, initiated_user: User):
self.initiated_user = initiated_user
all_ids = list(self.users.keys())
random.shuffle(all_ids)
self.shuffled_ids = all_ids
def winners_message(self) -> str:
def get_user_info(num: int, user_id: int) -> str:
user = self.users.get(user_id)
user_string = f'{num + 1}) {user.full_name}'
if user.username:
user_string += f' (@{user.username})'
user_string += "\n"
return user_string
winners = f'{Strings.TITLE}\n\n'
winners += f'{Strings.WINNERS}\n'
for counter, shuffle_id in enumerate(self.shuffled_ids):
if counter < self.people:
winners += get_user_info(counter, shuffle_id)
elif counter == self.people:
winners += f'{Strings.WAITING_LIST}\n'
winners += get_user_info(counter, shuffle_id)
else:
winners += get_user_info(counter, shuffle_id)
winners += f'\n@{self.initiated_user.username}'
return winners
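# Minimal usage sketch (the chat id and the telegram.User instance are placeholders):
#   t = Tasting(chat_id=-1001234567890)
#   t.add(user)
#   t.people = 1
#   t.roll(initiated_user=user)
#   text = t.winners_message()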
|
maxkupetskii/kurwabotV2
|
kurwa_bot/tasting.py
|
tasting.py
|
py
| 3,575 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "telegram.User",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "telegram.User",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "telegram.User",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "telegram.User",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_TITLE",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "actions.Action.ROLL",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_MINUS",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "actions.Action.MINUS",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_PEOPLE.format",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_PEOPLE",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "actions.Action.NUM",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_PLUS",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "actions.Action.PLUS",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_ADD",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "actions.Action.ADD_ME",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "actions.Action.NAME",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardButton",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "strings.Strings.KEYBOARD_REMOVE",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "actions.Action.REMOVE_ME",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "actions.Action",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "telegram.InlineKeyboardMarkup",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "telegram.InlineKeyboardMarkup",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "telegram.User",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "strings.Strings.TITLE",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "strings.Strings.WINNERS",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "strings.Strings.WAITING_LIST",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "strings.Strings",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 12,
"usage_type": "name"
}
] |
22762975242
|
import cv2
import time
import PoseModule as pm
cap = cv2.VideoCapture('Videos/14.mp4')
ok_flag = cap.isOpened()
if not ok_flag:
    print("Cannot open camera")
    exit()
pTime = 0
detector = pm.poseDetector()
while ok_flag:
success, img = cap.read()
# if frame is read correctly ret is True
if not success:
print("Can't receive frame (stream end?). Exiting ...")
break
img = detector.findPose(img)
lmList = detector.findPosition(img)
if len(lmList)!=0:
print(lmList[14])
cv2.circle(img, (lmList[14][1], lmList[14][2]), 10, (0, 0, 100), cv2.FILLED)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
cv2.imshow("Image", img)
cv2.waitKey(10)
if cv2.waitKey(10) == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if cv2.getWindowProperty("Image", cv2.WND_PROP_VISIBLE) < 1:
cap.release()
cv2.destroyAllWindows()
break
# after the loop exits, draw the FPS overlay on the last frame one more time
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
cv2.imshow("Image", img)
cv2.waitKey(10)
if cv2.waitKey(1) == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if cv2.getWindowProperty("Image", cv2.WND_PROP_VISIBLE) < 1:
cap.release()
cv2.destroyAllWindows()
break
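# Note: assuming PoseModule wraps MediaPipe Pose, each lmList entry is
# [id, x_px, y_px] and landmark 14 is the right elbow, which is why
# (lmList[14][1], lmList[14][2]) above gives the circle's pixel centre.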
|
GabrielaVasileva/ComputerVision
|
pose_estimation/PoseProject.py
|
PoseProject.py
|
py
| 1,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PoseModule.poseDetector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.getWindowProperty",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_VISIBLE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.getWindowProperty",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_VISIBLE",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 61,
"usage_type": "call"
}
] |
40694907934
|
# coding: utf-8
from fabric.api import local, sudo, lcd, put, cd
from fabric.context_managers import settings
from fabric.contrib.files import exists
from fabric.operations import local as lrun, run
from fabric.api import task
from fabric.state import env
import os
env.user = 'adminbuy'
proj_dir = '/home/user/www'
root_folder = '/adminbuy'
proj_fullpath = proj_dir + root_folder
local_config_dir = proj_fullpath + '/config'
local_config_dir_super = local_config_dir + "/supervisor"
remote_nginx_dir = '/etc/nginx/sites-enabled'
remote_supervisor_dir = '/etc/supervisor/conf.d'
super_flikiss = "flikiss.conf"
remote_wiki_dir = '/home/www/wiki'
wiki_conf_file = '.flikissrc'
@task
def localhost():
global proj_dir, local_config_dir, local_config_dir_super
proj_dir = "/home/user/bitbucket"
local_config_dir = proj_dir + root_folder + '/config'
local_config_dir_super = local_config_dir + "/supervisor"
env.run = lrun
env.hosts = ['localhost']
env.port = '22'
env.user = 'user'
@task
def remote():
env.run = run
env.hosts = ['46.101.216.62']
env.port = '22'
@task
def deploy():
create_user()
with settings(user='user'):
install_env()
clone_proj()
install_dependency()
install_rabbitmq()
install_redis()
prepare_db()
configure_nginx()
configure_supervisor_proj()
configure_supervisor_socket()
configure_supervisor_celery()
reload_nginx()
reload_super()
create_superuser()
@task
def update():
install_dependency()
with settings(user='user'):
with cd(proj_dir + root_folder):
run('git pull origin master')
migration()
reload_super()
reload_nginx()
@task
def migration():
with cd(proj_dir + root_folder):
run("python manage.py db upgrade")
@task
def prepare_db():
with cd(proj_dir + root_folder):
user = run('python -c "from config import USER; print USER"')
password = run('python -c "from config import PASSWORD; print PASSWORD"')
db = run('python -c "from config import DB; print DB"')
run('sudo -u postgres psql -c "CREATE ROLE {0} WITH PASSWORD \'{1}\' NOSUPERUSER CREATEDB NOCREATEROLE LOGIN;"'.format(user, password))
run('sudo -u postgres psql -c "CREATE DATABASE {0} WITH OWNER={1} TEMPLATE=template0 ENCODING=\'utf-8\';"'.format(db, user))
migration()
@task
def clone_proj():
run('mkdir ' + proj_dir + ' -p')
with cd(proj_dir):
run('git clone https://github.com/StasEvseev/adminbuy.git')
put("config_local.py", proj_fullpath)
@task
def create_user():
sudo('adduser user')
sudo('gpasswd -a user sudo')
@task
def install_env():
sudo('add-apt-repository ppa:chris-lea/nginx-devel -y')
sudo('apt-get update')
sudo('apt-get install -y python')
sudo('apt-get install python-setuptools')
sudo('easy_install pip')
sudo('apt-get install -y python-virtualenv')
sudo('apt-get install -y nginx')
sudo('apt-get install -y supervisor')
sudo('apt-get install -y git')
sudo('apt-get install build-essential gcc libxml2-dev libxslt1-dev -y')
sudo('apt-get install libpq-dev python-dev -y')
sudo('apt-get install postgresql-9.3 -y')
sudo('apt-get install libjpeg-dev')
@task
def install_dependency():
with cd(proj_dir + root_folder):
sudo('pip install -r REQUIREMENTS')
@task
def create_superuser():
with settings(user='user'):
with cd(proj_dir + root_folder):
run('python manage.py create_superuser')
@task
def install_rabbitmq():
try:
sudo("dpkg -l | grep rabbitmq-server")
except:
sudo("echo 'deb http://www.rabbitmq.com/debian/ testing main' | tee -a /etc/apt/sources.list")
sudo("wget https://www.rabbitmq.com/rabbitmq-signing-key-public.asc")
sudo("apt-key add rabbitmq-signing-key-public.asc")
sudo("apt-get update")
sudo("apt-get install rabbitmq-server -y")
sudo("rabbitmqctl add_user myuser mypassword")
sudo("rabbitmqctl add_vhost myvhost")
sudo("rabbitmqctl set_permissions -p myvhost myuser \".*\" \".*\" \".*\"")
@task
def install_redis():
sudo("apt-get install redis-server -y")
@task
def configure_wiki():
local("pip install flikiss")
if os.path.exists(remote_wiki_dir + "/" + wiki_conf_file) is False:
local("sudo mkdir %s -p" % remote_wiki_dir)
local("sudo cp %s/%s %s/%s " % (
local_config_dir, wiki_conf_file,
remote_wiki_dir, wiki_conf_file))
if os.path.exists(remote_supervisor_dir + "/" + super_flikiss) is False:
local("sudo cp %s/%s %s/%s" % (
local_config_dir_super, super_flikiss,
remote_supervisor_dir, super_flikiss))
@task
def reload_nginx():
sudo('/etc/init.d/nginx restart')
@task
def reload_super():
try:
sudo('service supervisor start')
except:
pass
sudo('supervisorctl reread')
sudo('supervisorctl reload')
@task
def configure_nginx():
"""
"""
with settings(user='user'):
sudo('/etc/init.d/nginx start')
if exists('/etc/nginx/sites-enabled/default'):
sudo('rm /etc/nginx/sites-enabled/default')
put("./config/buyapi", remote_nginx_dir, use_sudo=True)
put("private.key", '/etc/nginx/', use_sudo=True)
put("ssl.crt", '/etc/nginx/', use_sudo=True)
if exists("/etc/nginx/sites-enabled/buyapi") is False:
sudo('ln -s /etc/nginx/sites-available/buyapi' +
' /etc/nginx/sites-enabled/buyapi')
@task
def configure_supervisor_proj():
"""
"""
if exists(remote_supervisor_dir + '/buyapi.conf') is False:
sudo('cp ' + local_config_dir_super + '/buyapi.conf ' + remote_supervisor_dir + '/buyapi.conf')
@task
def configure_supervisor_socket():
"""
"""
if exists(remote_supervisor_dir + '/socket.conf') is False:
sudo('cp ' + local_config_dir_super + '/socket.conf ' + remote_supervisor_dir + '/socket.conf')
@task
def configure_supervisor_celery():
if exists(remote_supervisor_dir + "/celery.conf") is False:
sudo('cp ' + local_config_dir_super + '/celery.conf ' + remote_supervisor_dir + '/celery.conf')
sudo('mkdir /var/log/celery')
if exists(remote_supervisor_dir + "/celerybeats.conf") is False:
sudo('cp ' + local_config_dir_super + '/celerybeats.conf ' + remote_supervisor_dir + '/celerybeats.conf')
sudo('mkdir /var/log/celerybeats')
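# Invocation sketch (Fabric 1.x task chaining; hosts/users come from the tasks above):
#   fab remote deploy      # provision the server and deploy the project
#   fab localhost update   # pull, migrate and reload a local checkout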
|
StasEvseev/adminbuy
|
fabfile.py
|
fabfile.py
|
py
| 6,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fabric.state.env.user",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.run",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "fabric.operations.local",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.hosts",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.port",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.user",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "fabric.api.task",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.run",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "fabric.operations.run",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.hosts",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "fabric.state.env.port",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "fabric.state.env",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "fabric.api.task",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "fabric.context_managers.settings",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "fabric.context_managers.settings",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "fabric.api.cd",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "fabric.api.cd",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "fabric.api.cd",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "fabric.operations.run",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "fabric.api.cd",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "fabric.api.put",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "fabric.api.cd",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "fabric.context_managers.settings",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "fabric.api.cd",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "fabric.operations.run",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "fabric.api.local",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "fabric.api.local",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "fabric.api.local",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "fabric.api.local",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "fabric.api.sudo",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "fabric.context_managers.settings",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "fabric.api.put",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "fabric.api.put",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "fabric.api.put",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "fabric.contrib.files.exists",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "fabric.api.sudo",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "fabric.api.task",
"line_number": 230,
"usage_type": "name"
}
] |
73746980669
|
import warnings
import time
import os
import joblib
import json
import pandas as pd
from src.training import hyper_param_tuning, test_models
from src.utils import parse_terminal_arguments, get_repr_model, dict_cartesian_product
warnings.simplefilter(action='ignore', category=FutureWarning)
#%%
start = time.time()
dataset, model, save_name = parse_terminal_arguments()
model_save_path = f'./results/{dataset}/{save_name}/'
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
with open('configs.json') as f:
configs = json.load(f)
with open('xgb_params.json') as f:
xgb_params = json.load(f)
#%%
sb_threshold = configs[f'{dataset}_sb_threshold']
train_folds_path = configs[f'{dataset}_folds']
train_folds = [pd.read_csv(f'{train_folds_path}fold_{fold_idx}.csv') for fold_idx in range(5)]
for fold in train_folds:
fold['ligand_id'] = fold['ligand_id'].astype(str)
test = pd.read_csv(configs[f'{dataset}_test'])
test['ligand_id'] = test['ligand_id'].astype(str)
print('Read the training/test data')
representation_model = get_repr_model(dataset, model, configs)
n_phases = len(xgb_params['search_params'])
fixed_params = xgb_params['fixed_params']
best_params = {}
for phase in range(n_phases):
print(f'Fine-tuning. Phase: {phase+1}')
param_combinations = dict_cartesian_product(xgb_params['search_params'][phase])
fixed_params = {**fixed_params, **best_params}
best_models, best_params, cv_scores = hyper_param_tuning(fixed_params,
param_combinations,
representation_model,
train_folds,
sb_threshold,
model_save_path)
cv_scores.to_csv(f'{model_save_path}cv_scores_p{phase+1}.csv', index=None)
#%%
joblib.dump(best_models, model_save_path + 'models.pkl', compress=3)
with open(model_save_path + 'best_params.json', 'w') as f:
json.dump({**fixed_params, **best_params}, f)
print('Done tuning. Testing...')
test_models(best_models, representation_model, train_folds, test, sb_threshold, model_save_path)
print('DONE!')
elapsed_total_time = time.time() - start
total_time = time.strftime('%H:%M:%S', time.gmtime(elapsed_total_time))
print(f'Whole program took {total_time}')
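# Sketch of how the tuning phases chain (grid values are hypothetical): if phase 1
# searches {'max_depth': [4, 6]} and 6 wins, the merge fixed_params =
# {**fixed_params, **best_params} pins max_depth=6 for phase 2, so each phase
# refines earlier winners instead of re-searching the full cartesian product.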
|
boun-tabi/chemboost
|
src/runner.py
|
runner.py
|
py
| 2,510 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "warnings.simplefilter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.utils.parse_terminal_arguments",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "src.utils.get_repr_model",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "src.utils.dict_cartesian_product",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "src.training.hyper_param_tuning",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "joblib.dump",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "src.training.test_models",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 64,
"usage_type": "call"
}
] |
32402735389
|
#!/usr/bin/env python
# coding: utf-8
#Project Information
# We will create a classifier that can distinguish spam (junk or commercial or bulk) emails from ham (non-spam) emails.
# Spam/Ham Classification
# EDA, Feature Engineering, Classifier
# Dataset Information
# In email classification, our goal is to classify emails as spam or not spam (referred to as "ham") using features generated from the text in the email.
# The dataset consists of email messages and their labels (0 for ham, 1 for spam).
# Our labeled training dataset contains 8348 labeled examples, and the test set contains 1000 unlabeled examples.
# Run the following cells to load the data into DataFrames.
# The `train` DataFrame contains labeled data that we will use to train our model. It contains four columns:
# 1. `id`: An identifier for the training example
# 1. `subject`: The subject of the email
# 1. `email`: The text of the email
# 1. `spam`: 1 if the email is spam, 0 if the email is ham (not spam)
# The `test` DataFrame contains 1000 unlabeled emails.
# We will predict labels for these emails and submit our predictions to Kaggle for evaluation.
#Importing libraries
from client.api.notebook import Notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils import fetch_and_cache_gdrive
from sklearn.model_selection import train_test_split
from IPython.display import display, Markdown
from sklearn.linear_model import LogisticRegression
from datetime import datetime
import re
get_ipython().system('pip install wordcloud')
from wordcloud import WordCloud, STOPWORDS
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set(style = "whitegrid",
color_codes = True,
font_scale = 1.5)
# 1. Load the dataset
fetch_and_cache_gdrive('1SCASpLZFKCp2zek-toR3xeKX3DZnBSyp', 'train.csv')
fetch_and_cache_gdrive('1ZDFo9OTF96B5GP2Nzn8P8-AL7CTQXmC0', 'test.csv')
original_training_data = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# 2. Preprocessing
## a) Convert the emails to lower case as a first step to processing the text
original_training_data['email'] = original_training_data['email'].str.lower()
test['email'] = test['email'].str.lower()
original_training_data.head()
## b) We will check if our data contains any missing values and replace them with appropriate filler values.
## (i.e., NaN values in the `subject` or `email` columns should be replaced with empty strings).
## Note that while there are no NaN values in the `spam` column, we should be careful when replacing NaN labels.
# Doing so without consideration may introduce significant bias into our model when fitting.
original_training_data['subject'].fillna("",inplace = True)
original_training_data['email'].fillna("",inplace = True)
original_training_data.isnull().sum()
## c) Print the text of first ham and first spam email in the original training set to see the difference between the two emails that might relate to the identification of spam.
first_ham = original_training_data[original_training_data['spam'] == 0]['email'].iloc[0]
first_spam = original_training_data[original_training_data['spam'] == 1]['email'].iloc[0]
print(first_ham)
print(first_spam)
## We notice that spam email contains a lot of tags like head, body, html, br, href etc as compared to the ham email.
## These tags could be used to differentiate between two emails and determine if an email is spam or ham.
## d) Training Validation Split
# The training data we downloaded is all the data we have available for both training models and testing the models that we train. We therefore need to split the training data into separate training and testing datasets. Note that we set the seed (random_state) to 42. This will produce a pseudo-random sequence of random numbers that is the same for every student.
train, test = train_test_split(original_training_data, test_size=0.1, random_state=42)
### Basic Feature Engineering
'''
We would like to take the text of an email and predict whether the email is ham or spam.
This is a classification problem, so we can use logistic regression to train a classifier.
Recall that to train an logistic regression model we need a numeric feature matrix $X$ and a vector of corresponding binary labels $y$.
Unfortunately, our data are text, not numbers.
To address this, we can create numeric features derived from the email text and use those features for logistic regression.
Each row of $X$ is an email.
Each column of $X$ contains one feature for all the emails.
'''
# Create a 2-dimensional NumPy array containing one row for each email text and that row should contain either a 0 or 1 for each word in the list.
def words_in_texts(words, texts):
'''
Args:
words (list-like): words to find
texts (Series): strings to search in
Returns:
NumPy array of 0s and 1s with shape (n, p) where n is the
number of texts and p is the number of words.
'''
    indicator_array = []
    for text in texts:
        row = [1 if word in text else 0 for word in words]
        indicator_array.append(row)
    return indicator_array
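# Editor's sanity check (illustrative addition): each row marks which of the
# given words appear, as substrings, in the corresponding text.
assert words_in_texts(['hello', 'bye'], pd.Series(['hello world', 'bye bye'])) == [[1, 0], [0, 1]]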
# 3. BASIC EDA
# We need to identify some features that allow us to distinguish spam emails from ham emails.
# One idea is to compare the distribution of a single feature in spam emails to the distribution of the same feature in ham emails.
# If the feature is itself a binary indicator, such as whether a certain word occurs in the text,
# Then this amounts to comparing the proportion of spam emails with the word to the proportion of ham emails with the word.
# The following plot (which was created using `sns.barplot`) compares the proportion of emails in each class containing a particular set of words.
# 
df = pd.DataFrame({
'word_1': [1, 0, 1, 0],
'word_2': [0, 1, 0, 1],
'type': ['spam', 'ham', 'ham', 'ham']
})
display(Markdown("> Our Original DataFrame has some words column and a type column. You can think of each row as a sentence, and the value of 1 or 0 indicates the number of occurances of the word in this sentence."))
display(df);
display(Markdown("> `melt` will turn columns into variale, notice how `word_1` and `word_2` become `variable`, their values are stored in the value column"))
display(df.melt("type"))
## Create a bar chart like the one above comparing the proportion of spam and ham emails containing certain words.
## Choose a set of words that have different proportions for the two classes.
## Make sure to only consider emails from `train`.
train=train.reset_index(drop=True) # We must do this in order to preserve the ordering of emails to labels for words_in_texts
set_of_words=['head','href','br']
matrix = np.matrix(words_in_texts(set_of_words, train['email']))
new_df = pd.DataFrame(matrix).rename(columns={0:'head',1:'href',2:'br'})
new_df['type'] = train['spam']
new_df = new_df.melt('type')
new_df['type'] = new_df['type'].map({0:'ham',1:'spam'})
sns.barplot(x='variable', y='value', hue='type', data=new_df, ci=None);
## When the feature is binary, it makes sense to compare its proportions across classes (as in the previous question).
## Otherwise, if the feature can take on numeric values, we can compare the distributions of these values for different classes.
## 
## Create a class conditional density plot to compare the distribution of the length of spam emails to the distribution of the length of ham emails in the training set.
df = pd.DataFrame({'length': train['email'].apply(len),'spam': train['spam']})
df = df.melt('spam')
df['spam'] = df['spam'].map({0:'ham',1:'spam'})
x=df[df['spam']=='ham']
y=df[df['spam']=='spam']
plt.figure()
plt.xlim(0,50000)
a=sns.distplot(x['value'], label='ham', hist=False)
b=sns.distplot(y['value'], label='spam', hist=False)
a.set(xlabel='Length of email', ylabel='Distribution')
plt.legend();
## We notice in general, the length of spam emails is more than the length of ham emails.
# 4. Basic Classification
## Notice that the output of `words_in_texts(words, train['email'])` is a numeric matrix containing features for each email.
## This means we can use it directly to train a classifier!
## `X_train` should be a matrix of 0s and 1s created by using your `words_in_texts` function on all the emails in the training set.
## `Y_train` should be a vector of the correct labels for each email in the training set.
some_words = ['drug', 'bank', 'prescription', 'memo', 'private']
X_train = np.array(words_in_texts(some_words, train['email']))
Y_train = train['spam']
X_train[:5], Y_train[:5]
# Now that we have matrices, we can use to scikit-learn!
# Using the [`LogisticRegression`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) classifier.
# Train a logistic regression model using `X_train` and `Y_train`.
# Then, output the accuracy of the model (on the training data) in the cell below.
model = LogisticRegression()
model.fit(X_train, Y_train)
training_accuracy = model.score(X_train, Y_train)
print("Training Accuracy: ", training_accuracy)
# We have trained our first logistic regression model and it can correctly classify around 76% of the training data!
# We can definitely do better than this by selecting more and better features.
# 5. Evaluating Classifiers
''' The model we trained doesn't seem too shabby! But the classifier we made above isn't as good as this might lead us to believe.
First, we are evaluating accuracy on the training set, which may provide a misleading accuracy measure, especially if we used the training set to identify discriminative features.
In future parts of this analysis, it will be safer to hold out some of our data for model validation and comparison.
Presumably, our classifier will be used for filtering, i.e. preventing messages labeled `spam` from reaching someone's inbox. There are two kinds of errors we can make:
- False positive (FP): a ham email gets flagged as spam and filtered out of the inbox.
- False negative (FN): a spam email gets mislabeled as ham and ends up in the inbox.
These definitions depend both on the true labels and the predicted labels.
False positives and false negatives may be of differing importance, leading us to consider more ways of evaluating a classifier, in addition to overall accuracy.
'''
'''
Precision measures the proportion $\frac{\text{TP}}{\text{TP} + \text{FP}}$ of emails flagged as spam that are actually spam.
Recall measures the proportion $\frac{\text{TP}}{\text{TP} + \text{FN}}$ of spam emails that were correctly flagged as spam.
False-alarm rate measures the proportion $\frac{\text{FP}}{\text{FP} + \text{TN}}$ of ham emails that were incorrectly flagged as spam.
'''
# The following image might help:
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/2/26/Precisionrecall.svg/700px-Precisionrecall.svg.png" width="500px">
#
'''
Note that a true positive (TP) is a spam email that is classified as spam, and a true negative (TN) is a ham email that is classified as ham.
'''
Y_train_hat = model.predict(X_train)
true_pos = np.sum(Y_train_hat & Y_train)
total_pos = np.sum(Y_train_hat)
false_neg = np.sum(Y_train) - true_pos
false_pos = total_pos - true_pos
true_neg = np.sum(Y_train==0) - false_pos
logistic_predictor_precision = true_pos / total_pos
logistic_predictor_recall = true_pos / (true_pos + false_neg)
logistic_predictor_far = false_pos / (false_pos + true_neg)
print(logistic_predictor_precision, logistic_predictor_recall,logistic_predictor_far )
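# Worked toy example (editor's addition): with TP=8, FP=2, FN=4, TN=86,
# precision = 8/10 = 0.80, recall = 8/12 ≈ 0.67, false-alarm rate = 2/88 ≈ 0.023.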
## ham and spam emails
ham_emails = train[train['spam'] == 0]
spam_emails = train[train['spam'] == 1]
'''
Finding better features based on the email text:
- Number of characters in the subject / body
- Number of words in the subject / body
- Use of punctuation (e.g., how many '!' were there?)
- Number / percentage of capital letters
- Whether the email is a reply to an earlier email or a forwarded email
- Number of html tags
'''
# Number of characters in the subject
def subject_char(df):
    return df['subject'].str.findall(r'\w').str.len()
# Number of words in the subject
def subject_words(df):
    return df['subject'].str.findall(r"\w+").str.len().fillna(0)
# Use of punctuation (e.g., how many '!' were there?)
def punc_exclamation(df):
    return df['email'].str.findall("!").str.len()
def punc(df):
    return df['email'].str.findall(r'[^A-Za-z0-9]').str.len() / df['email'].str.findall(r'\w+').str.len()
# Number / percentage of capital letters
def capital_letters_percentage(df):
return (df['subject'].str.findall(r'[A-Z]').str.len() / df['subject'].str.len())
# Whether the email is a reply to an earlier email or a forwarded email
def reply_email(df):
return df['subject'].apply(lambda x: 1 if "Re:" in x else 0)
def forward_email(df):
return df['subject'].apply(lambda x: 1 if "Fw:" in x else 0)
# Number of html tags
def html_tag(df):
return df['email'].str.findall("/>").str.len()
# Number of characters in the subject
sns.distplot(subject_char(spam_emails), label = 'spam', hist=False)
sns.distplot(subject_char(ham_emails), label = 'ham', hist=False)
plt.xlabel('Number of characters in Subject');
# *We can notice that both the spam and ham emails have a similar number of characters in the subject/body.*
# Number of words in the subject
sns.distplot(subject_words(spam_emails), label = 'spam', hist=False)
sns.distplot(subject_words(ham_emails), label = 'ham', hist=False)
plt.xlabel('Number of words in Subject');
# *We can notice that both the spam and ham emails have a similar number of words in the subject/body.*
# Number of ! punctuations in the email
sns.distplot(punc_exclamation(spam_emails), label = 'spam', hist=False)
sns.distplot(punc_exclamation(ham_emails), label = 'ham', hist=False)
plt.xlabel('Number of punctuations (!) in emails');
# *We can notice here that spam emails have a higher use of exclamation marks as compared to the ham emails.*
# Number of punctuations in the email
sns.distplot(punc(spam_emails), label = 'spam', hist=False)
sns.distplot(punc(ham_emails), label = 'ham', hist=False)
plt.xlabel('Number of punctuations in email per word');
# *We can notice here that spam emails have a higher use of punctuations per word as compared to the ham emails.*
# Number / percentage of capital letters
sns.distplot(capital_letters_percentage(spam_emails), label = 'spam', hist=False)
sns.distplot(capital_letters_percentage(ham_emails), label = 'ham', hist=False)
plt.xlabel('percentage of capital letters in Subject');
# *Again, we find that the percentage of capital letters in the subject for both the emails are similar.*
# 2. Improving word features :
# Top words in spam and ham emails to help us find better word features.
def word_bags(df):
wordList = {}
for email in df['email']:
        words = re.findall(r'\w+', email)
for w in words:
if (w in wordList):
wordList[w] += 1
else:
wordList[w] = 1
return wordList
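# Editor's note: an equivalent, more idiomatic sketch of the same counting
# using the standard library (illustrative only; word_bags above is what the
# notebook actually uses below):
from collections import Counter

def word_bags_counter(df):
    counts = Counter()
    for email in df['email']:
        counts.update(re.findall(r'\w+', email))
    return dict(counts)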
spam_bag = (pd.Series(word_bags(spam_emails)) / spam_emails.shape[0]).sort_values(ascending=False).iloc[:20]
ham_bag = (pd.Series(word_bags(ham_emails)) / ham_emails.shape[0]).sort_values(ascending=False).iloc[:20]
fig, axs = plt.subplots(ncols=2)
fig.set_size_inches(8,10)
spam_bar = sns.barplot(x=spam_bag.values, y=spam_bag.index, ax=axs[0])
spam_bar.set_title("Top words in spam emails")
hams_bar = sns.barplot(x=ham_bag.values, y=ham_bag.index, ax=axs[1])
hams_bar.set_title("Top words in ham emails")
train_word_bag = (pd.Series(word_bags(train)) / train.shape[0]).sort_values(ascending=False)[:300]
train_word_bag
## Adding new words
from sklearn.linear_model import LogisticRegressionCV
def process_data_set(df):
some_words = ['$', '!', 'body', 'html', '/>'] + train_word_bag.index.tolist()
X_train = np.array(words_in_texts(some_words, df['email'])).astype(int)
feature = pd.concat([subject_words(df), punc_exclamation(df), punc(df)], axis = 1).values
X_train = np.concatenate((X_train, feature), axis=1)
return X_train
X_train = process_data_set(train)
Y_train = train['spam']
model = LogisticRegressionCV(Cs=4, fit_intercept=True, cv=10, verbose =1, random_state=42)
model.fit(X_train, Y_train)
training_accuracy = model.score(X_train, Y_train)
print("Training Accuracy: ", training_accuracy)
# 5. Feature/Model Selection Process
'''
- I used the ideas mentioned in the section above when moving forward.
I visualised features such as the number of characters in the subject, the number of words in the subject, the use of punctuation, and the percentage of capital letters.
I also dug into the email text itself to find words which could be used to distinguish between the emails. I have shown the process in the previous part.
- While plotting, I compared the distribution of each feature in ham and spam emails.
A lot of the features had similar distributions. For example, features including the number of words in the subject, the number of characters in the subject and the number of capital letters had similar distributions for both the ham and spam emails.
The distributions of features like exclamation marks (!) and punctuation in general were different for the ham and spam emails, which means these were good features.
I also found better words to distinguish between the emails using the word-bag method and by inspecting the emails.
Some of these words include '$', '!', 'body', 'html', '/>', 'http', 'com', etc.
- It is surprising to see opposite distributions for the general use of punctuation and the specific use of exclamation marks.
Basically, we notice that ham emails use more punctuation overall as compared to spam emails,
while significantly more exclamation marks are used by spam emails as compared to ham emails.
'''
# I have used wordCloud library on spam and ham emails to visualise which words are used more.
# We can notice that spam emails use words like font, html, td, tr, etc while the ham emails use words like https, com, etc.
# We can use this visualisation to choose better word features to distinguish between the spam and ham emails.
ham_emails = train[train['spam'] == 0]
spam_emails = train[train['spam'] == 1]
spam_text = spam_emails['email'].values
ham_text = ham_emails['email'].values
wordcloud = WordCloud(
width = 3000,
height = 2000,
background_color = 'black',
stopwords = STOPWORDS).generate(str(spam_text))
print("SPAM EMAILS")
fig = plt.figure(
figsize = (40, 30),
facecolor = 'k',
edgecolor = 'k')
plt.imshow(wordcloud)
wordcloud1 = WordCloud(
width = 3000,
height = 2000,
background_color = 'black',
stopwords = STOPWORDS).generate(str(ham_text))
print("HAM EMAILS")
fig1 = plt.figure(
figsize = (40, 30),
facecolor = 'k',
edgecolor = 'k')
plt.imshow(wordcloud1)
plt.axis('off')
plt.tight_layout(pad=0)
plt.show()
## 5. Submitting to Kaggle
test_predictions = model.predict(process_data_set(test))
# The following saves a file to submit to Kaggle.
submission_df = pd.DataFrame({
"Id": test['id'],
"Class": test_predictions,
}, columns=['Id', 'Class'])
timestamp = datetime.isoformat(datetime.now()).split(".")[0]
submission_df.to_csv("submission_{}.csv".format(timestamp), index=False)
print('Created a CSV file: {}.'.format("submission_{}.csv".format(timestamp)))
print('You may now upload this CSV file to Kaggle for scoring.')
## We got a 99.7% accuracy.
|
muskaangoyal/data-science-portfolio
|
spam-ham-master/proj.py
|
proj.py
|
py
| 19,773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "seaborn.set",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "utils.fetch_and_cache_gdrive",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "utils.fetch_and_cache_gdrive",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "IPython.display.Markdown",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "IPython.display.Markdown",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "seaborn.barplot",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "seaborn.barplot",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "seaborn.barplot",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegressionCV",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.isoformat",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 402,
"usage_type": "call"
}
] |
73338806587
|
import gc
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, ContextManager, Iterator
import pytest
from typing_extensions import Protocol, runtime_checkable
from antidote._internal import enforce_subclass_if_possible, Singleton
from antidote._internal.utils import CachedMeta
from tests.utils import Box
@contextmanager
def does_not_raise() -> Iterator[None]:
yield
does_raise = pytest.raises(TypeError, match="(?i).*(isinstance|subclass|implement).*")
class DummyProtocol(Protocol):
def dummy(self) -> None:
...
@runtime_checkable
class DummyRuntimeProtocol(Protocol):
def dummy(self) -> None:
...
class ValidDummy:
def dummy(self) -> None:
...
class InvalidDummy:
pass
class SubDummy(ValidDummy):
pass
@pytest.mark.parametrize(
"expectation, sub, tpe",
[
(does_not_raise(), ValidDummy, DummyProtocol),
(does_not_raise(), ValidDummy, DummyRuntimeProtocol),
(does_not_raise(), InvalidDummy, DummyProtocol),
(does_raise, InvalidDummy, DummyRuntimeProtocol),
(does_raise, InvalidDummy, ValidDummy),
(does_not_raise(), SubDummy, ValidDummy),
(does_not_raise(), 1, 1),
(does_not_raise(), 1, int),
(does_not_raise(), int, 1),
],
)
def test_enforce_subtype(expectation: ContextManager[Any], sub: type, tpe: type) -> None:
with expectation:
enforce_subclass_if_possible(sub, tpe)
def test_singleton() -> None:
class Dummy(Singleton):
pass
assert Dummy() is Dummy()
def test_cached_instances() -> None:
@dataclass(eq=True, unsafe_hash=True)
class Dummy(metaclass=CachedMeta):
__slots__ = ("value", "__weakref__")
value: Box[str]
def __init__(self, value: Box[str]) -> None:
self.value = value
def __repr__(self) -> str:
return "Dummy"
hello = Box("hello")
a = Dummy(hello)
assert a.value is hello
assert Dummy(hello) is a
assert Dummy(Box("hello")) is a
assert Dummy(Box("Different")) is not a
john = Box("John")
def f() -> None:
Dummy(john) # create instance without keeping a reference to it
f()
gc.collect()
b = Dummy(Box("John"))
assert b.value is not john
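# Editor's sketch (an assumption, not antidote's actual implementation): the
# weak caching behaviour exercised above can be approximated by a metaclass
# holding a WeakValueDictionary keyed on the constructor arguments, which is
# why the instance created inside f() is collectable once unreferenced.
import weakref

class _CachedMetaSketch(type):
    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        cls._cache = weakref.WeakValueDictionary()

    def __call__(cls, *args):
        try:
            return cls._cache[args]  # reuse the live instance for equal args
        except KeyError:
            instance = super().__call__(*args)
            cls._cache[args] = instance
            return instance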
|
Finistere/antidote
|
tests/internal/test_utils.py
|
test_utils.py
|
py
| 2,310 |
python
|
en
|
code
| 88 |
github-code
|
6
|
[
{
"api_name": "contextlib.contextmanager",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing_extensions.Protocol",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Protocol",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing_extensions.runtime_checkable",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.ContextManager",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "antidote._internal.enforce_subclass_if_possible",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "antidote._internal.Singleton",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "antidote._internal.utils.CachedMeta",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "tests.utils.Box",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "tests.utils.Box",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tests.utils.Box",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "tests.utils.Box",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tests.utils.Box",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tests.utils.Box",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tests.utils.Box",
"line_number": 99,
"usage_type": "call"
}
] |
43755872386
|
'''
Measures the square area of colonies in several image files and exports data as an excel sheet.
Written by George Walters-Marrah
Last updated: 6/26/2019
'''
# import needed packages
import colSizeMeasurer as cm
import numpy as np
import pandas as pd
import os.path
from os import path
import imageio
# analyzes several images processively
class analyzer:
# initializes the analyzer with all the information it needs
def __init__(self, imFolders, imVectors, imStrains, imPlates, imRepNums, imType, firstMask, secondMaskLow, secondMaskHigh, smallSize, largeSize, stdThreshold, control):
self._imFolders = imFolders
self._imVectors = imVectors
self._imStrains = imStrains
self._imPlates = imPlates
self._imRepNums = imRepNums
self._imType = imType
self._control = control
self._firstMask = firstMask
self._secondMaskLow = secondMaskLow
self._secondMaskHigh = secondMaskHigh
self._smallSize = smallSize
self._largeSize = largeSize
self._stdThreshold = stdThreshold
def checkFiletype(self):
fileList = []
for folder in range(len(self._imFolders)):
imFolder = self._imFolders[folder]
for vector in range(len(self._imVectors)):
imVector = self._imVectors[vector]
for strain in range(len(self._imStrains)):
imStrain = self._imStrains[strain]
for plate in range(len(self._imPlates)):
imPlate = self._imPlates[plate]
for repNum in range(len(self._imRepNums)):
imRepNum = self._imRepNums[repNum]
# Check if the PATH exists
filePath = imFolder + '/' + imVector + '_' + imStrain + '_' + imPlate + '_' + imRepNum + self._imType
if path.exists(filePath):
imCheck = imageio.imread(filePath)
dtypeCheck = imCheck.dtype
if dtypeCheck != 'uint8':
fileList.append(filePath)
if len(fileList) == 0:
print('Files in folder(s) ' + str(self._imFolders) + ' checked.')
else:
raise ValueError(str(fileList) + ' must be uint8. Change image file(s) to uint8 then try again.')
    # defines and gets the size of the control wherever it is present
def getControl(self):
data = []
for Folder in range(len(self._imFolders)):
imFolder = self._imFolders[Folder]
for repNum in range(len(self._imRepNums)):
imRepNum = self._imRepNums[repNum]
# Check if the PATH exists
controlPath = imFolder + '/' + self._control[0] + '_' + self._control[1] + '_' + self._control[2] + '_' + imRepNum + self._imType
if path.exists(controlPath):
# Analyze data if the PATH exists
controlData = cm.measure(imFolder, self._control[0], self._control[1], self._control[2], imRepNum, self._imType, self._firstMask, self._secondMaskLow, self._secondMaskHigh, self._smallSize, self._largeSize, self._stdThreshold, False, True)
data.append(controlData[1])
# Decide what to do if PATH does not exist
else:
check = input('The PATH "' + controlPath + '" does not exist. Do you want to continue? If yes, type Y. If no, type N:')
if check == 'Y' or check == 'y':
print('PATH ignored.')
elif check == 'N' or check == 'n':
raise ValueError('Program stopped. Change PATH and try again.')
else:
doubleCheck = input('Did you mean to put N?:')
if doubleCheck == 'Y' or doubleCheck == 'y':
raise ValueError('Program stopped. Change PATH and try again.')
else:
print('PATH ignored.')
np_data = np.array(data)
print('')
print('||| Control created using', self._control[0] + '_' + self._control[1] + '_' + self._control[2], '|||')
return np.around(np.mean(np_data),2)
# analyzes the data images in a processive manner
def analyze(self, control):
fin_data = []
colName = []
colMean = []
colMedian = []
colStd = []
colFolder = []
colVector = []
colStrain = []
colPlate = []
colRepNum = []
colData = []
colRatio = []
for folder in range(len(self._imFolders)):
imFolder = self._imFolders[folder]
for vector in range(len(self._imVectors)):
imVector = self._imVectors[vector]
for strain in range(len(self._imStrains)):
imStrain = self._imStrains[strain]
for plate in range(len(self._imPlates)):
imPlate = self._imPlates[plate]
for repNum in range(len(self._imRepNums)):
imRepNum = self._imRepNums[repNum]
# Check if the PATH exists
dataPath = imFolder + '/' + imVector + '_' + imStrain + '_' + imPlate + '_' + imRepNum + self._imType
if path.exists(dataPath):
# Analyze data if the PATH exists
initial_data = cm.measure(imFolder, imVector, imStrain, imPlate, imRepNum, self._imType, self._firstMask, self._secondMaskLow, self._secondMaskHigh, self._smallSize, self._largeSize, self._stdThreshold, False, True)
ratio = np.around(initial_data[1]/control,3)
initial_data.append(ratio)
fin_data.append(initial_data)
# Decide what to do if PATH does not exist
else:
check = input('The PATH "' + dataPath + '" does not exist. Do you want to continue? If yes, type Y. If no, type N:')
if check == 'Y' or check == 'y':
print('PATH ignored.')
elif check == 'N' or check == 'n':
raise ValueError('Program stopped. Change PATH and try again.')
else:
doubleCheck = input('Did you mean to put N?:')
if doubleCheck == 'Y' or doubleCheck == 'y':
raise ValueError('Program stopped. Change PATH and try again.')
else:
print('PATH ignored.')
for l in fin_data:
colName.append(l[0])
colMean.append(l[1])
colMedian.append(l[2])
colStd.append(l[3])
colFolder.append(l[4])
colVector.append(l[5])
colStrain.append(l[6])
colPlate.append(l[7])
colRepNum.append(l[8])
colData.append(l[9])
colRatio.append(l[10])
all_data = [colName, colMean, colMedian, colStd, colFolder, colVector, colStrain, colPlate, colRepNum, colData, colRatio]
return all_data
# makes and returns the data as an excel sheet. Can also combine data if you choose
def makeData(exportNameSum, exportNameRaw, listRawData):
# combines data if there is more than one dataset
if len(listRawData) > 1:
rawData = [[],[],[],[],[],[],[],[],[],[],[]]
for data in listRawData:
for index in range(len(rawData)):
rawData[index] = rawData[index] + data[index]
else:
rawData = listRawData[0]
# Make the dataframe of summary data
dicSum = {'imName': rawData[0],
'ratio': rawData[10],
'mean': rawData[1],
'median': rawData[2],
'standardDeviation': rawData[3],
'folder': rawData[4],
'vector': rawData[5],
'strain': rawData[6],
'plate': rawData[7],
'repetitionNumber': rawData[8],
'rawData': rawData[9]}
finalDataSum = pd.DataFrame(dicSum)
colsSum = ['imName', 'ratio', 'mean', 'median', 'standardDeviation', 'folder', 'vector', 'strain', 'plate', 'repetitionNumber', 'rawData']
finalDataSum = finalDataSum[colsSum]
print('Summary Data')
print(finalDataSum.iloc[:, 0:5])
# folders where raw data(size of every individual colony) will be stored
imNameRaw = []
measRaw = []
amountRaw = []
folderRaw = []
# creates the raw data
for data in range(len(rawData[9])):
for value in rawData[9][data]:
imNameRaw.append(rawData[0][data])
measRaw.append(value)
amountRaw.append(len(rawData[9][data]))
folderRaw.append(rawData[4][data])
dicRaw = {'imName': imNameRaw,
'area': measRaw,
'dataPointNum': amountRaw,
'folder': folderRaw}
finalDataRaw = pd.DataFrame(dicRaw)
colsRaw = ['imName', 'area', 'dataPointNum', 'folder']
finalDataRaw = finalDataRaw[colsRaw]
# Write the data to the excel sheet
excelFileSum = exportNameSum + '.xlsx'
excelFileRaw = exportNameRaw + '.xlsx'
finalDataSum.to_excel(excelFileSum)
finalDataRaw.to_excel(excelFileRaw)
print('')
print('Check folder to see new ' + exportNameSum + ' and ' + exportNameRaw + ' file.')
def main():
# Input info here
Folders = []
imVectors = []
imStrains = []
imPlates = []
imRepNums = []
imType = ''
control = []
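    # Example of how these inputs are expected to look (editor's assumption;
    # the values below are hypothetical, matching the PATH pattern used above):
    # Folders = ['images_day1']
    # imVectors = ['pVec1']
    # imStrains = ['strainA']
    # imPlates = ['plate1']
    # imRepNums = ['1', '2']
    # imType = '.tif'
    # control = ['pVec1', 'strainA', 'plate1']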
col = analyzer(Folders, imVectors, imStrains, imPlates, imRepNums, imType, 190, 50, 185, 2, 235, 1.5, control)
control_size = col.getControl()
data = col.analyze(control_size)
makeData('', '', [data])
if __name__ == '__main__': main()
|
gwmarrah/colony-measurer
|
colSizeAnalyzer.py
|
colSizeAnalyzer.py
|
py
| 10,361 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "imageio.imread",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "colSizeMeasurer.measure",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "colSizeMeasurer.measure",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 206,
"usage_type": "call"
}
] |
23699044088
|
from datetime import date, timedelta
from bs4 import BeautifulSoup
import pandas as pd
import requests
import os
import warnings
warnings.filterwarnings('ignore')
def is_workday(day: date) -> bool:
"""Функция определяет рабочий день или выходной согласно рабочему календарю.
True - рабочий
False - выходной"""
res = requests.get(f"https://isdayoff.ru/{day.strftime('%Y%m%d')}")
return not bool(int(res.text))
def get_rate_df(start_date: date, end_date: date) -> (pd.DataFrame, int):
"""Функция для формирования датафрейма со ставками ЦБ в указанном временном диапазоне
"""
# Выполняем запрос на сайт ЦБ и вытягиваем данные о ключевых ставках в указанный период
url = f"https://www.cbr.ru/hd_base/KeyRate/?UniDbQuery.Posted=True&UniDbQuery.From={start_date.strftime('%d.%m.%Y')}&UniDbQuery.To={end_date.strftime('%d.%m.%Y')}"
full_page = requests.get(url)
soup = BeautifulSoup(full_page.content, 'html.parser')
res = soup.find_all("td")
date_list = []
rate_list = []
[rate_list.append(float(res[i].text.replace(',', '.'))) if i % 2 != 0
else date_list.append(res[i].text) for i in range(len(res))]
    # Build a DataFrame for convenience
df = pd.DataFrame()
df['date'] = date_list
df['rate'] = rate_list
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
    # The Central Bank data has gaps on weekends. We need to add the missing dates
    # and fill the empty rate cells with the last known value
df_date = pd.DataFrame(pd.date_range(start=df['date'].min(), end=df['date'].max()), columns=['date'])
comm_df = pd.merge(df_date, df, on='date', how='left')
comm_df['rate'] = comm_df['rate'].ffill()
comm_df['is_work_day'] = comm_df['date'].map(is_workday)
return comm_df, full_page.status_code
def rate_bd_update(first_date: date, last_date: date) -> int or None:
"""Функция для обновления базы ставок ЦБ, если запрошенный диапазон дат отсутствует"""
status_code = None
    # If the rate database file does not exist, fetch the whole range and write it to a file
if not os.path.exists('tables'):
os.mkdir('tables')
if not os.path.exists('tables/rate_db.csv'):
df, status_code = get_rate_df(start_date=first_date, end_date=last_date)
df.to_csv('tables/rate_db.csv', index=False)
return status_code
    # If the rate database file exists, fetch only the missing range
df_rate = pd.read_csv('tables/rate_db.csv', parse_dates=['date'])
max_date = df_rate['date'].max()
min_date = df_rate['date'].min()
if first_date < min_date:
df, status_code = get_rate_df(start_date=first_date, end_date=min_date - timedelta(days=1))
df_rate = pd.concat((df_rate, df), axis=0, ignore_index=True)
df_rate = df_rate.sort_values('date')
df_rate = df_rate.reset_index(drop=True)
df_rate.to_csv('tables/rate_db.csv', index=False)
if last_date > max_date:
df, status_code = get_rate_df(start_date=max_date + timedelta(days=1), end_date=last_date)
df_rate = pd.concat((df_rate, df), axis=0, ignore_index=True)
df_rate = df_rate.sort_values('date')
df_rate = df_rate.reset_index(drop=True)
df_rate.to_csv('tables/rate_db.csv', index=False)
return status_code
def calc_pay_before_day(sale_date: date, days_for_pay: str):
"""
    Aggregator function. Determines the day type under the contract (working/calendar) and calls the matching function.
    :param sale_date: date of the sale or service delivery.
    :param days_for_pay: number and type of days as a string (e.g. '20 календарных').
    :return:
        - the date of the last day of the deferral
        or
        - the string 'Дата не определена' if the input data is invalid
"""
count_days, type_days = days_for_pay.strip().split()
if type_days == 'рабочих':
return pay_before_for_workdays(sale_date=sale_date, count_days=int(count_days))
elif type_days == 'календарных':
return pay_before_for_cal_days(sale_date=sale_date, count_days=int(count_days))
else:
return 'Дата не определена'
def pay_before_for_cal_days(sale_date: date, count_days: int) -> date:
"""
    Computes the last day of the payment deferral counting calendar days.
    :param sale_date: date of the sale or service delivery.
    :param count_days: number of days under the contract.
    :return: the date of the last day of the deferral.
"""
rate_df = pd.read_csv('tables/rate_db.csv', parse_dates=['date'])
temp_df = rate_df[rate_df['date'] > sale_date].reset_index(drop=True)
day_index = count_days - 1
while not temp_df['is_work_day'][day_index]:
day_index += 1
return temp_df['date'][day_index]
def pay_before_for_workdays(sale_date: date, count_days: int) -> date:
"""
    Computes the last day of the payment deferral counting working days only.
    :param sale_date: date of the sale or service delivery.
    :param count_days: number of days under the contract.
    :return: the date of the last day of the deferral.
"""
rate_df = pd.read_csv('tables/rate_db.csv', parse_dates=['date'])
return rate_df[(rate_df['date'] > sale_date) & (rate_df['is_work_day'])].reset_index(drop=True)['date'][count_days - 1]
def is_leap(date: pd.Timestamp) -> int:
year = date.year
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
return 366
else:
return 365
def calc_penalty(row):
return round((row['sum'] * (row['rate']/100) * row['delay_period']) / row['day_in_year'], 2)
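# Worked example (editor's note): a debt of 100000 RUB at a 7.5% key rate,
# 30 days overdue in a 365-day year, gives
# round(100000 * (7.5 / 100) * 30 / 365, 2) == 616.44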
def date2str(date):
return date.strftime('%d.%m.%Y')
def bild_and_save_final(df: pd.DataFrame, name: str):
"""Функция выполнят преобразование итогового датафрейма для получения формата в соответствии
с требованиями заказчика"""
name = name.split('.')[0]
final_col = ['document', 'sum', 'sale_date', 'pay_before', 'payment_date', 'delay_period', 'rate', 'penalty']
col_with_dubl = ['document', 'sum', 'sale_date', 'pay_before', 'payment_date']
    # Keep only the required columns
final_df = df.copy()[final_col]
    # Convert dates to strings
for col in ['sale_date', 'pay_before', 'payment_date']:
final_df[col] = final_df[col].map(date2str)
    # Replace duplicate values with empty cells
final_df[col_with_dubl] = final_df[col_with_dubl].mask(final_df[col_with_dubl].duplicated(), "")
final_df = final_df.reset_index().rename(columns={'index': 'num_row'})
final_df.loc[len(final_df)] = ['', '', 'Итого:', '', '', '', '', '', final_df['penalty'].sum()]
final_df = final_df.rename(columns={'num_row': '№ строки',
'document': 'док-ты о реализации(акт, накладная, УПД)',
'sum': 'Сумма долга',
'sale_date': 'Дата реализации',
'pay_before': 'Оплатить до',
'payment_date': 'Дата оплаты',
'delay_period': 'Срок просрочки',
'rate': 'Ставка ЦБ',
'penalty': 'Неустойка'})
final_df.to_excel(f'tables/{name}_result.xlsx', index=False)
return os.path.abspath(f'tables/{name}_result.xlsx')
|
garick161/penalty_calculator
|
functions.py
|
functions.py
|
py
| 8,809 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.year",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "datetime.date.strftime",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 168,
"usage_type": "attribute"
}
] |
70281340028
|
import argparse
from pathlib import Path
from bert_score import BERTScorer
import numpy as np
from util import read_test_data, read_generations, match_data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pred-path", type=str, required=True)
parser.add_argument("--ann-path", type=str, required=True)
args = parser.parse_args()
questions, annotations = read_test_data(Path(args.ann_path))
pred_data = read_generations(Path(args.pred_path))
pairs = match_data(questions, annotations, pred_data, tokenize=False)
scorer = BERTScorer(lang='en', device='cuda:0')
refs, preds = zip(*pairs)
    # BERTScorer.score expects (candidates, references) and returns the
    # (precision, recall, F1) tensors; report the mean F1 rather than
    # averaging all three tensors together.
    P, R, F1 = scorer.score(list(preds), list(refs))
    avg_score = np.mean(F1.numpy())
print(f"prediction file: {args.pred_path}")
print(f"BERT Score: {avg_score:.2f}")
|
esteng/ambiguous_vqa
|
models/eval/my_bert_score.py
|
my_bert_score.py
|
py
| 932 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "util.read_test_data",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "util.read_generations",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "util.match_data",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bert_score.BERTScorer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 30,
"usage_type": "call"
}
] |
8967390830
|
#!/opt/anaconda3/envs/PECANS-env/bin/python
import argparse
import os
import numpy as np
from pecans.ensembles.api import EnsembleRunner
from pecans.utilities.config import load_config_file
_mydir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
config_file = os.path.join(_mydir, 'pecans_config.cfg')
def name_first_order_output_files(index, **config_opts):
lifetime_hours = config_opts['CHEMISTRY/mechanism_opts/lifetime_seconds'] / 3600
emissions_width_km = config_opts['EMISSIONS/emission_opts/width_x'] / 1000
return 'pecans_ens_tau-{}h_emwidth-{}km'.format(lifetime_hours, emissions_width_km)
def name_first_order_winds_output_files(index, **config_opts):
winds = config_opts['TRANSPORT/wind_speeds/x']
return 'pecans_ens_windspeed_{}m_s'.format(winds)
def name_two_phases_first_order_output_files(index, **config_opts):
    first_lifetime_hours = config_opts['CHEMISTRY/mechanism_opts/first_lifetime_seconds'] / 3600
    second_lifetime_hours = config_opts['CHEMISTRY/mechanism_opts/second_lifetime_seconds'] / 3600
    first_phase_width = config_opts['CHEMISTRY/mechanism_opts/first_phase_width'] / 1000
    emissions_width_km = config_opts['EMISSIONS/emission_opts/width_x'] / 1000
    return 'pecans_ens_first_tau-{}h_second_tau-{}h_fpwidth-{}km_emwidth-{}km'.format(first_lifetime_hours,
                                                                                      second_lifetime_hours,
                                                                                      first_phase_width,
                                                                                      emissions_width_km)
def sims_first_order_run_winds():
# We want lifetimes that vary from 1-9 hours. This covers about the most extreme values we'd expect for summer NOx
# lifetime
    winds = np.arange(3, 11, 1)
ens = EnsembleRunner(config_file,
ensemble_variables={'TRANSPORT/wind_speeds/x': winds},
ensemble_mode='combinations',
save_in_individual_dirs=False,
save_final_output_only=True,
member_naming_fxn=name_first_order_winds_output_files,
root_output_dir=os.path.join(_mydir, '../../MATLAB/PAN_Data', 'Workspaces', 'PECANS',
'lifetime-ensemble'))
ens.run()
def sims_first_order_run():
# We want lifetimes that vary from 1-9 hours. This covers about the most extreme values we'd expect for summer NOx
# lifetime
taus = np.arange(3600, 9*3600+1, 3600)
# We also want to test what happens when emissions widths are similar or greater than lifetimes. So we'll calculate
# emissions widths equal to each expected lifetime
config = load_config_file(config_file)
winds = config.get('TRANSPORT', 'wind_speeds')
x_wind = winds['x']
widths = taus * x_wind
widths = np.concatenate(([3000], widths)) # add a smaller width as an extra test
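    # Worked example (editor's note): with x_wind = 5 m/s and tau = 3600 s,
    # the matched emission width is 5 * 3600 = 18000 m, i.e. 18 km.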
ens = EnsembleRunner(config_file,
ensemble_variables={'CHEMISTRY/mechanism_opts/lifetime_seconds': taus,
'EMISSIONS/emission_opts/width_x': widths},
ensemble_mode='combinations',
save_in_individual_dirs=False,
save_final_output_only=True,
member_naming_fxn=name_first_order_output_files,
root_output_dir=os.path.join(_mydir, '../../MATLAB/PAN_Data', 'Workspaces', 'PECANS',
'lifetime-ensemble'))
ens.run()
def sims_two_phases_first_order_run():
first_tau = np.arange(3600, 9*3600, 3600)
second_tau = np.arange(3600, 9*3600, 3600)
first_phase_width = np.arange(20*1000, 100*1000, 10*1000)
config = load_config_file(config_file)
winds = config.get('TRANSPORT', 'wind_speeds')
x_wind = winds['x']
widths = first_tau * x_wind
widths = np.concatenate(([3000], widths)) # add a smaller width as an extra test
#widths = [20000, 30000]
ens = EnsembleRunner(config_file,
ensemble_variables={'CHEMISTRY/mechanism_opts/first_lifetime_seconds': first_tau,
'CHEMISTRY/mechanism_opts/second_lifetime_seconds': second_tau,
'CHEMISTRY/mechanism_opts/first_phase_width': first_phase_width,
'EMISSIONS/emission_opts/width_x': widths},
ensemble_mode='combinations',
save_in_individual_dirs=False,
save_final_output_only=True,
member_naming_fxn=name_two_phases_first_order_output_files,
root_output_dir=os.path.join(_mydir, '../../MATLAB/PAN_Data', 'Workspaces', 'PECANS',
'lifetime-ensemble-twophases'))
ens.run()
def main():
parser = argparse.ArgumentParser(description='Choose one of the chemical solvers')
    parser.add_argument('solver', type=str, help='Which chemical solver to use: "first_order" or "two_phases_first_order"')
args = parser.parse_args()
if args.solver == 'first_order':
sims_first_order_run_winds()
elif args.solver == 'two_phases_first_order':
sims_two_phases_first_order_run()
else:
print("The chemical solver is not implemented.")
quit()
if __name__ == '__main__':
main()
|
ChiLi90/PECANS-PMOx
|
run_pecans_sims.py
|
run_pecans_sims.py
|
py
| 5,605 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pecans.ensembles.api.EnsembleRunner",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pecans.utilities.config.load_config_file",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pecans.ensembles.api.EnsembleRunner",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pecans.utilities.config.load_config_file",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pecans.ensembles.api.EnsembleRunner",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 102,
"usage_type": "call"
}
] |
30162915076
|
from django import forms
from .models import publishing
class CreateAdForm(forms.ModelForm):
class Meta:
model = publishing
fields = (
'title',
'type',
'brand',
'model',
'category',
'year',
'transmission',
'milage',
'fuel',
'engine',
'image1',
'image2',
'image3',
'image4',
'image5',
'description',
'condition',
'price',
'tel',
'city',
'address',
)
|
DenukaSandeepa/Avehiz-Project
|
publishing/forms.py
|
forms.py
|
py
| 556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "models.publishing",
"line_number": 6,
"usage_type": "name"
}
] |
8731736072
|
import numpy as np
import matplotlib.pyplot as plt
a = np.array([1, 2, 3])
print(a)
plt.plot([1, 2, 3], [2, 4, 6])
plt.show()
for num in [1, 2, 3, 4]:
print(num)
def square(x):
return x**2
|
SMaC-3/GitProject_test
|
hello.py
|
hello.py
|
py
| 204 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
}
] |
17748417007
|
from django.test import TestCase
from .forms import MakeBooking
# Create a test class for the MakeBooking form
class MakeBookingFormTest(TestCase):
# Test when the form is valid
def test_make_booking_form_is_valid(self):
form = MakeBooking(data={
'date': '2022-10-25',
'time': '14:00',
'party_of': 4
})
self.assertTrue(form.is_valid()) # Check that the form is valid
# Test when the form has no data (empty form)
def test_make_booking_form_no_data(self):
form = MakeBooking(data={}) # Create an empty form
self.assertFalse(form.is_valid()) # Check that the form is invalid
# Check that it has three errors
        self.assertEqual(len(form.errors), 3)
# Test when the form has invalid 'party_of' data (party size is 0)
def test_make_booking_form_invalid_party_of_data(self):
form = MakeBooking(data={
'date': '2022-10-25',
'time': '14:00',
'party_of': 0
})
self.assertFalse(form.is_valid()) # Check that the form is invalid
# Check that 'party_of' is in the list of errors
self.assertIn('party_of', form.errors)
|
JustinFourie1993/tables
|
website/test_forms.py
|
test_forms.py
|
py
| 1,209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "forms.MakeBooking",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "forms.MakeBooking",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "forms.MakeBooking",
"line_number": 27,
"usage_type": "call"
}
] |
38473764222
|
from pathlib import Path
import pickle
import pandas as pd
import numpy as np
import json
import torch
import random
from util.tasksim_args import TaskSimArgs
BASE_RESULTS_PATH = './results'
def get_full_results_dir(args: TaskSimArgs):
run_id = args.get_run_id()
    if args.results_dir is None:
results_dir = Path(run_id)
else:
results_dir = Path(args.results_dir) / run_id
return Path(BASE_RESULTS_PATH) / results_dir
def get_model_state_dict(args, task_id):
results_dir = get_full_results_dir(args)
path = results_dir / f'fe_ckpt_task_{task_id}.pt'
path = Path(str(path).replace('nmc', 'linear'))
if path.exists():
return torch.load(path)
else:
return None
def save_model(args, state_dict, task_id):
results_dir = get_full_results_dir(args)
if not results_dir.exists():
results_dir.mkdir(parents=True)
torch.save(state_dict, results_dir / f'fe_ckpt_task_{task_id}.pt')
def save_results(args: TaskSimArgs, results, embeddings):
results_dir = get_full_results_dir(args)
if not results_dir.exists():
results_dir.mkdir(parents=True)
with open(results_dir / 'config.txt', 'w') as config:
json.dump(vars(args), config, indent=2)
if results is not None:
results.to_csv(results_dir / 'results.csv', float_format='%.5f')
if args.save_embeddings and embeddings is not None and len(embeddings) > 0:
torch.save(embeddings, results_dir / 'embeddings.pt')
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
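# Usage sketch: call set_seed(42) once at program start, before building
# dataloaders or models, so torch/numpy/random draws are repeatable; the cudnn
# flags above trade some speed for deterministic kernels.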
|
salemohamedo/tasksim
|
util/utils.py
|
utils.py
|
py
| 1,757 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "util.tasksim_args.TaskSimArgs",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "util.tasksim_args.TaskSimArgs",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 57,
"usage_type": "call"
}
] |
5693104002
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
city = input("City name: ")
key = 'http://api.openweathermap.org/data/2.5/weather?q={}&appid=2e535070ac9219e3c58f19ac7227c197'.format(city)
res = requests.get(key)
data = res.json()
print(res)
print(data)
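# Sketch (assumes a successful call): pull two common fields from the
# OpenWeatherMap payload.
if res.status_code == 200:
    print(data['weather'][0]['description'], data['main']['temp'])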
# In[ ]:
|
tkeady/Software-Engineering
|
weather api.py
|
weather api.py
|
py
| 293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
}
] |
5356434475
|
import numpy as np
from scipy.spatial import distance_matrix
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
def knn_dist(d_matrix, k):
D_knn = np.zeros(d_matrix.shape)
#get the indices of the k lowest values and set these indices in D_knn to the same values as in D
#the rest stays 0
for i, row in enumerate(d_matrix):
index = row.argsort()[:k]
D_knn[i][index] = row[index]
return D_knn
# create 0,1 graph from k_nearest neighbour matrix
def create_graph(knn_matrix):
graph = knn_matrix > 0
graph = graph*1
return graph
#compute the k_nearest neighbour matrix with the new connections
def tuned_knn(d_matrix, n_components, labels, knn_d):
#get individual combinations
comb = [(i,j) for i in range(n_components) for j in range(i,n_components) if i != j]
    tuned = np.copy(knn_d)
dist = []
for c in comb:
dist.append(component_dist(labels, d_matrix, c[0], c[1]))
dist = sorted(dist, key=lambda x: x[0])
for i in range(n_components-1):
l,j = dist[i][1]
        tuned[l, j] = dist[i][0]
    return tuned
#calculate the shortest distance between the components c1 and c2
def component_dist(labels, d_matrix, c1, c2):
l1 = [i for i,j in enumerate(labels) if j==c1]
l2 = [i for i,j in enumerate(labels) if j==c2]
    n = d_matrix.shape[0]
    temp_d = d_matrix + np.eye(n)*10**20  # avoid measuring the diagonal as the shortest distance
    dist = np.inf
lab = 0
for i in l1:
temp_dist = min(temp_d[i][l2])
ind = np.argmin(temp_d[i][l2])
if temp_dist < dist:
dist = temp_dist
lab = [i,l2[ind]]
return dist, lab
#check for components in the given graph according to the k_nearest neighbour matrix
def check_components(knn_matrix):
graph = create_graph(knn_matrix)
graph = csr_matrix(graph)
n_components, labels = connected_components(csgraph=graph, directed=False, return_labels=True)
return n_components, labels
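if __name__ == "__main__":
    # Usage sketch (illustrative, random data): build a 5-NN graph and, if it
    # is fragmented, add the shortest inter-component links to reconnect it.
    points = np.random.rand(20, 2)
    D = distance_matrix(points, points)
    D_knn = knn_dist(D, 5)
    n_components, labels = check_components(D_knn)
    if n_components > 1:
        D_knn = tuned_knn(D, n_components, labels, D_knn)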
|
Tobi-r9/assignment1
|
isomap.py
|
isomap.py
|
py
| 2,055 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csgraph.connected_components",
"line_number": 59,
"usage_type": "call"
}
] |
16543455717
|
import ast
import fnmatch
import os
from nuitka.__past__ import iter_modules
from nuitka.importing.Importing import locateModule
from nuitka.importing.Recursion import decideRecursion
from nuitka.plugins.PluginBase import NuitkaPluginBase
from nuitka.utils.ModuleNames import ModuleName
from nuitka.utils.Utils import isMacOS, isWin32Windows
from nuitka.utils.Yaml import getYamlPackageConfiguration
class NuitkaPluginImplicitImports(NuitkaPluginBase):
plugin_name = "implicit-imports"
plugin_desc = (
"Provide implicit imports of package as per package configuration files."
)
def __init__(self):
self.config = getYamlPackageConfiguration()
self.lazy_loader_usages = {}
@staticmethod
def isAlwaysEnabled():
return True
def _resolveModulePattern(self, pattern):
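        # Illustration (hypothetical pattern): "foo.bar*.baz" locates package
        # "foo", scans its submodules for names matching "bar*", and recurses
        # with ".baz" appended, yielding one ModuleName per match.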
parts = pattern.split(".")
current = None
for count, part in enumerate(parts):
if not part:
self.sysexit(
"Error, invalid pattern with empty parts used '%s'." % pattern
)
# TODO: Checking for shell pattern should be done in more places and shared code.
if "?" in part or "*" in part or "[" in part:
if current is None:
self.sysexit(
"Error, cannot use pattern for first part '%s'." % pattern
)
module_filename = self.locateModule(
module_name=ModuleName(current),
)
for sub_module in iter_modules([module_filename]):
if not fnmatch.fnmatch(sub_module.name, part):
continue
if count == len(parts) - 1:
yield current.getChildNamed(sub_module.name)
else:
child_name = current.getChildNamed(sub_module.name).asString()
for value in self._resolveModulePattern(
child_name + "." + ".".join(parts[count + 1 :])
):
yield value
return
else:
if current is None:
current = ModuleName(part)
else:
current = current.getChildNamed(part)
yield current
def _handleImplicitImportsConfig(self, module, config):
full_name = module.getFullName()
for dependency in config.get("depends", ()):
if dependency.startswith("."):
if (
module.isUncompiledPythonPackage()
or module.isCompiledPythonPackage()
):
dependency = full_name.getChildNamed(dependency[1:]).asString()
elif full_name.getPackageName() is None:
# Not a package, potentially a naming conflict, when
# compiling with "--module" something that matches a PyPI
# name.
continue
else:
dependency = full_name.getSiblingNamed(dependency[1:]).asString()
if "*" in dependency or "?" in dependency:
for resolved in self._resolveModulePattern(dependency):
yield resolved
else:
yield dependency
def _getImportsByFullname(self, module, full_name):
"""Provides names of modules to imported implicitly."""
# Many variables, branches, due to the many cases, pylint: disable=too-many-branches,too-many-statements
# Checking for config, but also allowing fall through.
for entry in self.config.get(full_name, section="implicit-imports"):
if self.evaluateCondition(
full_name=full_name, condition=entry.get("when", "True")
):
for dependency in self._handleImplicitImportsConfig(
config=entry, module=module
):
yield dependency
# Support for both pycryotodome (module name Crypto) and pycyptodomex (module name Cryptodome)
if full_name.hasOneOfNamespaces("Crypto", "Cryptodome"):
crypto_module_name = full_name.getTopLevelPackageName()
if full_name == crypto_module_name + ".Cipher._mode_ofb":
yield crypto_module_name + ".Cipher._raw_ofb"
elif full_name == crypto_module_name + ".Cipher.CAST":
yield crypto_module_name + ".Cipher._raw_cast"
elif full_name == crypto_module_name + ".Cipher.DES3":
yield crypto_module_name + ".Cipher._raw_des3"
elif full_name == crypto_module_name + ".Cipher.DES":
yield crypto_module_name + ".Cipher._raw_des"
elif full_name == crypto_module_name + ".Cipher._mode_ecb":
yield crypto_module_name + ".Cipher._raw_ecb"
elif full_name == crypto_module_name + ".Cipher.AES":
yield crypto_module_name + ".Cipher._raw_aes"
yield crypto_module_name + ".Cipher._raw_aesni"
yield crypto_module_name + ".Util._cpuid"
elif full_name == crypto_module_name + ".Cipher._mode_cfb":
yield crypto_module_name + ".Cipher._raw_cfb"
elif full_name == crypto_module_name + ".Cipher.ARC2":
yield crypto_module_name + ".Cipher._raw_arc2"
elif full_name == crypto_module_name + ".Cipher.DES3":
yield crypto_module_name + ".Cipher._raw_des3"
elif full_name == crypto_module_name + ".Cipher._mode_ocb":
yield crypto_module_name + ".Cipher._raw_ocb"
elif full_name == crypto_module_name + ".Cipher._EKSBlowfish":
yield crypto_module_name + ".Cipher._raw_eksblowfish"
elif full_name == crypto_module_name + ".Cipher.Blowfish":
yield crypto_module_name + ".Cipher._raw_blowfish"
elif full_name == crypto_module_name + ".Cipher._mode_ctr":
yield crypto_module_name + ".Cipher._raw_ctr"
elif full_name == crypto_module_name + ".Cipher._mode_cbc":
yield crypto_module_name + ".Cipher._raw_cbc"
elif full_name == crypto_module_name + ".Util.strxor":
yield crypto_module_name + ".Util._strxor"
elif full_name == crypto_module_name + ".Util._cpu_features":
yield crypto_module_name + ".Util._cpuid_c"
elif full_name == crypto_module_name + ".Hash.BLAKE2s":
yield crypto_module_name + ".Hash._BLAKE2s"
elif full_name == crypto_module_name + ".Hash.BLAKE2b":
yield crypto_module_name + ".Hash._BLAKE2b"
elif full_name == crypto_module_name + ".Hash.SHA1":
yield crypto_module_name + ".Hash._SHA1"
elif full_name == crypto_module_name + ".Hash.SHA224":
yield crypto_module_name + ".Hash._SHA224"
elif full_name == crypto_module_name + ".Hash.SHA256":
yield crypto_module_name + ".Hash._SHA256"
elif full_name == crypto_module_name + ".Hash.SHA384":
yield crypto_module_name + ".Hash._SHA384"
elif full_name == crypto_module_name + ".Hash.SHA512":
yield crypto_module_name + ".Hash._SHA512"
elif full_name == crypto_module_name + ".Hash.MD2":
yield crypto_module_name + ".Hash._MD2"
elif full_name == crypto_module_name + ".Hash.MD4":
yield crypto_module_name + ".Hash._MD4"
elif full_name == crypto_module_name + ".Hash.MD5":
yield crypto_module_name + ".Hash._MD5"
elif full_name == crypto_module_name + ".Hash.keccak":
yield crypto_module_name + ".Hash._keccak"
elif full_name == crypto_module_name + ".Hash.RIPEMD160":
yield crypto_module_name + ".Hash._RIPEMD160"
elif full_name == crypto_module_name + ".Hash.Poly1305":
yield crypto_module_name + ".Hash._poly1305"
elif full_name == crypto_module_name + ".Protocol.KDF":
yield crypto_module_name + ".Cipher._Salsa20"
yield crypto_module_name + ".Protocol._scrypt"
elif full_name == crypto_module_name + ".Cipher._mode_gcm":
yield crypto_module_name + ".Hash._ghash_clmul"
yield crypto_module_name + ".Hash._ghash_portable"
yield crypto_module_name + ".Util._galois"
elif full_name == crypto_module_name + ".Cipher.Salsa20":
yield crypto_module_name + ".Cipher._Salsa20"
elif full_name == crypto_module_name + ".Cipher.ChaCha20":
yield crypto_module_name + ".Cipher._chacha20"
elif full_name == crypto_module_name + ".PublicKey.ECC":
yield crypto_module_name + ".PublicKey._ec_ws"
yield crypto_module_name + ".PublicKey._ed25519"
yield crypto_module_name + ".PublicKey._ed448"
elif full_name == crypto_module_name + ".Cipher.ARC4":
yield crypto_module_name + ".Cipher._ARC4"
elif full_name == crypto_module_name + ".Cipher.PKCS1_v1_5":
yield crypto_module_name + ".Cipher._pkcs1_decode"
elif full_name == crypto_module_name + ".Math._IntegerCustom":
yield crypto_module_name + ".Math._modexp"
elif full_name in ("pynput.keyboard", "pynput.mouse"):
if isMacOS():
yield full_name.getChildNamed("_darwin")
elif isWin32Windows():
yield full_name.getChildNamed("_win32")
else:
yield full_name.getChildNamed("_xorg")
elif full_name == "cryptography":
yield "_cffi_backend"
elif full_name == "bcrypt._bcrypt":
yield "_cffi_backend"
def getImplicitImports(self, module):
full_name = module.getFullName()
# TODO: This code absolutely doesn't belong here.
# Read the .pyi file, and provide as implicit dependency.
if module.isPythonExtensionModule():
for used_module_name in module.getPyIModuleImportedNames():
yield used_module_name
if full_name == "pkg_resources.extern":
# TODO: A package specific lookup of compile time "pkg_resources.extern" could
# be done here, but this might be simpler to hardcode for now. Once we have
# the infrastructure to ask a module that after optimization, we should do
# that instead, as it will not use a separate process.
for part in (
"packaging",
"pyparsing",
"appdirs",
"jaraco",
"importlib_resources",
"more_itertools",
"six",
"platformdirs",
):
yield "pkg_resources._vendor." + part
for item in self._getImportsByFullname(module=module, full_name=full_name):
yield item
def _getPackageExtraScanPaths(self, package_dir, config):
for config_package_dir in config.get("package-dirs", ()):
yield os.path.normpath(os.path.join(package_dir, "..", config_package_dir))
yield package_dir
for config_package_name in config.get("package-paths", ()):
module_filename = self.locateModule(config_package_name)
if module_filename is not None:
if os.path.isfile(module_filename):
yield os.path.dirname(module_filename)
else:
yield module_filename
def getPackageExtraScanPaths(self, package_name, package_dir):
for entry in self.config.get(package_name, section="import-hacks"):
if self.evaluateCondition(
full_name=package_name, condition=entry.get("when", "True")
):
for item in self._getPackageExtraScanPaths(
package_dir=package_dir, config=entry
):
yield item
def _getModuleSysPathAdditions(self, module_name, config):
module_filename = self.locateModule(module_name)
if os.path.isfile(module_filename):
            module_filename = os.path.dirname(module_filename)
for relative_path in config.get("global-sys-path", ()):
candidate = os.path.abspath(os.path.join(module_filename, relative_path))
if os.path.isdir(candidate):
yield candidate
def getModuleSysPathAdditions(self, module_name):
for entry in self.config.get(module_name, section="import-hacks"):
if self.evaluateCondition(
full_name=module_name, condition=entry.get("when", "True")
):
for item in self._getModuleSysPathAdditions(
module_name=module_name, config=entry
):
yield item
def onModuleSourceCode(self, module_name, source_filename, source_code):
if module_name == "numexpr.cpuinfo":
# We cannot intercept "is" tests, but need it to be "isinstance",
# so we patch it on the file. TODO: This is only temporary, in
# the future, we may use optimization that understands the right
# hand size of the "is" argument well enough to allow for our
# type too.
source_code = source_code.replace(
"type(attr) is types.MethodType", "isinstance(attr, types.MethodType)"
)
if module_name == "site":
if source_code.startswith("def ") or source_code.startswith("class "):
source_code = "\n" + source_code
source_code = """\
__file__ = (__nuitka_binary_dir + '%ssite.py') if '__nuitka_binary_dir' in dict(__builtins__ ) else '<frozen>';%s""" % (
os.path.sep,
source_code,
)
# Debian stretch site.py
source_code = source_code.replace(
"PREFIXES = [sys.prefix, sys.exec_prefix]", "PREFIXES = []"
)
# Source code should use lazy_loader, this may not be good enough
# for all things yet.
attach_call_replacements = (
(
"lazy.attach_stub(__name__, __file__)",
"lazy.attach('%(module_name)s', %(submodules)s, %(attrs)s)",
),
)
for attach_call, attach_call_replacement in attach_call_replacements:
if attach_call in source_code:
result = self._handleLazyLoad(
module_name=module_name,
source_filename=source_filename,
)
# Inline the values, to avoid the data files.
if result is not None:
source_code = source_code.replace(
attach_call,
attach_call_replacement
% {
"module_name": module_name.asString(),
"submodules": result[0],
"attrs": result[1],
},
)
if module_name == "huggingface_hub":
if (
"__getattr__, __dir__, __all__ = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS)"
in source_code
):
huggingface_hub_lazy_loader_info = (
self.queryRuntimeInformationSingle(
setup_codes="import huggingface_hub",
value="huggingface_hub._SUBMOD_ATTRS",
info_name="huggingface_hub_lazy_loader",
)
)
self.lazy_loader_usages[module_name] = (
[],
huggingface_hub_lazy_loader_info,
)
return source_code
def _handleLazyLoad(self, module_name, source_filename):
pyi_filename = source_filename + "i"
if os.path.exists(pyi_filename):
try:
import lazy_loader
except ImportError:
pass
else:
with open(pyi_filename, "rb") as f:
stub_node = ast.parse(f.read())
                # We are using private code here, to avoid duplicating it,
# pylint: disable=protected-access
visitor = lazy_loader._StubVisitor()
visitor.visit(stub_node)
self.lazy_loader_usages[module_name] = (
visitor._submodules,
visitor._submod_attrs,
)
return self.lazy_loader_usages[module_name]
def createPreModuleLoadCode(self, module):
full_name = module.getFullName()
for entry in self.config.get(full_name, section="implicit-imports"):
if "pre-import-code" in entry:
if self.evaluateCondition(
full_name=full_name, condition=entry.get("when", "True")
):
code = "\n".join(entry.get("pre-import-code"))
# TODO: Add a description to the Yaml file.
yield code, "According to Yaml configuration."
def createPostModuleLoadCode(self, module):
full_name = module.getFullName()
for entry in self.config.get(full_name, section="implicit-imports"):
if "post-import-code" in entry:
if self.evaluateCondition(
full_name=full_name, condition=entry.get("when", "True")
):
code = "\n".join(entry.get("post-import-code"))
# TODO: Add a description to the Yaml file.
yield code, "According to Yaml configuration."
unworthy_namespaces = (
"setuptools", # Not performance relevant.
"distutils", # Not performance relevant.
"wheel", # Not performance relevant.
"pkg_resources", # Not performance relevant.
"pycparser", # Not performance relevant.
# "cffi", # Not performance relevant.
"numpy.distutils", # Largely unused, and a lot of modules.
"numpy.f2py", # Mostly unused, only numpy.distutils import it.
"numpy.testing", # Useless.
"nose", # Not performance relevant.
"coverage", # Not performance relevant.
"docutils", # Not performance relevant.
"pytest", # Not performance relevant.
"_pytest", # Not performance relevant.
"unittest", # Not performance relevant.
"pexpect", # Not performance relevant.
"Cython", # Mostly unused, and a lot of modules.
"cython",
"pyximport",
"IPython", # Mostly unused, and a lot of modules.
"wx._core", # Too large generated code
"pyVmomi.ServerObjects", # Too large generated code
"pyglet.gl", # Too large generated code
"telethon.tl.types", # Not performance relevant and slow C compile
"importlib_metadata", # Not performance relevant and slow C compile
"comtypes.gen", # Not performance relevant and slow C compile
"win32com.gen_py", # Not performance relevant and slow C compile
"phonenumbers.geodata", # Not performance relevant and slow C compile
"site", # Not performance relevant and problems with .pth files
"packaging", # Not performance relevant.
"appdirs", # Not performance relevant.
"dropbox.team_log", # Too large generated code
"asyncua.ua.object_ids", # Too large generated code
"asyncua.ua.uaerrors._auto", # Too large generated code
"asyncua.server.standard_address_space.standard_address_space_services", # Too large generated code
"azure.mgmt.network", # Too large generated code
"azure.mgmt.compute", # Too large generated code
"transformers.utils.dummy_pt_objects", # Not performance relevant.
"transformers.utils.dummy_flax_objects", # Not performance relevant.
"transformers.utils.dummy_tf_objects", # Not performance relevant.
)
def decideCompilation(self, module_name):
if module_name.hasOneOfNamespaces(self.unworthy_namespaces):
return "bytecode"
def onModuleUsageLookAhead(
self, module_name, module_filename, module_kind, get_module_source
):
# Getting the source code will also trigger our modification
# and potentially tell us if any lazy loading applies.
if get_module_source() is None:
return
if module_name in self.lazy_loader_usages:
from nuitka.HardImportRegistry import (
addModuleAttributeFactory,
addModuleDynamicHard,
addModuleTrust,
trust_module,
trust_node,
)
addModuleDynamicHard(module_name)
sub_module_names, sub_module_attr = self.lazy_loader_usages[module_name]
for sub_module_name in sub_module_names:
addModuleTrust(module_name, sub_module_name, trust_module)
sub_module_name = module_name.getChildNamed(sub_module_name)
addModuleDynamicHard(sub_module_name)
_lookAhead(using_module_name=module_name, module_name=sub_module_name)
for (
sub_module_name,
attribute_names,
) in sub_module_attr.items():
sub_module_name = module_name.getChildNamed(sub_module_name)
addModuleDynamicHard(sub_module_name)
_lookAhead(using_module_name=module_name, module_name=sub_module_name)
for attribute_name in attribute_names:
addModuleTrust(module_name, attribute_name, trust_node)
addModuleAttributeFactory(
module_name,
attribute_name,
makeExpressionImportModuleNameHardExistsAfterImportFactory(
sub_module_name=sub_module_name,
attribute_name=attribute_name,
),
)
def makeExpressionImportModuleNameHardExistsAfterImportFactory(
sub_module_name,
attribute_name,
):
from nuitka.HardImportRegistry import trust_node_factory
from nuitka.nodes.ImportHardNodes import (
ExpressionImportModuleNameHardExists,
)
key = (sub_module_name, attribute_name)
if key in trust_node_factory:
return lambda source_ref: trust_node_factory[key](source_ref=source_ref)
return lambda source_ref: ExpressionImportModuleNameHardExists(
module_name=sub_module_name,
import_name=attribute_name,
module_guaranteed=False,
source_ref=source_ref,
)
def _lookAhead(using_module_name, module_name):
(
_module_name,
package_filename,
package_module_kind,
finding,
) = locateModule(
module_name=module_name,
parent_package=None,
level=0,
)
assert module_name == _module_name
if finding != "not-found":
decideRecursion(
using_module_name=using_module_name,
module_filename=package_filename,
module_name=module_name,
module_kind=package_module_kind,
)
|
Nuitka/Nuitka
|
nuitka/plugins/standard/ImplicitImports.py
|
ImplicitImports.py
|
py
| 23,781 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
[
{
"api_name": "nuitka.plugins.PluginBase.NuitkaPluginBase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "nuitka.utils.Yaml.getYamlPackageConfiguration",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.ModuleNames.ModuleName",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "nuitka.__past__.iter_modules",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "fnmatch.fnmatch",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.ModuleNames.ModuleName",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.Utils.isMacOS",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.Utils.isWin32Windows",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "ast.parse",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "lazy_loader._StubVisitor",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleDynamicHard",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleTrust",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.trust_module",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleDynamicHard",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleDynamicHard",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleTrust",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.trust_node",
"line_number": 533,
"usage_type": "name"
},
{
"api_name": "nuitka.HardImportRegistry.addModuleAttributeFactory",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "nuitka.HardImportRegistry.trust_node_factory",
"line_number": 554,
"usage_type": "name"
},
{
"api_name": "nuitka.HardImportRegistry.trust_node_factory",
"line_number": 555,
"usage_type": "name"
},
{
"api_name": "nuitka.nodes.ImportHardNodes.ExpressionImportModuleNameHardExists",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "nuitka.importing.Importing.locateModule",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "nuitka.importing.Recursion.decideRecursion",
"line_number": 580,
"usage_type": "call"
}
] |
74920129787
|
from bs4 import BeautifulSoup
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
html = """
<html><body>
<ul>
<li><a href="http://www.naver.com">naver</a></li>
<li><a href="http://www.daun.net">daum</a></li>
<li><a href="http://www.daun.com">daum</a></li>
<li><a href="http://www.google.com">google</a></li>
<li><a href="http://www.tistory.com">tistory</a></li>
</ul>
</body></html>
"""
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all("a")
# print('links', type(links))
a = soup.find_all("a", string='daum')
# print('a', a)
b = soup.find_all("a", limit=3)
# print('b', b)
c = soup.find_all(string=["naver", "google"])
print(c)
for link in links:
# print('link', type(link), link)
href = link.attrs['href']
txt = link.string
# print('txt >> ', txt, 'href >> ', href)
|
lcy8417/Python
|
download2-5-3.py
|
download2-5-3.py
|
py
| 994 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.stdout",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdout.detach",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stderr.detach",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
}
] |
25175601159
|
import random
import json
import numpy as np
import torch
# Custom imports
import data
from model_eval import evaluate
class LSTM(torch.nn.Module):
def __init__(self, embedding: torch.FloatTensor):
super().__init__()
# Embedding wrapper
self.__embedding = torch.nn.Embedding.from_pretrained(
embedding, freeze=True, padding_idx=0)
# RNN layers
self.__rnn1 = torch.nn.LSTM(300, 150,
num_layers=2, batch_first=False)
self.__rnn2 = torch.nn.LSTM(300, 150,
num_layers=2, batch_first=False)
# FC layers
self.__fc1 = torch.nn.Linear(150, 150)
self.__fc2 = torch.nn.Linear(150, 1)
def all_params(self):
params = []
params.extend(self.__rnn1.parameters())
params.extend(self.__rnn2.parameters())
params.extend(self.__fc1.parameters())
params.extend(self.__fc2.parameters())
params.extend(self.__embedding.parameters())
return params
def forward(self, x):
x = self.__embedding(x)
x = torch.transpose(x, 0, 1)
# Consists of (h, c)
hidden = None
y, hidden = self.__rnn1(x, hidden)
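        # Note: rnn2 re-reads the embedded input x (not rnn1's 150-dim output,
        # which would not match its 300-dim input) and is seeded with rnn1's
        # final (h, c) state.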
y, hidden = self.__rnn2(x, hidden)
# Last output
y = y[-1]
# Linear layer
y = self.__fc1(y)
y = torch.relu(y)
return self.__fc2(y)
def predict(self, x):
with torch.no_grad():
y = torch.sigmoid(self.forward(x))
y = y.round().int().squeeze(-1)
return y
def train(model: torch.nn.Module, data,
optimizer, criterion):
# Set state for training
model.train()
# Go through batches
losses = list()
for batch_num, batch in enumerate(data):
model.zero_grad()
# Calculate loss
logits = model.forward(batch[0]).squeeze(-1)
y = batch[1].float()
loss = criterion(logits, y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.all_params(), 0.25)
optimizer.step()
losses.append(float(loss))
# At the end of an epoch print loss
#print(f"loss = {np.mean(losses)}")
return np.mean(losses)
if __name__ == "__main__":
# Statistics
hyperparameters = dict()
hyperparameters["max_size"] = -1
hyperparameters["min_freq"] = 1
hyperparameters["train_batch_size"] = 10
hyperparameters["valid_batch_size"] = 32
hyperparameters["test_batch_size"] = 32
hyperparameters["learning_rate"] = 1e-4
statistics = dict()
statistics["hyperparameters"] = hyperparameters
# Frequencies
frequencies = data.getFrequencies(data.TRAIN_DATASET_PATH)
labelFrequencies = data.getLabelFrequencies(data.TRAIN_DATASET_PATH)
# Vocabs
x_vocab = data.Vocab(
frequencies, max_size=hyperparameters["max_size"], min_freq=hyperparameters["min_freq"])
y_vocab = data.Vocab(labelFrequencies, labels=True)
# Datasets
train_dataset = data.NLPDataset.from_file(data.TRAIN_DATASET_PATH)
valid_dataset = data.NLPDataset.from_file(data.VALID_DATASET_PATH)
test_dataset = data.NLPDataset.from_file(data.TEST_DATASET_PATH)
# Embedding matrix
embedding = data.generateEmbeddingMatrix(
x_vocab, data.VECTOR_REPR_PATH)
# Baseline model
lstm = LSTM(embedding)
optimizer = torch.optim.Adam(
lstm.all_params(), lr=hyperparameters["learning_rate"])
criterion = torch.nn.BCEWithLogitsLoss()
iters = 5
epochs = 5
for i in range(iters):
print(f"RUN {i+1}")
# Set seed
seed = random.randint(0, 7052020)
np.random.seed(seed)
torch.manual_seed(seed)
statistics[seed] = dict()
statistics[seed]["train_loss"] = None
statistics[seed]["valid"] = list()
for epoch in range(epochs):
print(f"Epoch {epoch+1}:")
dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=hyperparameters["train_batch_size"],
shuffle=True, collate_fn=data.pad_collate_fn)
print("\tTraining...")
train_loss = train(lstm, dataloader,
optimizer, criterion)
statistics[seed]["train_loss"] = train_loss
dataloader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=hyperparameters["valid_batch_size"],
shuffle=False, collate_fn=data.pad_collate_fn)
print("\tValidating...")
valid_evals = evaluate(lstm, dataloader, criterion)
statistics[seed]["valid"].append(valid_evals)
# Test dataset
dataloader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=32,
shuffle=False, collate_fn=data.pad_collate_fn)
print("Testing...")
test_evals = evaluate(lstm, dataloader, criterion)
statistics[seed]["test"] = test_evals
print("\nAll done.")
# Write to statistics file
with open("c:/workspace/fer-dl/lab03/stats/lstm_stats.json", "w") as stats_file:
stats_file.write(json.dumps(statistics))
|
ftodoric/fer-du
|
lab03/rnn.py
|
rnn.py
|
py
| 5,265 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Embedding.from_pretrained",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.transpose",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.relu",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "data.getFrequencies",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "data.TRAIN_DATASET_PATH",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "data.getLabelFrequencies",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "data.TRAIN_DATASET_PATH",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "data.Vocab",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "data.Vocab",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "data.NLPDataset.from_file",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "data.NLPDataset",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "data.TRAIN_DATASET_PATH",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "data.NLPDataset.from_file",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "data.NLPDataset",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "data.VALID_DATASET_PATH",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "data.NLPDataset.from_file",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "data.NLPDataset",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "data.TEST_DATASET_PATH",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "data.generateEmbeddingMatrix",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "data.VECTOR_REPR_PATH",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "data.pad_collate_fn",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "data.pad_collate_fn",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "model_eval.evaluate",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "data.pad_collate_fn",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "model_eval.evaluate",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 172,
"usage_type": "call"
}
] |
34487423583
|
import serial
import time
import struct
import logging as trace
import threading
class Communication(object):
data_chars = [b'!', b'"', b'#', b'$', b'%', b'&', b"'", b'(']
    response_timeout = 2  # seconds
_handle = serial.Serial()
_error_counter = 0
_done = False
_thread = None
def __init__(self, address=None):
# Initialize class parameters
# perform port configuration at start-up
self._handle.port = "/dev/ttyUSB0"
self._handle.baudrate = 115200
self._handle.bytesize = serial.EIGHTBITS # number of bits per bytes
self._handle.parity = serial.PARITY_NONE # set parity check: no parity
self._handle.stopbits = serial.STOPBITS_ONE # number of stop bits
self._handle.timeout = 0.25 # non-block read
self._handle.writeTimeout = 0.25 # timeout for write
trace.debug('serial port configuration done')
# self._address = address
# Only one data stream per port
self.data = []
self.devices = {}
self.sync_data_ready = threading.Event()
self.async_data_ready = threading.Event()
self.bin_data_ready = threading.Event()
self._thread = threading.Thread(name='serial_thr', target= self._read_from_device)
def __del__(self):
self.disconnect()
def connect(self, port_name="/dev/ttyUSB0"):
"""
Connect device
:param port_name: Specify serial port name if different
than /dev/ttyUSB0
:return: True if connected, False if connection failed
"""
# if port is different than default use it
if self._handle.port != port_name:
self._handle.port = port_name
# start connecting
trace.debug("Trying to connect..")
try:
self._handle.open()
except Exception as e:
trace.error("error open serial port: " + str(e))
return False
if self._handle.isOpen():
trace.debug('serial port opened')
else:
trace.debug('serial port not opened')
return False
# flush buffers at start-up
try:
self._handle.flushInput()
self._handle.flushOutput()
except Exception as e:
trace.error("error flushing input " + str(e))
# at this point device should be connected
self._thread.start()
return True
def disconnect(self):
# mark job as done (this flag is for background thread)
self._done = True
# wait until background thread is done
# if it is still running
        if self._thread.is_alive():
            self._thread.join()
# close serial port
if self._handle.isOpen():
self._handle.close()
trace.debug('serial port closed')
def init_device(self, idn):
self.devices[idn] = {'sync': [], 'async': []}
def write_command(self, command, idn):
"""
Write command to device
:param command: self-explanatory
:return: None
"""
# add prefix and CR on the end
command = str(idn) + ":" + command + '\n'
trace.debug('writing command: ' + command)
self._handle.write(bytes(command, 'utf8'))
def write_command_ret(self, command, idn):
"""
        Writes a command to the device and waits for a response
        :param command: self-explanatory
        :return: response string, or None on timeout
"""
self.sync_data_ready.clear()
self.write_command(command,idn)
self.sync_data_ready.wait(self.response_timeout)
        if self.devices.get(idn).get('sync'):
            resp = self.devices.get(idn).get('sync').pop()
trace.debug("Command: \""+str(command)+"\" successfully sent. Response: \""+str(resp)+"\"")
return resp
else:
trace.debug("No response for command: \"" + str(command) + "\"")
return None
def write_command_stdr(self, command, idn):
"""
        Writes a command to the device and waits for the standard response
        :param command: self-explanatory
        :return: response payload (second token), or None
"""
self.sync_data_ready.clear()
self.write_command(command,idn)
self.sync_data_ready.wait(self.response_timeout)
        if self.devices.get(idn).get('sync'):
            resp = self.devices.get(idn).get('sync').pop()
if resp.rsplit()[0] == command.rsplit()[0]:
trace.debug("Command: \""+str(command)+"\" successfully sent. Response: \""+str(resp)+"\"")
else:
trace.error("Wrong response for command: \"" + str(command) + "\". Response: \"" + str(resp) + "\" , expected: \""+str(command.rsplit()[0]))
if len(resp.rsplit()) > 1:
return resp.rsplit()[1]
else:
return None
def decode_binvalue(self,seq):
# Data format is: CSHHHHH\r
# C - is a prefix that also serves as a 3 bit-long counter (starts with prefix0)
# S - status byte (6 bits: 1 + negated 5 bits representing input lines)
# HHHHH - is a 18-bit hex value (should be treated as value with sign)
# \r - terminating CR
# Extended format is: CSHHHHHhH\r
# 0 ascii 0/1 sin+- X X 0/1 shutter 0/1 shutter~ X X
# where CSH are as above and h is H (hex digit) with highest bit set
# this signals the fact that also fractional part is sent so the bit should
# be cleared, whole value treated as int and later divided by 256
flag_count=seq[0]-ord('!')
c = seq[1]
flag_al=bool(c & 0b01000000)
flag_dl=bool(c & 0b00001000)
c = seq[2]
value = (-1 if c >= ord('8') else 0) # test for sign bit (in hex digit)
shift = False
for c in list(seq)[3:]:
if (c & 0x80):
c &= 0x7F
shift = True
if c >= ord('0') and c <= ord('9'):
nibble = c - ord('0')
elif c >= ord('A') and c <= ord('F'):
nibble = c - (ord('A') - 10)
else:
break
value <<= 4
value |= nibble
return (float(value) / 256 if shift else float(value))* 6.25 / 65536,flag_count,flag_al,flag_dl
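    # Example (hypothetical frame): decode_binvalue(b'!A00100') returns
    # (256 * 6.25 / 65536, 0, True, False) -- counter 0, AL flag set,
    # DL flag clear, value ~0.0244.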
def read_line(self, line):
coms = line.split(b'\r')
for com in coms:
if com[0] >= ord('!') and com[0] <= ord('('):
value = self.decode_binvalue(com)
self.data.append(list(value))
self.bin_data_ready.set()
trace.debug('Data value:'+ str(value))
else:
idn, com_type, message = tuple(com.partition(b'.'))
# First char after the id number
if com_type == b'.':
com_type = 'sync'
else:
# if not, try other ordering character
idn, com_type, message = tuple(com.partition(b';'))
if com_type == b';':
com_type = 'async'
else:
trace.error('Major parsing fuckup, good luck')
return -1
idnn = int(idn)
if idnn not in self.devices.keys():
self.init_device(idnn)
message=message.decode('ascii') #convert bytes to string
self.devices[idnn][com_type].append(message)
if com_type == 'sync':
self.sync_data_ready.set()
elif com_type == 'async':
self.async_data_ready.set()
trace.debug('Device ID: %d Communication type: %s Message: %s', idnn, com_type, message)
def _read_from_device(self):
"""
Read from device. This function is executed in separate
thread. Function also updates necessary parameters for
this class
"""
self.rawdata = bytearray()
while not self._done:
# if incoming bytes are waiting to be
# read from the serial input buffer
if self._handle.inWaiting():
# read and remove all whitespaces
# on the right side, including '\n'
self.rawdata.extend( self._handle.read(self._handle.inWaiting()))
while True:
line,sep,rest=tuple(self.rawdata.partition(b'\r'))
if sep != b'\r':
break
trace.debug("new data to parse: " + str(line))
self.read_line(line.strip())
self.rawdata=rest
# sleep for a moment (pseudo-yield in python)
time.sleep(0.0001)
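# Usage sketch (hypothetical port):
#   comm = Communication()
#   if comm.connect("/dev/ttyUSB0"):
#       comm.bin_data_ready.wait(2)  # wait up to 2 s for a binary frame
#       print(comm.data[-1] if comm.data else "no data")
#       comm.disconnect()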
|
ccucumber/verdeta-lockin
|
fotonowy/komunikacja.py
|
komunikacja.py
|
py
| 8,765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "serial.EIGHTBITS",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "serial.STOPBITS_ONE",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 240,
"usage_type": "call"
}
] |
41854725692
|
from absl.testing import parameterized
import dataclasses
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import input_reader
from official.modeling import hyperparams
from official.vision.beta.dataloaders import tfds_detection_decoders
from official.vision.beta.projects.yolo.dataloaders import yolo_detection_input
@dataclasses.dataclass
class Parser(hyperparams.Config):
"""Dummy configuration for parser."""
  output_size: tuple = (416, 416)
num_classes: int = 80
fixed_size: bool = True
jitter_im: float = 0.1
jitter_boxes: float = 0.005
min_process_size: int = 320
max_process_size: int = 608
max_num_instances: int = 200
random_flip: bool = True
seed: int = 10
shuffle_buffer_size: int = 10000
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: str = ''
tfds_name: str = 'coco/2017'
tfds_split: str = 'train'
global_batch_size: int = 10
is_training: bool = True
dtype: str = 'float16'
decoder = None
parser: Parser = Parser()
shuffle_buffer_size: int = 10
class YoloDetectionInputTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('training', True), ('testing', False))
def test_yolo_input(self, is_training):
params = DataConfig(is_training=is_training)
decoder = tfds_detection_decoders.MSCOCODecoder()
anchors = [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0], [46.0, 114.0],
[133.0, 127.0], [79.0, 225.0], [301.0, 150.0], [172.0, 286.0],
[348.0, 340.0]]
masks = {'3': [0, 1, 2], '4': [3, 4, 5], '5': [6, 7, 8]}
parser = yolo_detection_input.Parser(
output_size=params.parser.output_size,
num_classes=params.parser.num_classes,
fixed_size=params.parser.fixed_size,
jitter_im=params.parser.jitter_im,
jitter_boxes=params.parser.jitter_boxes,
min_process_size=params.parser.min_process_size,
max_process_size=params.parser.max_process_size,
max_num_instances=params.parser.max_num_instances,
random_flip=params.parser.random_flip,
seed=params.parser.seed,
anchors=anchors,
masks=masks)
postprocess_fn = parser.postprocess_fn(is_training=is_training)
reader = input_reader.InputReader(params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(
params.is_training))
dataset = reader.read(input_context=None).batch(10).take(1)
if postprocess_fn:
image, _ = postprocess_fn(
*tf.data.experimental.get_single_element(dataset))
else:
image, _ = tf.data.experimental.get_single_element(dataset)
print(image.shape)
self.assertAllEqual(image.shape, (10, 10, 416, 416, 3))
self.assertTrue(
tf.reduce_all(tf.math.logical_and(image >= 0, image <= 1)))
if __name__ == '__main__':
tf.test.main()
|
sek788432/Waymo-2D-Object-Detection
|
input/models/official/vision/beta/projects/yolo/dataloaders/yolo_detection_input_test.py
|
yolo_detection_input_test.py
|
py
| 3,074 |
python
|
en
|
code
| 79 |
github-code
|
6
|
[
{
"api_name": "official.modeling.hyperparams.Config",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "official.modeling.hyperparams",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "official.core.config_definitions.DataConfig",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "official.core.config_definitions",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.test",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "absl.testing.parameterized.TestCase",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "absl.testing.parameterized",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "official.vision.beta.dataloaders.tfds_detection_decoders.MSCOCODecoder",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "official.vision.beta.dataloaders.tfds_detection_decoders",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "official.vision.beta.projects.yolo.dataloaders.yolo_detection_input.Parser",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "official.vision.beta.projects.yolo.dataloaders.yolo_detection_input",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "official.core.input_reader.InputReader",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "official.core.input_reader",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "tensorflow.data",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.experimental.get_single_element",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.experimental.get_single_element",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_all",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.logical_and",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "absl.testing.parameterized.named_parameters",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "absl.testing.parameterized",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "tensorflow.test.main",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.test",
"line_number": 87,
"usage_type": "attribute"
}
] |
18194693461
|
from rest_framework.routers import DefaultRouter
from messaging import api_view
router = DefaultRouter()
router.register('message', api_view.MessageVewSet, basename='message')
router.register('chat', api_view.GroupBlogViewSet, basename='chat')
urlpatterns = [
]
urlpatterns += router.urls
|
SivakumarSkr/Movieclub
|
messaging/api_urls.py
|
api_urls.py
|
py
| 293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "messaging.api_view.MessageVewSet",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "messaging.api_view",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "messaging.api_view.GroupBlogViewSet",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "messaging.api_view",
"line_number": 7,
"usage_type": "name"
}
] |
3502830678
|
"""Retreive extracts wrt. previous lexicon update cycle.
Update extracts.json and output cycle extracts as extracts-{cycle}.txt for easy inspection
Example
$ python3 get_extracts.py 4 working_folder
$ python3 get_extracts.py 4 _aroma_NOUN+ADJ
Args:
    n (int): number of worker processes (CPU threads) to use
working_folder:
Required in working folder:
extracts.json: {"cycle":[([seeds], extract, parsed_extract),..],..}
lexicon.json: [
# cycle 0
[
# entry 0 in cycle: all coincident extracts corresponding to a pattern
(
[("_sharp_ADJ", "_lemon_NOUN"), ... ], #list of coincident vocabulary tuples tagetted by pattern A
pattern_A,
),
....
],
....
]
required in datasets:
harvesting.json: {"book_code": [sentence, parsed sentence tuples],.. }
"""
import json
import multiprocessing
import os
import re
import sys
import regex
from tqdm import tqdm
# add libraries to path
sys.path.append(os.path.join(sys.path[0], "libraries"))
# add working folder to path
sys.path.append(os.path.join(sys.path[0], sys.argv[2]))
from CHUNKS import chunks
from pattern_abstraction import convert_patterns, expand_chunks, to_chunks
from PATTERNS import extraction_patterns, identification_patterns
def main(argv):
# CL arguments
folder = argv[1]
n = int(argv[0])
# get a list of previously seen extracts, from all prior cycles
extracts_file = folder + "/extracts.json"
previous_extracts, current_cycle = get_extracts(extracts_file)
# previous_extracts = {"cycle":[([seeds], extract, parsed_extract),..],..}
print(f"current cycle = {current_cycle}")
seen_extracts = extracts_as_set(previous_extracts)
# seen_extracts = {set of unparsed extracts previously seen}
# Collect the previous cycle's lexicon entries
with open(folder + "/lexicon.json", "r") as f:
lexicon = json.load(f)
vocabulary = get_lexicon(lexicon) # [(compiled re, (coincident phrases),..]
# compile previously seen abstractions
seen_abstractions = identification_patterns
seen_patterns = compile_patterns(seen_abstractions)
# ITERATE THROUGH HARVESTING SET, extracting where
# * an extract is unseen
# * and where known patterns do not match
with open("./datasets/harvesting.json", "r") as f:
dataset = json.load(f)
# dataset = {"book_code": [(sentence, parsed sentence),..]}
# iterate through the harvesting set
for book_index, (book_code, extracts) in enumerate(tqdm(dataset.items())):
# discard extracts already seen
extracts_trimmed = trim_extracts(extracts, seen_extracts) # [(extract, parsed_extract),...]
# split extracts n chunks, for multi-proccessing
extract_sets = group_extracts(extracts_trimmed, n) # [[(extract, parsed_extract),...],...]
processes = []
queue = multiprocessing.Queue()
# iterate through the extract chunks as separate processes
for i in range(n):
# run vocabulary pattern matching against trimmed extracts
process = multiprocessing.Process(
target=mapped_function, args=(extract_sets[i], vocabulary, seen_patterns, queue,),
)
process.start()
processes.append(process)
# collect process output
for r in range(n):
previous_extracts[current_cycle] += queue.get()
# terminate the processes
for process in processes:
process.join()
# save to json
with open(folder + "/extracts.json", "w") as f:
json.dump(previous_extracts, f, ensure_ascii=False)
    # save output to text files for inspection
with open(folder + f"/extracts-{current_cycle}.txt", "w") as f:
for phrases, extract, parsed_extract in previous_extracts[current_cycle]:
f.write("\n\n")
f.write(f"{phrases}")
f.write("\n" + extract)
f.write("\n" + parsed_extract)
def mapped_function(extract_set, vocabulary, seen_patterns, queue):
"""Iterate through the extract_set and return a list of those extracts matching the previous lexicon cycle entries.
"""
returned = []
for extract, parsed_extract in extract_set:
for v_pattern, phrases in vocabulary:
mo_lexicon = regex.search(v_pattern, parsed_extract)
if mo_lexicon:
# check does not conform to a seen pattern
mo_seen = None
for seen_abstraction, seen_compiled in seen_patterns:
mo_seen = regex.match(seen_compiled, parsed_extract)
if mo_seen:
print("\n\nseen pattern")
print(extract)
print(seen_abstraction)
break # break seen pattern loop
if mo_lexicon and not mo_seen:
# if both vocab match and not conforming to seen_patterns
returned.append((phrases, extract, parsed_extract))
# print("\n\naccepted")
# print(extract)
queue.put(returned)
def get_extracts(file):
"""Return existing extracts file container or create new.
"""
# if file exists, then load
if os.path.exists(file):
with open(file, "r") as f:
previous_extracts = json.load(f)
# save as "folder/extracts.json" in case wish to revert
with open(file, "w") as f:
json.dump(previous_extracts, f, ensure_ascii=False, indent=4)
# add new cycle
previous_extracts[str(len(previous_extracts.keys()))] = []
# if file doesn't exist, create new
else:
previous_extracts = {"0": []}
# get the current cycle's index key for extracts
current_cycle = str(list(previous_extracts.keys())[-1])
return previous_extracts, current_cycle
def extracts_as_set(extracts):
"""Return the extracts to date as a set
Args:
extracts (dict): {"cycle":[([seeds], extract, parsed_extract),..],..}
Return:
set of seen extracts
"""
seen_extracts = []
for keys, values in extracts.items():
for phrase, extract, parsed_extract in values:
seen_extracts.append(extract)
seen_extracts = set(seen_extracts)
return seen_extracts
def get_lexicon(lexicon):
"""Return preivious lexicon vocab as a list of (compiled re, (coincident phrases)).
Args:
lexicon.json: [
# cycle 0
[
# list of entries, each entry corresponds to pattern
[
                [(phrase0, phrase1), ..], # list of coincident phrases matched (e.g., adj, noun collection)
pattern_A
],
[
[(phrase0, phrase1), ..],
pattern_B
]
....
],
....
]
"""
patterns = []
for entry in lexicon[-1]: # each entry in previous cycle
for phrases in entry[0]:
try:
converted_compounded_phrases = ""
for phrase in phrases:
converted_compounded_phrases += ".*" + convert_patterns([phrase],chunks)[0]
patterns.append((regex.compile(converted_compounded_phrases), phrases))
except:
print(f"lexicon error, please correct, token: {phrases}")
return patterns
def compile_patterns(abstractions):
"""Assemble list of (abstracted_pattern, compiled) tuples of abstracted patterns.
Args:
abstractions: []
Returns:
[(abstracted_pattern, compiled),...]
"""
# assemble (new) extraction patterns in python re format
patterns = [] # patterns = [(abstraction, compiled pattern), ..]
for abstraction in abstractions:
print(abstraction)
patterns.append(
(
abstraction,
regex.compile(
"^.*" + convert_patterns([abstraction], chunks)[0] + ".*",
re.MULTILINE,
),
)
)
return patterns
def trim_extracts(extracts, seen_extracts):
"""Return a list of (extract, parsed_extract) for unseen extracts, not conforming to a known pattern.
Args:
extracts (list): [(sentence, parsed sentence),..]
"""
# trim extract set, based on seen extracts
extracts_trimmed = []
for extract, parsed_extract in extracts:
if extract not in seen_extracts:
extracts_trimmed.append((extract, parsed_extract))
return extracts_trimmed
def group_extracts(extracts, n):
"""Return extracts as a list of n lists of extracts (for multiprocessing)
e.g., where n = 4, [[(extract, parsed_extract),...],[],[],[]]
Args:
extracts: [(sentence, parsed sentence),..]
"""
extract_sets = [[] for i in range(n)]
for i in range(0, len(extracts), n):
for j in range(0, n):
try:
extract_sets[j].append(extracts[i + j])
except:
pass
return extract_sets
if __name__ == "__main__":
main(sys.argv[1:])
|
ryanbrate/DS_thesis
|
5_Process/get_extracts.py
|
get_extracts.py
|
py
| 9,761 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "PATTERNS.identification_patterns",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Queue",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "regex.search",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "regex.match",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pattern_abstraction.convert_patterns",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "CHUNKS.chunks",
"line_number": 223,
"usage_type": "argument"
},
{
"api_name": "regex.compile",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "regex.compile",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "pattern_abstraction.convert_patterns",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "CHUNKS.chunks",
"line_number": 248,
"usage_type": "argument"
},
{
"api_name": "re.MULTILINE",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 290,
"usage_type": "attribute"
}
] |
40466793180
|
#!/home/gabriel/funcam/venv/bin/python3
# ONLY TESTED ON LINUX
# To run using ./run.py [args] on your terminal (without python3)
# point the first line to some python interpreter containing the requirements
# or create a venv inside this project.
# Or delete this to use another method.
from cam import Cam
from vcam import VCam
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--virtual',help='enable virtual cam',action='store_true')
parser.add_argument('--video', help='choose video input', type=int, default=0)
parser.add_argument('--maxhands', help='set max hands for detection', type=int, default=1)
parser.add_argument('-d', help='enable draw for marks and functional areas', action='store_true')
parser.add_argument('--finger', help='choose the finger for control', type=int, default=8, choices=[4, 8, 12, 16, 20])
parser.add_argument('-p', help='enable camera to take photos', action='store_true')
args = parser.parse_args()
if args.virtual:
# virtual cam
vc = VCam(video=args.video, mxhand=args.maxhands, du=args.d, f=args.finger)
vc.start()
else:
# own cam
cam = Cam(video=args.video, mxhand=args.maxhands, du=args.d, f=args.finger, p=args.p)
cam.open()
|
biguelito/funcam
|
funcam.py
|
funcam.py
|
py
| 1,315 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "vcam.VCam",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cam.Cam",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cam.open",
"line_number": 32,
"usage_type": "call"
}
] |
35951961808
|
# -*- coding: utf-8 -*-
import logging
from path import Path
logger = logging.getLogger(__name__)
def get_mipname(fastq_file):
"""Takes a demux fastq file and returns a MIP compatible fastq file
Args:
fastq_file (str): a FQP to a fastq file.
Returns (str): A MIP compatible fastq file.
"""
dirparts = fastq_file.split("/")
nameparts = dirparts[-1].split("_")
# H3LGFCCXX-l1t21_973470_CGGCTATG_L001_R2_001.fastq.gz
# H3LGFCCXX-l1t21_Undetermined_CGGCTATG_L001_R1_001.fastq.gz
# RNA1460A10_dual10_TCCGGAGA-ATAGAGGC_L001_R1_001.fastq.gz
# RNA1460A10_TCCGGAGA-ATAGAGGC_L001_R1_001.fastq.gz
index = nameparts[-4]
# no worries, this'll always work, right?
fc = dirparts[-5].split("_")[-1][1:]
lane = int(nameparts[-3][-1:])
readdirection = nameparts[-2][-1:]
rundir = dirparts[-5]
date = rundir.split("_")[0]
sample_id = dirparts[-2].split("_")[1]
# X stuff
undetermined = ''
if nameparts[1] == 'Undetermined':
undetermined = '-Undetermined'
tile = ''
if '-' in nameparts[0]:
# H2V2YCCXX-l2t21
tile = nameparts[0].split('-')[1].split('t')[1]
tile = '-' + tile
newname = "{lane}_{date}_{fc}{tile}{undetermined}_{sample}_{index}_{readdirection}.fastq.gz".format(
lane=lane,
date=date,
fc=fc,
sample=sample_id,
index=index,
readdirection=readdirection,
undetermined=undetermined,
tile=tile
)
return newname
def make_link(source, dest, link_type='hard'):
Path(dest).remove_p()
try:
if link_type == 'soft':
logger.debug("ln -s {} {} ...".format(source, dest))
Path(source).symlink(dest)
else:
real_source = Path(source).realpath()
logger.debug("ln {} {} ...".format(real_source, dest))
Path(real_source).link(dest)
except Exception as error:
# catch, print, and continue
logger.error(repr(error))
return False
return True
|
Clinical-Genomics/deliver
|
deliver/utils/files.py
|
files.py
|
py
| 2,052 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "path.Path",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "path.Path",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "path.Path",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "path.Path",
"line_number": 70,
"usage_type": "call"
}
] |
1397488728
|
from collections import defaultdict
def longestPalindrome(s):
maxlen, maxp, l, dit = 0, "", len(s), defaultdict(list)
for i in range(l):
dit[s[i]].append(i)
        for j in dit[s[i]]:  # indices where s[i] occurs (earliest first); reversing a 1-char string was a no-op
if s[j:i+1] == s[j:i+1][::-1]:
if len(s[j:i+1]) > maxlen:
maxlen = len(s[j:i+1])
maxp = s[j:i+1]
break
return maxp
st=input()
print(longestPalindrome(st))
|
anjaliugale31/placement_preparation
|
strongest_palindrome.py
|
strongest_palindrome.py
|
py
| 368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 3,
"usage_type": "call"
}
] |
10010702062
|
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
from keras.models import load_model
model = load_model(r'customer-churn\saved_model (1).pb')  # raw string so the backslash is not treated as an escape
# Importing the dataset
dataset = pd.read_csv('customer_churn_large_dataset.csv')
# Extracting dependent and independent variables:
# Extracting independent variable:
X = dataset.iloc[:,3:13].values
# Extracting dependent variable:
y = dataset.iloc[:, 5].values
# Encoding Categorical data:
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X[:, 1] = labelencoder_X.fit_transform(X[:, 1])
# Encoding Categorical data:
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X[:, 2] = labelencoder_X.fit_transform(X[:, 2])
#dummy encoding.
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
columnTransformer = ColumnTransformer([('yograj', OneHotEncoder(), [1])],remainder='passthrough')
X=columnTransformer.fit_transform(X)
#dummy encoding.
# Dummy Variable trapping
X = X[:, 1:]
# Splitting the Dataset into the Training set and Test set
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
# Feature Scaling
# Standard Scaling: Standardization = X'=X-mean(X)/standard deviation
# normal scaling : Normalization= X'=X-min(X)/max(x)-min(X)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
@app.route('/')
def home():
return render_template("index.html")
@app.route('/predict',methods=['GET'])
def predict():
    '''
    For rendering results on HTML GUI
    '''
    creditscore = int(request.args.get('CustomerID'))
    geo = int(request.args.get('Name'))
    age = int(request.args.get('Age'))
    tenure = int(request.args.get('Gender'))
    balance = int(request.args.get('Location'))
    numofproducts = int(request.args.get('Subscription_Length_Months'))
    creditcards = int(request.args.get('Monthly_Bill'))
    activemember = int(request.args.get('Total_Usage_GB'))
    salary = int(request.args.get('Churn'))
    y_pred = model.predict(sc_X.transform(np.array([[0, 1, creditscore, geo, age, tenure, balance,
                                                     numofproducts, creditcards, activemember, salary]])))
    y_pred = (y_pred > 0.5)
    if y_pred:
        result = "Customer will not churn"
    else:
        result = "Customer will churn"
    return render_template('index.html', prediction_text='Model has predicted : {}'.format(result))
|
meyograj/churn1
|
app.py
|
app.py
|
py
| 2,806 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.compose.ColumnTransformer",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 80,
"usage_type": "call"
}
] |
29205882969
|
import subprocess
import time
import os
import math
from PIL import Image
import psutil
import re
from skyfield.api import Star
import numpy as np
import threading
import select
from pathlib import Path
import fitsio
import Nexus
import Coordinates
import Display
home_path = str(Path.home())
version = "21_7"
#os.system('pkill -9 -f eFinder.py') # stops the autostart eFinder program running
x = y = 0 # x, y define what page the display is showing
deltaAz = deltaAlt = 0
expInc = 1 # sets how much exposure changes when using handpad adjust (seconds)
gainInc = 5 # ditto for gain
offset_flag = False
align_count = 0
offset = 640, 480
star_name = "no star"
solve = False
sync_count = 0
sDog = True
gotoFlag = False
def xy2rd(x, y): # returns the RA & Dec equivalent to a camera pixel x,y
result = subprocess.run(
[
"wcs-xy2rd",
"-w",
destPath + "capture.wcs",
"-x",
str(x),
"-y",
str(y),
],
capture_output=True,
text=True,
)
result = str(result.stdout)
line = result.split("RA,Dec")[1]
    ra, dec = re.findall(r"[-+]?\d+\.\d+", line)
return (float(ra), float(dec))
def pixel2dxdy(pix_x, pix_y): # converts a pixel position, into a delta angular offset from the image centre
deg_x = (float(pix_x) - 640) * pix_scale / 3600 # in degrees
deg_y = (480 - float(pix_y)) * pix_scale / 3600
dxstr = "{: .1f}".format(float(60 * deg_x)) # +ve if finder is left of Polaris
dystr = "{: .1f}".format(
float(60 * deg_y)
) # +ve if finder is looking below Polaris
return (deg_x, deg_y, dxstr, dystr)
def dxdy2pixel(dx, dy):
pix_x = dx * 3600 / pix_scale + 640
pix_y = 480 - dy * 3600 / pix_scale
dxstr = "{: .1f}".format(float(60 * dx)) # +ve if finder is left of Polaris
dystr = "{: .1f}".format(float(60 * dy)) # +ve if finder is looking below Polaris
return (pix_x, pix_y, dxstr, dystr)
def imgDisplay(): # displays the captured image on the Pi desktop.
for proc in psutil.process_iter():
if proc.name() == "display":
proc.kill() # delete any previous image display
im = Image.open(destPath + "capture.jpg")
#im.show()
def capture():
global param
if param["Test mode"] == "1":
if offset_flag == False:
m13 = True
polaris_cap = False
else:
m13 = False
polaris_cap = True
else:
m13 = False
polaris_cap = False
radec = nexus.get_short()
camera.capture(
int(float(param["Exposure"]) * 1000000),
int(float(param["Gain"])),
radec,
m13,
polaris_cap,
destPath,
)
def solveImage():
global offset_flag, solve, solvedPos, elapsed_time, star_name, star_name_offset, solved_radec, solved_altaz
scale_low = str(pix_scale * 0.9)
scale_high = str(pix_scale * 1.1)
name_that_star = ([]) if (offset_flag == True) else (["--no-plots"])
handpad.display("Started solving", "", "")
limitOptions = [
"--overwrite", # overwrite any existing files
"--skip-solved", # skip any files we've already solved
"--cpulimit",
"10", # limit to 10 seconds(!). We use a fast timeout here because this code is supposed to be fast
]
optimizedOptions = [
"--downsample",
"2", # downsample 4x. 2 = faster by about 1.0 second; 4 = faster by 1.3 seconds
"--no-remove-lines", # Saves ~1.25 sec. Don't bother trying to remove surious lines from the image
"--uniformize",
"0", # Saves ~1.25 sec. Just process the image as-is
]
scaleOptions = [
"--scale-units",
"arcsecperpix", # next two params are in arcsecs. Supplying this saves ~0.5 sec
"--scale-low",
scale_low, # See config above
"--scale-high",
scale_high, # See config above
]
fileOptions = [
"--new-fits",
"none", # Don't create a new fits
"--solved",
"none", # Don't generate the solved output
"--match",
"none", # Don't generate matched output
"--corr",
"none", # Don't generate .corr files
"--rdls",
"none", # Don't generate the point list
]
cmd = ["solve-field"]
captureFile = destPath + "capture.jpg"
options = (
limitOptions + optimizedOptions + scaleOptions + fileOptions + [captureFile]
)
start_time = time.time()
# next line runs the plate-solve on the captured image file
result = subprocess.run(
cmd + name_that_star + options, capture_output=True, text=True
)
elapsed_time = time.time() - start_time
print("solve elapsed time " + str(elapsed_time)[0:4] + " sec\n")
print(result.stdout) # this line added to help debug.
result = str(result.stdout)
if "solved" not in result:
print("Bad Luck - Solve Failed")
handpad.display("Not Solved", "", "")
solve = False
return
if (offset_flag == True) and ("The star" in result):
table, h = fitsio.read(destPath + "capture.axy", header=True)
star_name_offset = table[0][0], table[0][1]
lines = result.split("\n")
for line in lines:
if line.startswith(" The star "):
star_name = line.split(" ")[4]
print("Solve-field Plot found: ", star_name)
break
solvedPos = applyOffset()
ra, dec, d = solvedPos.apparent().radec(coordinates.get_ts().now())
solved_radec = ra.hours, dec.degrees
solved_altaz = coordinates.conv_altaz(nexus, *(solved_radec))
nexus.set_scope_alt(solved_altaz[0] * math.pi / 180)
arr[0, 2][0] = "Sol: RA " + coordinates.hh2dms(solved_radec[0])
arr[0, 2][1] = " Dec " + coordinates.dd2dms(solved_radec[1])
arr[0, 2][2] = "time: " + str(elapsed_time)[0:4] + " s"
solve = True
deltaCalc()
def applyOffset():
x_offset, y_offset, dxstr, dystr = dxdy2pixel(
float(param["d_x"]), float(param["d_y"])
)
print('applied_offset_pixels x,y',x_offset,y_offset)
ra, dec = xy2rd(x_offset, y_offset)
solved = Star(
ra_hours=ra / 15, dec_degrees=dec
) # will set as J2000 as no epoch input
solvedPos_scope = (
nexus.get_location().at(coordinates.get_ts().now()).observe(solved)
) # now at Jnow and current location
return solvedPos_scope
def deltaCalc():
global deltaAz, deltaAlt, elapsed_time
deltaAz = solved_altaz[1] - nexus.get_altAz()[1]
if abs(deltaAz) > 180:
if deltaAz < 0:
deltaAz = deltaAz + 360
else:
deltaAz = deltaAz - 360
deltaAz = 60 * (
deltaAz * math.cos(nexus.get_scope_alt())
) # actually this is delta'x' in arcminutes
deltaAlt = solved_altaz[0] - nexus.get_altAz()[0]
deltaAlt = 60 * (deltaAlt) # in arcminutes
deltaXstr = "{: .2f}".format(float(deltaAz))
deltaYstr = "{: .2f}".format(float(deltaAlt))
arr[0, 3][0] = "Delta: x= " + deltaXstr
arr[0, 3][1] = " y= " + deltaYstr
arr[0, 3][2] = "time: " + str(elapsed_time)[0:4] + " s"
def align():
global align_count, solve, sync_count, param, offset_flag, arr, x,y
new_arr = nexus.read_altAz(arr)
arr = new_arr
capture()
imgDisplay()
solveImage()
if solve == False:
handpad.display(arr[x, y][0], "Solved Failed", arr[x, y][2])
return
align_ra = ":Sr" + coordinates.dd2dms((solved_radec)[0]) + "#"
align_dec = ":Sd" + coordinates.dd2aligndms((solved_radec)[1]) + "#"
valid = nexus.get(align_ra)
print(align_ra)
if valid == "0":
print("invalid position")
handpad.display(arr[x, y][0], "Invalid position", arr[x, y][2])
time.sleep(3)
return
valid = nexus.get(align_dec)
print(align_dec)
if valid == "0":
print("invalid position")
handpad.display(arr[x, y][0], "Invalid position", arr[x, y][2])
time.sleep(3)
return
reply = nexus.get(":CM#")
nexus.read_altAz(arr)
deltaCalc()
print("reply: ", reply)
p = nexus.get(":GW#")
print("Align status reply ", p)
    if nexus.is_aligned() == False: # wasn't aligned before this action
align_count += 1
if p[1] != "T": # and still not aligned
arr[0,4][0] = "'OK' aligns"
arr[0,4][1] = "Align count " + str(align_count)
arr[0,4][2] = "Nexus reply:" + p[0:3]
handpad.display(arr[0,4][0],arr[0,4][1],arr[0,4][2])
else:
arr[0,4][0] = "'OK' now syncs"
arr[0,4][1] = "Sync count " + str(sync_count)
arr[0,4][2] = "Nexus reply:" + p[0:3]
arr[2,0][1] = "Nexus is aligned"
handpad.display(arr[0,4][0],arr[0,4][1],arr[0,4][2])
nexus.set_aligned(True)
else:
sync_count +=1
arr[0,4][0] = "'OK' syncs"
arr[0,4][1] = "Sync count " + str(sync_count)
arr[0,4][2] = ""
handpad.display(arr[0,4][0],arr[0,4][1],arr[0,4][2])
print("Nexus is aligned:",nexus.is_aligned())
return
def measure_offset():
global offset_str, offset_flag, param, scope_x, scope_y, star_name
offset_flag = True
handpad.display("started capture", "", "")
capture()
imgDisplay()
solveImage()
if solve == False:
handpad.display("solve failed", "", "")
return
scope_x = star_name_offset[0]
scope_y = star_name_offset[1]
print('pixel_offset x,y',star_name_offset)
d_x, d_y, dxstr, dystr = pixel2dxdy(scope_x, scope_y)
param["d_x"] = d_x
param["d_y"] = d_y
save_param()
offset_str = dxstr + "," + dystr
arr[2, 1][1] = "new " + offset_str
arr[2, 2][1] = "new " + offset_str
handpad.display(arr[2, 1][0], arr[2, 1][1], star_name + " found")
offset_flag = False
def up_down(v):
global x
x = x + v
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
def left_right(v):
global y
y = y + v
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
def up_down_inc(inc, sign):
arr[x, y][1] = int(float(arr[x, y][1])) + inc * sign
param[arr[x, y][0]] = float(arr[x, y][1])
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
update_summary()
time.sleep(0.1)
def flip():
global param
arr[x, y][1] = 1 - int(float(arr[x, y][1]))
param[arr[x, y][0]] = str((arr[x, y][1]))
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
update_summary()
time.sleep(0.1)
def update_summary():
global param
arr[1, 0][0] = (
"Ex:" + str(param["Exposure"]) + " Gn:" + str(param["Gain"])
)
arr[1, 0][1] = "Test:" + str(param["Test mode"]) + " GoTo++:" + str(param["Goto++ mode"])
save_param()
def go_solve():
global x, y, solve, arr
new_arr = nexus.read_altAz(arr)
arr = new_arr
handpad.display("Image capture", "", "")
capture()
imgDisplay()
handpad.display("Plate solving", "", "")
solveImage()
if solve == True:
handpad.display("Solved", "", "")
else:
handpad.display("Not Solved", "", "")
return
x = 0
y = 3
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
def gotoDistant():
nexus.read_altAz(arr)
nexus_radec = nexus.get_radec()
deltaRa = abs(nexus_radec[0]-goto_radec[0])*15
if deltaRa > 180:
deltaRa = abs(deltaRa - 360)
deltaDec = abs(nexus_radec[1]-goto_radec[1])
print('goto distance, RA,Dec :',deltaRa,deltaDec)
if deltaRa+deltaDec > 5:
return(True)
else:
return(False)
def readTarget():
global goto_radec,goto_ra,goto_dec
goto_ra = nexus.get(":Gr#")
if (
goto_ra[0:2] == "00" and goto_ra[3:5] == "00"
): # not a valid goto target set yet.
print("no GoTo target")
handpad.display("no GoTo target","set yet","")
return
goto_dec = nexus.get(":Gd#")
ra = goto_ra.split(":")
dec = re.split(r"[:*]", goto_dec)
goto_radec = (float(ra[0]) + float(ra[1]) / 60 + float(ra[2]) / 3600), math.copysign(
abs(abs(float(dec[0])) + float(dec[1]) / 60 + float(dec[2]) / 3600),
float(dec[0]),
)
print("Target goto RA & Dec", goto_ra, goto_dec)
def goto():
global gotoFlag
handpad.display("Attempting", "GoTo", "")
gotoFlag = True
readTarget()
if gotoDistant():
if sDog == True:
nexus.write(":Sr" + goto_ra + "#")
nexus.write(":Sd" + goto_dec + "#")
reply = nexus.get(":MS#")
else:
gotoStr = '%s%06.3f %+06.3f' %("g",goto_radec[0],goto_radec[1])
print("Target goto RA & Dec", gotoStr)
servocat.send(gotoStr)
handpad.display("Performing", " GoTo", "")
time.sleep(1)
gotoStopped()
handpad.display("Finished", " GoTo", "")
go_solve()
if int(param["Goto++ mode"]) == 0:
return
align() # close, so local sync scope to true RA & Dec
if sDog == True:
nexus.write(":Sr" + goto_ra + "#")
nexus.write(":Sd" + goto_dec + "#")
reply = nexus.get(":MS#")
else:
gotoStr = '%s%06.3f %+06.3f' %("g",goto_radec[0],goto_radec[1])
print('GoToStr: ',gotoStr)
servocat.send(gotoStr)
gotoStopped()
gotoFlag = False
handpad.display("Finished", " GoTo++", "")
go_solve()
def getRadec():
nexus.read_altAz(None)
return(nexus.get_radec())
def gotoStopped():
radecNow = getRadec()
while True:
time.sleep(1)
radec = getRadec()
print(radec[0],radecNow[0],radec[1],radecNow[1])
if (abs(radecNow[0] - radec[0])*15 < 0.01) and (abs(radecNow[1] - radec[1]) < 0.01):
return
else:
radecNow = radec
def reset_offset():
global param, arr
param["d_x"] = 0
param["d_y"] = 0
offset_str = "0,0"
arr[2,1][1] = "new " + offset_str
arr[2,2][1] = "new " + offset_str
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
save_param()
def get_param():
global param, offset_str, pix_scale
if os.path.exists(home_path + "/Solver/eFinder.config") == True:
with open(home_path + "/Solver/eFinder.config") as h:
for line in h:
line = line.strip("\n").split(":")
param[line[0]] = str(line[1])
pix_scale = float(param["pixel scale"])
pix_x, pix_y, dxstr, dystr = dxdy2pixel(
float(param["d_x"]), float(param["d_y"])
)
offset_str = dxstr + "," + dystr
def save_param():
global param
with open(home_path + "/Solver/eFinder.config", "w") as h:
for key, value in param.items():
#print("%s:%s\n" % (key, value))
h.write("%s:%s\n" % (key, value))
def reader():
global button
while True:
if handpad.get_box() in select.select([handpad.get_box()], [], [], 0)[0]:
button = handpad.get_box().readline().decode("ascii").strip("\r\n")
time.sleep(0.1)
def home_refresh():
global x,y
while True:
if x == 0 and y == 0:
time.sleep(1)
while x ==0 and y==0:
nexus.read_altAz(arr)
radec = nexus.get_radec()
ra = coordinates.hh2dms(radec[0])
dec = coordinates.dd2dms(radec[1])
handpad.display('Nexus live',' RA: '+ra, 'Dec: '+dec)
time.sleep(0.5)
else:
handpad.display(arr[x, y][0], arr[x, y][1], arr[x, y][2])
time.sleep (0.5)
# main code starts here
handpad = Display.Handpad(version)
coordinates = Coordinates.Coordinates()
nexus = Nexus.Nexus(handpad, coordinates)
nexus.read()
param = dict()
get_param()
# array determines what is displayed, computed and what each button does for each screen.
# [first line,second line,third line, up button action,down...,left...,right...,select button short press action, long press action]
# empty string does nothing.
# example: left_right(-1) allows left button to scroll to the next left screen
# button texts are infact def functions
p = ""
home = [
"Nexus live",
" RA:",
"Dec:",
"",
"up_down(1)",
"",
"left_right(1)",
"align()",
"goto()",
]
nex = [
"Nex: RA ",
" Dec ",
"",
"",
"",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
sol = [
"No solution yet",
"'OK' solves",
"",
"",
"",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
delta = [
"Delta: No solve",
"'OK' solves",
"",
"",
"",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
aligns = [
"'OK' aligns",
"not aligned yet",
str(p),
"",
"",
"left_right(-1)",
"",
"align()",
"",
]
polar = [
"'OK' Bright Star",
offset_str,
"",
"",
"",
"left_right(-1)",
"left_right(1)",
"measure_offset()",
"",
]
reset = [
"'OK' Resets",
offset_str,
"",
"",
"",
"left_right(-1)",
"left_right(1)",
"reset_offset()",
"",
]
summary = ["", "", "", "up_down(-1)", "up_down(1)", "", "left_right(1)", "go_solve()", ""]
exp = [
"Exposure",
param["Exposure"],
"",
"up_down_inc(expInc,1)",
"up_down_inc(expInc,-1)",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
gn = [
"Gain",
param["Gain"],
"",
"up_down_inc(gainInc,1)",
"up_down_inc(gainInc,-1)",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
gotoMode = [
"Goto++ mode",
int(param["Goto++ mode"]),
"",
"flip()",
"flip()",
"left_right(-1)",
"",
"go_solve()",
"goto()",
]
mode = [
"Test mode",
int(param["Test mode"]),
"",
"flip()",
"flip()",
"left_right(-1)",
"left_right(1)",
"go_solve()",
"goto()",
]
status = [
"Nexus via " + nexus.get_nexus_link(),
"Nex align " + str(nexus.is_aligned()),
"Brightness",
"up_down(-1)",
"",
"",
"left_right(1)",
"go_solve()",
"goto()",
]
bright = [
"Handpad",
"Display",
"Bright Adj",
"",
"",
"left_right(-1)",
"",
"go_solve()",
"goto()",
]
arr = np.array(
[
[home, nex, sol, delta, aligns],
[summary, exp, gn, mode, gotoMode],
[status, polar, reset, bright, bright],
]
)
update_summary()
deg_x, deg_y, dxstr, dystr = dxdy2pixel(float(param["d_x"]), float(param["d_y"]))
offset_str = dxstr + "," + dystr
new_arr = nexus.read_altAz(arr)
arr = new_arr
if nexus.is_aligned() == True:
arr[0, 4][1] = "Nexus is aligned"
arr[0, 4][0] = "'OK' syncs"
#arr[2,0][1] = "Nexus is aligned"
if param["Camera Type ('QHY' or 'ASI')"]=='ASI':
import ASICamera2
camera = ASICamera2.ASICamera(handpad)
elif param["Camera Type ('QHY' or 'ASI')"]=='QHY':
import QHYCamera2
camera = QHYCamera2.QHYCamera(handpad)
if param["Drive ('scopedog' or 'servocat')"].lower()=='servocat':
import ServoCat
servocat = ServoCat.ServoCat()
sDog = False
print('ServoCat mode')
arr[2,0][1] = "ServoCat mode"
else:
print('ScopeDog mode')
arr[2,0][1] = "ScopeDog mode"
if param["Ramdisk"].lower()=='true':
destPath = "/var/tmp/"
else:
destPath = home_path + "/Solver/images/"
print('Working folder: '+destPath)
handpad.display("ScopeDog eFinder", "ver " + version, "Drive: "+param["Drive ('scopedog' or 'servocat')"])
time.sleep(3)
button = ""
scan = threading.Thread(target=reader)
scan.daemon = True
scan.start()
while True: # next loop looks for button press and sets display option x,y
if button == "20":
exec(arr[x, y][7])
elif button == "21":
exec(arr[x, y][8])
elif button == "18":
exec(arr[x, y][4])
elif button == "16":
exec(arr[x, y][3])
elif button == "19":
exec(arr[x, y][5])
elif button == "17":
exec(arr[x, y][6])
button = ""
if x == 0 and y == 0 and gotoFlag == False:
nexus.read_altAz(arr)
radec = nexus.get_radec()
if nexus.is_aligned() == True:
tick = "T"
else:
tick = "N"
ra = coordinates.hh2dms(radec[0])
dec = coordinates.dd2dms(radec[1])
handpad.display('Nexus live '+tick,' RA: '+ra, 'Dec: '+dec)
time.sleep(0.2)
else:
time.sleep(0.1)
|
WimDeMeester/eFinder
|
eFinder.py
|
eFinder.py
|
py
| 20,522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path.home",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "subprocess.run",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "psutil.process_iter",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "fitsio.read",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "skyfield.api.Star",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "math.copysign",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 433,
"usage_type": "attribute"
},
{
"api_name": "select.select",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "Display.Handpad",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "Coordinates.Coordinates",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "Nexus.Nexus",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "ASICamera2.ASICamera",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "QHYCamera2.QHYCamera",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "ServoCat.ServoCat",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 680,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 710,
"usage_type": "call"
}
] |
36040524676
|
import statistics
from ParadoxTrading.Indicator.IndicatorAbstract import IndicatorAbstract
from ParadoxTrading.Utils import DataStruct
class AdaBBands(IndicatorAbstract):
def __init__(
self, _period: int, _use_key: str,
_init_n: int = 20, _min_n: int = 20, _max_n: int = 60,
_rate: float = 2.0, _idx_key: str = 'time'
):
super().__init__()
self.use_key = _use_key
self.idx_key = _idx_key
self.keys = [self.idx_key, 'upband', 'midband', 'downband']
self.data = DataStruct(
self.keys, self.idx_key
)
self.period = _period
self.rate = _rate
self.buf = []
self.prev_std = None
self.dynamic_n = float(_init_n)
self.min_n = _min_n
self.max_n = _max_n
def _addOne(self, _data_struct: DataStruct):
index_value = _data_struct.index()[0]
self.buf.append(_data_struct.getColumn(self.use_key)[0])
if len(self.data) > self.period:
const_std = statistics.pstdev(self.buf[-self.period:])
self.dynamic_n *= const_std / self.prev_std
self.dynamic_n = max(self.min_n, self.dynamic_n)
self.dynamic_n = min(self.max_n, self.dynamic_n)
tmp_n = int(round(self.dynamic_n))
mean = statistics.mean(self.buf[-tmp_n:])
std = statistics.pstdev(self.buf[-tmp_n:])
self.data.addRow(
[index_value, mean + self.rate * std,
mean, mean - self.rate * std],
self.keys
)
self.prev_std = const_std
else:
if len(self.data) == self.period:
self.prev_std = statistics.pstdev(self.buf)
self.data.addRow(
[index_value, None, None, None],
self.keys
)
|
ppaanngggg/ParadoxTrading
|
ParadoxTrading/Indicator/General/AdaBBands.py
|
AdaBBands.py
|
py
| 1,870 |
python
|
en
|
code
| 51 |
github-code
|
6
|
[
{
"api_name": "ParadoxTrading.Indicator.IndicatorAbstract.IndicatorAbstract",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "ParadoxTrading.Utils.DataStruct",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ParadoxTrading.Utils.DataStruct",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "statistics.pstdev",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "statistics.pstdev",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "statistics.pstdev",
"line_number": 56,
"usage_type": "call"
}
] |
19780951486
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import os
import argparse
import subprocess
from glob import glob
def translatefolder(src, trg, **kw):
python = kw.get("python", "python3")
translate = kw.get("translate", "./translate/translate.py")
port = int(kw.get("port", 3035))
host = kw.get("host", "127.0.0.1")
# create directories
if not os.path.exists(trg):
os.makedirs(trg)
# collect files
domains, problems = [], []
for f in os.listdir(src):
if "domain" in f and f.endswith(".pddl"):
domains.append(os.path.join(src, f))
elif "problem" in f and f.endswith(".pddl"):
problems.append(os.path.join(src, f))
domains.sort()
problems.sort()
# assign agents
agents = []
for i in range(len(domains)):
agents.append("tcp://{}:{}".format(host, str(port+i)))
# create command
tmpl = ("{} {} {} {} --agent-url " + " --agent-url ".join(agents) +
" --agent-id {} --output {} --json")
cmd = ""
for i, d in enumerate(domains):
s = tmpl.format(python, translate, d, problems[i], i,
os.path.join(trg,str(i)+'.json')) + ' & '
print(s)
cmd += s
cmd = cmd[:-2]
os.system(cmd)
def translateall(src='benchmarks/factored/', trg='benchmarks/compiled/', **kw):
files_src = glob(src + "*/*/")
files_trg = [os.path.join(trg, *f.split('/')[2:]) for f in files_src]
port = 3035
shift = 100
errors = []
for s, t in zip(files_src, files_trg):
try:
print("translating " + s + " to " + t + " port: " + str(port))
translatefolder(s, t, port=port)
except Exception as e:
errors += [e]
port += shift
for i, error in enumerate(errors):
print("ERR %d: %s: %s" % (i, type(error), error))
def on_translate(*args, **kw):
if kw['all']:
translateall(**kw)
else:
translatefolder(**kw)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run GOA planner')
parser.add_argument('src', help='path to folder containing src task')
parser.add_argument('trg', help='destination path')
parser.add_argument(
'--port',
default=3035,
help='the port (default: 3035)'
)
parser.add_argument(
'--all',
help='translate all domains of given folder',
action='store_true'
)
parser.set_defaults(func=on_translate)
args, rest = parser.parse_known_args()
args.func(*rest, **vars(args))
|
schultet/goa
|
scripts/translate.py
|
translate.py
|
py
| 2,582 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 74,
"usage_type": "call"
}
] |
35616591437
|
# https://adventofcode.com/2022/day/22
from collections import defaultdict
from aoctk.data import Graph, Unbound2DGrid
from aoctk.input import get_groups
def parse(data):
ps, (ins,) = get_groups(data)
m = Unbound2DGrid(
(
(complex(j, i), c)
for i, r in enumerate(ps)
for j, c in enumerate(r)
if c != " "
)
)
p = map(complex, ins.replace("R", " 1j ").replace("L", " -1j ").split())
return m, p, complex(ps[0].index(ps[0].strip()))
def solve(wrapping, data="input.txt"):
m, p, z = parse(data)
d = 1
while True:
s = next(p)
for _ in range(int(abs(s))):
if z + d not in m:
w, e = wrapping(m, z, d)
if m[w] != "#":
z, d = w, e
continue
elif m[z + d] == "#":
break
z += d
try:
d *= next(p)
except StopIteration:
break
return (
int(z.real + 1) * 4 + int(z.imag + 1) * 1000 + {1: 0, 1j: 1, -1: 2, -1j: 3}[d]
)
def part_one(data="input.txt"):
def wrapping(m, z, d):
w = z
while w - d in m:
w -= d
return w, d
return solve(wrapping, data)
def part_two(data="input.txt"):
m, _, _ = parse(data)
# Determine the face size
w, h = (_.hi + 1 for _ in m.bounds())
l = max(w, h) - min(w, h)
class Faces(Graph):
def adj(self, n):
return {
(n + l * d, d)
for d in (1j ** k for k in range(4))
if n + l * d in self.data
}
def __iter__(self):
return iter(self.data)
fs = Faces(
{
complex(i, j)
for i in range(0, w, l)
for j in range(0, h, l)
if complex(i, j) in m
}
)
# Determine the wrapping rules based on how the faces are connected
# The mapping tells for each face and each direction the destination face
# and the direction to go in that face.
wrs, c = defaultdict(dict), 24
for s in fs:
for t, d in fs.adj(s):
wrs[s][d] = (t, d)
c -= 1
while c > 0:
for s in fs:
r = wrs[s]
for k in (1j ** _ for _ in range(4)):
if c <= 0:
break
if k in r and k * 1j in r:
(t, phi), (q, psi) = r[k], r[k * 1j]
if phi * 1j not in wrs[t]:
wrs[t][phi * 1j] = (q, psi * 1j)
c -= 1
if -psi * 1j not in wrs[q]:
wrs[q][-psi * 1j] = (t, -phi * 1j)
c -= 1
def wrapping(m, z, d):
a = complex(z.real // l, z.imag // l) * l
b, e = wrs[a][d]
w = (z - a) - (l - 1) * d + (1 + 1j)
rot = e / d
tr = (l + 1) * (1 + 1j) * (1 - rot) / 2
w = b + w * rot + tr - (1 + 1j)
return w, e
return solve(wrapping, data)
def test():
assert part_one("test.txt") == 6032
assert part_two("test.txt") == 5031
|
P403n1x87/aoc
|
2022/22/code.py
|
code.py
|
py
| 3,157 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "aoctk.input.get_groups",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aoctk.data.Unbound2DGrid",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aoctk.data.Graph",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 90,
"usage_type": "call"
}
] |
32177006845
|
import numpy as np
from sentinelhub import BBox, bbox_to_dimensions,CRS
class resolution_image:
def __init__(self,bbox,resolution):
self.bbox = bbox
self.resolution = resolution
self.size=None
def run(self):
our_bbox = list(np.round(self.bbox,2))
our_bbox = BBox(bbox=our_bbox, crs=CRS.WGS84)
self.size = bbox_to_dimensions(our_bbox, resolution=self.resolution)
return self.size
|
VaclavLamich/Cloud-Detection
|
resolution.py
|
resolution.py
|
py
| 454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.round",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sentinelhub.BBox",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sentinelhub.CRS.WGS84",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sentinelhub.CRS",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sentinelhub.bbox_to_dimensions",
"line_number": 15,
"usage_type": "call"
}
] |
3647213738
|
import heapq
import numpy as np
import itertools
class PQueue:
def __init__(self):
self.pq = [] # list of entries arranged in a heap
self.entry_finder = {} # mapping of tasks to entries
self.REMOVED = '<removed-task>' # placeholder for a removed task
self.counter = itertools.count() # unique sequence count
def add_task(self, task, priority=0):
'Add a new task or update the priority of an existing task'
add_to_q = True
if task in self.entry_finder:
add_to_q = self.remove_task_if_lower_priority(task, priority)
if add_to_q:
count = next(self.counter)
entry = [priority, count, task]
self.entry_finder[task] = entry
heapq.heappush(self.pq, entry)
def remove_task_if_lower_priority(self, task, priority):
'Mark an existing task as self.REMOVED. Raise KeyError if not found.'
entry = self.entry_finder[task]
if entry[0] > priority:
del self.entry_finder[task]
entry[-1] = self.REMOVED
return True
else:
return False
def pop_task(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heapq.heappop(self.pq)
if task is not self.REMOVED:
#print(task)
#print(self.entry_finder)
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
def empty(self):
return len(self.entry_finder) == 0
def qsize(self):
return len(self.entry_finder)
def test():
q = PQueue()
q.add_task((tuple(np.array([1,2,3])),1),1)
q.add_task((tuple(np.array([4,5,6])),1),0)
q.add_task((tuple(np.array([1,2,3])),1),-1)
print(q.pop_task())
print(q.pop_task())
q.add_task((tuple(np.array([1,2,3])),1),0.5)
print(q.pop_task())
|
joedlcolvin/Tugboats
|
p_queue.py
|
p_queue.py
|
py
| 2,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.count",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
}
] |
27462814126
|
from app import app
from app import db
from app.models import booking
from flask import jsonify, request
@app.route('/get_booking', methods=['GET'])
def get_booking():
date = request.args.get('date')
idTable = request.args.get('idTable')
phone = ['','','','','','','','']
users = booking.query.all()
for u in users:
if u.date == date and u.table == int(idTable):
for h in range(8):
if (u.hour_start <= 12+h) and (12+h <= u.hour_end):
phone[h] = u.phone
return jsonify({
"schedule":{
"table_id": idTable,
"date": date,
"hours":[
{
"hour": "12:00",
"customerPhone": phone[0]
},
{
"hour": "13:00",
"customerPhone": phone[1]
},
{
"hour": "14:00",
"customerPhone": phone[2]
},
{
"hour": "15:00",
"customerPhone": phone[3]
},
{
"hour": "16:00",
"customerPhone": phone[4]
},
{
"hour": "17:00",
"customerPhone": phone[5]
},
{
"hour": "18:00",
"customerPhone": phone[6]
},
{
"hour": "19:00",
"customerPhone": phone[7]
}
]
}
})
@app.route('/post_new_booking', methods=['POST'])
def post_new_booking():
date = request.json['date']
table = request.json['table_id']
name = request.json['name']
comment = request.json['comment']
phone = request.json['phone']
hours_start = request.json['hours_start']
hours_end = request.json['hours_end']
u = booking(table=table, name=name, phone=phone, info=comment, date=date, hour_start=hours_start, hour_end=hours_end)
db.session.add(u)
db.session.commit()
return jsonify({"status": "OK"})
|
SevaSob/Na-rogah
|
routes.py
|
routes.py
|
py
| 2,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.request.args.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "app.models.booking.query.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.models.booking.query",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "app.models.booking",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "app.app.route",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "app.models.booking",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "app.db.session.add",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "app.app.route",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 61,
"usage_type": "name"
}
] |