| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score |
|---|---|---|---|---|---|---|---|---|---|---|
| string (4-286 chars) | string (5-119 chars) | int64 (0-191k) | string (1-7 chars) | string (6-1.03M chars) | string (6-1.03M chars) | string (111 classes) | float64 (0.03-1) | string (0-556k chars) | float64 (0.32-5.03) | int64 (0-5) |

src/Fun API.py | Ansh-code398/alfred-discord-bot | 0 stars | id 6630851

from functools import lru_cache
import nextcord as discord
import os
import aiohttp
import asyncio
from bs4 import BeautifulSoup
import datetime
import requests
import urllib.parse
from googlesearch import search
import External_functions as ef
def requirements():
return ["re"]
def main(client, re):
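    # "re" holds shared state passed into every command module: re[0] is used
    # below as a handled-command counter and re[8] as the embed colour value.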
def convert_to_url(name):
name = urllib.parse.quote(name)
return name
@client.command()
async def gen(ctx, *, text):
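        # Continue the prompt with GPT-Neo 2.7B via the Hugging Face Inference
        # API and reply with the generated text in an embed.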
print(ctx.guild.name)
re[0]+=1
API_URL2 = "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-2.7B"
header2 = {"Authorization": f"Bearer {os.environ['transformers_auth']}"}
payload2 = {
"inputs": text,
"parameters": {"max_new_tokens": 100, "return_full_text": True},
}
output = await ef.post_async(API_URL2, header2, payload2)
print(output)
o = output[0]["generated_text"]
await ctx.reply(
embed=ef.cembed(
title="Generated text", description=o, color=re[8],thumbnail=client.user.avatar_url_as(format="png")
)
)
@client.command()
async def kanye(ctx):
re[0] += 1
text = await ef.get_async("https://api.kanye.rest", kind="json");text=text["quote"]
embed = discord.Embed(
title="Kanye Rest", description=text, color=discord.Color(value=re[8])
)
embed.set_thumbnail(
url="https://i.pinimg.com/originals/3b/84/e1/3b84e1b85fb0a8068044df8b6cd8869f.jpg"
)
await ctx.send(embed=embed)
@client.command()
async def age(ctx, name):
try:
re[0] += 1
            # Parse the JSON response directly rather than eval()'ing the body.
            text = requests.get(
                f"https://api.agify.io/?name={name}").json()
st = ""
for i in text:
st += i + ":" + str(text[i]) + "\n"
await ctx.send(
embed=discord.Embed(
title="Agify", description=st, color=discord.Color(value=re[8])
)
)
        except Exception:
await ctx.send(
embed=discord.Embed(
title="Oops",
description="Something went wrong",
color=discord.Color(value=re[8]),
)
)
@client.command()
async def apis(ctx, page: int = 0):
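        # Build one embed per entry from api.publicapis.org and page through
        # them with the reaction paginator (pa1) defined below.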
a = await ef.get_async("https://api.publicapis.org/entries",kind="json")
b=a['entries']
embeds=[]
for i in range(a['count']):
text=f"{b[i]['Description']}\n\n\nAuth: {b[i]['Auth'] if b[i]['Auth']!='' else None}\nHTTPS: {b[i]['HTTPS']}\nCors: {b[i]['Cors']}\nCategory: {b[i]['Category']}"
embed = ef.cembed(
title=b[i]['API'],
description=text,
color=re[8],
url=b[i]['Link'],
footer=f"{i+1} of {a['count']}"
)
embeds.append(embed)
await pa1(embeds,ctx,page)
@client.command()
async def pokemon(ctx, pokemon=None):
        re[0] += 1
try:
a = await ef.get_async(f"https://pokeapi.co/api/v2/pokemon/{ef.convert_to_url(pokemon.lower())}",kind="json")
        except Exception:
a = "Not Found"
if a != "Not Found":
response = a
title = response["name"]
thumbnail = response["sprites"]["front_default"]
ability = "**ABILITIES:**\n"
for i in response["abilities"]:
ability += i["ability"]["name"] + "\n"
weight = "\n**WEIGHT**\n" + str(response["weight"])
embed = discord.Embed(
title=title,
description=ability + weight,
color=discord.Color(value=re[8]),
)
embed.set_thumbnail(url=thumbnail)
await ctx.send(embed=embed)
else:
await ctx.send(
embed=discord.Embed(
title="Hmm",
description="Not found",
color=discord.Color(value=re[8]),
)
)
@client.command()
async def ip(ctx, *, ip):
        re[0] += 1
ip = convert_to_url(ip)
print(ip)
print(f"https://ipinfo.io/{ip}/geo")
a = await ef.get_async(f"https://ipinfo.io/{ip}/geo",kind="json")
st = ""
if "status" not in list(a.keys()):
for i in list(a.keys()):
st += f"**{i}**:\n{a[i]}\n\n"
embed = discord.Embed(
title=ip, description=st, color=discord.Color(value=re[8])
)
embed.set_thumbnail(url=client.user.avatar_url_as(format="png"))
await ctx.send(embed=embed)
else:
embed = discord.Embed(
title="Oops",
description="Oops, couldnt find it :confused:",
color=discord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar_url_as(format="png"))
await ctx.send(embed=embed)
@client.command(aliases=["cat"])
async def cat_fact(ctx):
        re[0] += 1
        # Use the JSON parser instead of eval() on the response body.
        a = requests.get("https://catfact.ninja/fact").json()
embed = discord.Embed(
title="Cat Fact", description=a["fact"], color=discord.Color(value=re[8])
)
embed.set_thumbnail(url="https://i.imgur.com/u1TPbIp.png?1")
await ctx.send(embed=embed)
@client.command(aliases=["desktop"])
async def gs_stat(ctx):
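        # Fetch the StatCounter desktop OS page and pull the og:image chart URL
        # out of the raw HTML.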
a = await ef.get_async("https://gs.statcounter.com/os-market-share/desktop/worldwide/")
start = a.find('og:image" content="')+len('og:image" content="')
end = a.find(".png",start)+len(".png")
url = a[start:end]
await ctx.send(embed=ef.cembed(title="Gs.statcounter Desktop OS",description="This contains the market share of desktop operating systems worldwide", color=re[8], thumbnail="https://pbs.twimg.com/profile_images/918460707787681792/fMVNRhz4_400x400.jpg", picture=url))
@client.command()
async def csvoyager(ctx, edition):
embeds=[]
for i in range(1,20):
embed = ef.cembed(title="CS Voyager",description=f"{i} of 20",color=re[8],picture=f"https://csvoyager.netlify.app/data/{edition}/{i}.jpg")
embeds.append(embed)
await pa1(embeds,ctx)
@client.command(aliases=["g"])
async def google(ctx, *, text):
re[0] += 1
li = []
print(text, str(ctx.author))
for i in search(text, num=5, stop=5, pause=0):
embed = ef.cembed(title="Google",
color=re[8],
thumbnail=client.user.avatar_url_as(
format="png"),
picture=f"https://render-tron.appspot.com/screenshot/{ef.convert_to_url(i)}/?width=600&height=400")
embed.url = i
li.append(embed)
await pa1(li, ctx)
@client.slash_command(name = "screenshot",description = "Takes a screenshot of the website")
async def screenshot(ctx, url):
fp = await ef.get_async(f"https://render-tron.appspot.com/screenshot/{ef.convert_to_url(url)}/?width=600&height=400")
file = discord.File(fp, filename="image.png")
print(url)
await ctx.send(file = file)
@client.command()
async def lyrics(ctx, *, song):
j = await ef.get_async(f"https://api.popcat.xyz/lyrics?song={convert_to_url(song)}",kind="json")
await ctx.send(embed=ef.cembed(title=j['title'],description=j['lyrics'],color=re[8],thumbnail=j['image'],footer=j['artist']))
async def pa1(embeds, ctx, start_from=0):
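        # Reaction-based paginator: post embeds[start_from], then flip pages on
        # ◀️/▶️ reactions until a 360-second timeout.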
message = await ctx.send(embed=embeds[start_from])
pag = start_from
await message.add_reaction("◀️")
await message.add_reaction("▶️")
def check(reaction, user):
return (
user != client.user
and str(reaction.emoji) in ["◀️", "▶️"]
and reaction.message.id == message.id
)
while True:
try:
reaction, user = await client.wait_for(
"reaction_add", timeout=360, check=check
)
await message.remove_reaction(reaction, user)
if str(reaction.emoji) == "▶️" and pag + 1 != len(embeds):
pag += 1
await message.edit(embed=embeds[pag])
elif str(reaction.emoji) == "◀️" and pag != 0:
pag -= 1
await message.edit(embed=embeds[pag])
except asyncio.TimeoutError:
break
| <filename>src/Fun API.py
from functools import lru_cache
import nextcord as discord
import os
import aiohttp
import asyncio
from bs4 import BeautifulSoup
import datetime
import requests
import urllib.parse
from googlesearch import search
import External_functions as ef
def requirements():
return ["re"]
def main(client, re):
def convert_to_url(name):
name = urllib.parse.quote(name)
return name
@client.command()
async def gen(ctx, *, text):
print(ctx.guild.name)
re[0]+=1
API_URL2 = "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-2.7B"
header2 = {"Authorization": f"Bearer {os.environ['transformers_auth']}"}
payload2 = {
"inputs": text,
"parameters": {"max_new_tokens": 100, "return_full_text": True},
}
output = await ef.post_async(API_URL2, header2, payload2)
print(output)
o = output[0]["generated_text"]
await ctx.reply(
embed=ef.cembed(
title="Generated text", description=o, color=re[8],thumbnail=client.user.avatar_url_as(format="png")
)
)
@client.command()
async def kanye(ctx):
re[0] += 1
text = await ef.get_async("https://api.kanye.rest", kind="json");text=text["quote"]
embed = discord.Embed(
title="Kanye Rest", description=text, color=discord.Color(value=re[8])
)
embed.set_thumbnail(
url="https://i.pinimg.com/originals/3b/84/e1/3b84e1b85fb0a8068044df8b6cd8869f.jpg"
)
await ctx.send(embed=embed)
@client.command()
async def age(ctx, name):
try:
re[0] += 1
text = eval(
requests.get(
f"https://api.agify.io/?name={name}").content.decode()
)
st = ""
for i in text:
st += i + ":" + str(text[i]) + "\n"
await ctx.send(
embed=discord.Embed(
title="Agify", description=st, color=discord.Color(value=re[8])
)
)
except:
await ctx.send(
embed=discord.Embed(
title="Oops",
description="Something went wrong",
color=discord.Color(value=re[8]),
)
)
@client.command()
async def apis(ctx, page: int = 0):
a = await ef.get_async("https://api.publicapis.org/entries",kind="json")
b=a['entries']
embeds=[]
for i in range(a['count']):
text=f"{b[i]['Description']}\n\n\nAuth: {b[i]['Auth'] if b[i]['Auth']!='' else None}\nHTTPS: {b[i]['HTTPS']}\nCors: {b[i]['Cors']}\nCategory: {b[i]['Category']}"
embed = ef.cembed(
title=b[i]['API'],
description=text,
color=re[8],
url=b[i]['Link'],
footer=f"{i+1} of {a['count']}"
)
embeds.append(embed)
await pa1(embeds,ctx,page)
@client.command()
async def pokemon(ctx, pokemon=None):
re[0] + re[0] + 1
try:
a = await ef.get_async(f"https://pokeapi.co/api/v2/pokemon/{ef.convert_to_url(pokemon.lower())}",kind="json")
except:
a = "Not Found"
if a != "Not Found":
response = a
title = response["name"]
thumbnail = response["sprites"]["front_default"]
ability = "**ABILITIES:**\n"
for i in response["abilities"]:
ability += i["ability"]["name"] + "\n"
weight = "\n**WEIGHT**\n" + str(response["weight"])
embed = discord.Embed(
title=title,
description=ability + weight,
color=discord.Color(value=re[8]),
)
embed.set_thumbnail(url=thumbnail)
await ctx.send(embed=embed)
else:
await ctx.send(
embed=discord.Embed(
title="Hmm",
description="Not found",
color=discord.Color(value=re[8]),
)
)
@client.command()
async def ip(ctx, *, ip):
re[0] + re[0] + 1
ip = convert_to_url(ip)
print(ip)
print(f"https://ipinfo.io/{ip}/geo")
a = await ef.get_async(f"https://ipinfo.io/{ip}/geo",kind="json")
st = ""
if "status" not in list(a.keys()):
for i in list(a.keys()):
st += f"**{i}**:\n{a[i]}\n\n"
embed = discord.Embed(
title=ip, description=st, color=discord.Color(value=re[8])
)
embed.set_thumbnail(url=client.user.avatar_url_as(format="png"))
await ctx.send(embed=embed)
else:
embed = discord.Embed(
title="Oops",
description="Oops, couldnt find it :confused:",
color=discord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar_url_as(format="png"))
await ctx.send(embed=embed)
@client.command(aliases=["cat"])
async def cat_fact(ctx):
re[0] + re[0] + 1
a = eval(requests.get("https://catfact.ninja/fact").content.decode())
embed = discord.Embed(
title="Cat Fact", description=a["fact"], color=discord.Color(value=re[8])
)
embed.set_thumbnail(url="https://i.imgur.com/u1TPbIp.png?1")
await ctx.send(embed=embed)
@client.command(aliases=["desktop"])
async def gs_stat(ctx):
a = await ef.get_async("https://gs.statcounter.com/os-market-share/desktop/worldwide/")
start = a.find('og:image" content="')+len('og:image" content="')
end = a.find(".png",start)+len(".png")
url = a[start:end]
await ctx.send(embed=ef.cembed(title="Gs.statcounter Desktop OS",description="This contains the market share of desktop operating systems worldwide", color=re[8], thumbnail="https://pbs.twimg.com/profile_images/918460707787681792/fMVNRhz4_400x400.jpg", picture=url))
@client.command()
async def csvoyager(ctx, edition):
embeds=[]
for i in range(1,20):
embed = ef.cembed(title="CS Voyager",description=f"{i} of 20",color=re[8],picture=f"https://csvoyager.netlify.app/data/{edition}/{i}.jpg")
embeds.append(embed)
await pa1(embeds,ctx)
@client.command(aliases=["g"])
async def google(ctx, *, text):
re[0] += 1
li = []
print(text, str(ctx.author))
for i in search(text, num=5, stop=5, pause=0):
embed = ef.cembed(title="Google",
color=re[8],
thumbnail=client.user.avatar_url_as(
format="png"),
picture=f"https://render-tron.appspot.com/screenshot/{ef.convert_to_url(i)}/?width=600&height=400")
embed.url = i
li.append(embed)
await pa1(li, ctx)
@client.slash_command(name = "screenshot",description = "Takes a screenshot of the website")
async def screenshot(ctx, url):
fp = await ef.get_async(f"https://render-tron.appspot.com/screenshot/{ef.convert_to_url(url)}/?width=600&height=400")
file = discord.File(fp, filename="image.png")
print(url)
await ctx.send(file = file)
@client.command()
async def lyrics(ctx, *, song):
j = await ef.get_async(f"https://api.popcat.xyz/lyrics?song={convert_to_url(song)}",kind="json")
await ctx.send(embed=ef.cembed(title=j['title'],description=j['lyrics'],color=re[8],thumbnail=j['image'],footer=j['artist']))
async def pa1(embeds, ctx, start_from=0):
message = await ctx.send(embed=embeds[start_from])
pag = start_from
await message.add_reaction("◀️")
await message.add_reaction("▶️")
def check(reaction, user):
return (
user != client.user
and str(reaction.emoji) in ["◀️", "▶️"]
and reaction.message.id == message.id
)
while True:
try:
reaction, user = await client.wait_for(
"reaction_add", timeout=360, check=check
)
await message.remove_reaction(reaction, user)
if str(reaction.emoji) == "▶️" and pag + 1 != len(embeds):
pag += 1
await message.edit(embed=embeds[pag])
elif str(reaction.emoji) == "◀️" and pag != 0:
pag -= 1
await message.edit(embed=embeds[pag])
except asyncio.TimeoutError:
break

language: none | language_score: 1 | edu_score: 2.549528 | edu_int_score: 3

boats/urls.py | DonSelester/yachts | 0 stars | id 6630852

from django.urls import path, re_path
from . import views
app_name = 'boats'
urlpatterns = [
# /boats/
path('', views.IndexView, name='index'),
# /boats/owner/id/
re_path(r'^owner/(?P<user_id_id>[0-9]+)/$', views.owner_profile, name='owner_profile'),
# /boats/owner/addboat/id/
re_path(r'^owner/addboat/(?P<user_id_id>[0-9]+)/$', views.add_boat, name='addboat'),
# /boats/owner/crewcontract/id/
re_path(r'^owner/crewcontract/(?P<boat_id_id>[0-9]+)/$', views.crew_contract, name='crew_contract'),
# /boats/owner/boat_info/id/
re_path(r'^owner/boat_info/(?P<boat_id_id>[0-9]+)/$', views.boat_info, name='boat_info'),
# /boats/renter/id/
re_path(r'^renter/(?P<user_id_id>[0-9]+)/$', views.renter_profile, name='renter_profile'),
# /boats/renter/id/rentcontract/id/
re_path(r'^renter/(?P<user_id_id>[0-9]+)/rentcontract/(?P<boat_id_id>[0-9]+)/$', views.rent_contract, name='rent_contract'),
# /boats/register/
path('register/', views.register, name='register'),
path('register/owner', views.register_owner, name='register_owner'),
path('register/renter', views.register_renter, name='register_renter'),
# /boats/login/
path('login/', views.user_login, name='login'),
# /boats/logout/
path('logout/', views.user_logout, name='logout'),
# /boats/id/
re_path(r'^(?P<boat_id_id>[0-9]+)/$', views.boat_detail, name='boat_detail'),
# /boats/bay/
path('bay/', views.bays, name='bay'),
# /boats/bay/id/
re_path(r'^bay/(?P<pk>[0-9]+)/$', views.BayDetailView.as_view(), name='bay_detail'),
# /boats/crew/
path('crew/', views.CrewIndex, name='crew'),
# /boats/crew/id/
re_path(r'^crew/(?P<cr_id>[0-9]+)/$', views.CrewDetail, name='crew_detail'),
# /boats/competitions/
path('competitions/', views.Competetions, name='Competitions'),
# /boats/elling/
path('elling/', views.elling, name='elling'),
# /boats/elling/id
re_path(r'^elling/(?P<el_id>[0-9]+)/$', views.elling_detail, name='elling_detail'),
]
| <filename>boats/urls.py
from django.urls import path, re_path
from . import views
app_name = 'boats'
urlpatterns = [
# /boats/
path('', views.IndexView, name='index'),
# /boats/owner/id/
re_path(r'^owner/(?P<user_id_id>[0-9]+)/$', views.owner_profile, name='owner_profile'),
# /boats/owner/addboat/id/
re_path(r'^owner/addboat/(?P<user_id_id>[0-9]+)/$', views.add_boat, name='addboat'),
# /boats/owner/crewcontract/id/
re_path(r'^owner/crewcontract/(?P<boat_id_id>[0-9]+)/$', views.crew_contract, name='crew_contract'),
# /boats/owner/boat_info/id/
re_path(r'^owner/boat_info/(?P<boat_id_id>[0-9]+)/$', views.boat_info, name='boat_info'),
# /boats/renter/id/
re_path(r'^renter/(?P<user_id_id>[0-9]+)/$', views.renter_profile, name='renter_profile'),
# /boats/renter/id/rentcontract/id/
re_path(r'^renter/(?P<user_id_id>[0-9]+)/rentcontract/(?P<boat_id_id>[0-9]+)/$', views.rent_contract, name='rent_contract'),
# /boats/register/
path('register/', views.register, name='register'),
path('register/owner', views.register_owner, name='register_owner'),
path('register/renter', views.register_renter, name='register_renter'),
# /boats/login/
path('login/', views.user_login, name='login'),
# /boats/logout/
path('logout/', views.user_logout, name='logout'),
# /boats/id/
re_path(r'^(?P<boat_id_id>[0-9]+)/$', views.boat_detail, name='boat_detail'),
# /boats/bay/
path('bay/', views.bays, name='bay'),
# /boats/bay/id/
re_path(r'^bay/(?P<pk>[0-9]+)/$', views.BayDetailView.as_view(), name='bay_detail'),
# /boats/crew/
path('crew/', views.CrewIndex, name='crew'),
# /boats/crew/id/
re_path(r'^crew/(?P<cr_id>[0-9]+)/$', views.CrewDetail, name='crew_detail'),
# /boats/competitions/
path('competitions/', views.Competetions, name='Competitions'),
# /boats/elling/
path('elling/', views.elling, name='elling'),
# /boats/elling/id
re_path(r'^elling/(?P<el_id>[0-9]+)/$', views.elling_detail, name='elling_detail'),
]

language: en | language_score: 0.935955 | edu_score: 2.061755 | edu_int_score: 2

server/infrastructure/catalog_records/repositories.py | multi-coop/catalogage-donnees | 0 stars | id 6630853

import uuid
from typing import TYPE_CHECKING, Optional
from sqlalchemy import Column, DateTime, ForeignKey, func, select
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import relationship
from server.domain.catalog_records.entities import CatalogRecord
from server.domain.catalog_records.repositories import CatalogRecordRepository
from server.domain.common.types import ID
from ..database import Base, Database
if TYPE_CHECKING:
from ..datasets.models import DatasetModel
class CatalogRecordModel(Base):
__tablename__ = "catalog_record"
id: uuid.UUID = Column(UUID(as_uuid=True), primary_key=True)
dataset_id: ID = Column(UUID(as_uuid=True), ForeignKey("dataset.id"))
dataset: "DatasetModel" = relationship(
"DatasetModel",
back_populates="catalog_record",
)
created_at = Column(
DateTime(timezone=True), server_default=func.clock_timestamp(), nullable=False
)
def make_entity(instance: CatalogRecordModel) -> CatalogRecord:
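    # Map an ORM row onto the CatalogRecord domain entity.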
return CatalogRecord(
id=instance.id,
created_at=instance.created_at,
)
def make_instance(entity: CatalogRecord) -> CatalogRecordModel:
return CatalogRecordModel(
**entity.dict(
exclude={
"created_at", # Managed by DB for better time consistency
}
),
)
class SqlCatalogRecordRepository(CatalogRecordRepository):
def __init__(self, db: Database) -> None:
self._db = db
async def get_by_id(self, id: ID) -> Optional[CatalogRecord]:
async with self._db.session() as session:
stmt = select(CatalogRecordModel).where(CatalogRecordModel.id == id)
result = await session.execute(stmt)
try:
instance = result.scalar_one()
except NoResultFound:
return None
else:
return make_entity(instance)
async def insert(self, entity: CatalogRecord) -> ID:
async with self._db.session() as session:
instance = make_instance(entity)
session.add(instance)
await session.commit()
await session.refresh(instance)
return ID(instance.id)

language: en | language_score: 0.985275 | edu_score: 2.568091 | edu_int_score: 3

stringspair.py | mujeebishaque/WorkingWithPython | 4 stars | id 6630854

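# sparta spells out 0-100 in English; the program counts the vowels in the
# spelled-out inputs, then prints (as a word) how many pairs of inputs sum to
# that vowel total.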
sparta={0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine',
10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen',
17: 'seventeen', 18: 'eighteen', 19: 'nineteen', 20: 'twenty', 21: 'twentyone', 22: 'twentytwo',
23: 'twentythree', 24: 'twentyfour', 25: 'twentyfive', 26: 'twentysix', 27: 'twentyseven', 28: 'twentyeight',
29: 'twentynine', 30: 'thirty', 31: 'thirtyone', 32: 'thirtytwo', 33: 'thirtythree', 34: 'thirtyfour',
35: 'thirtyfive', 36: 'thirtysix', 37: 'thirtyseven', 38: 'thirtyeight', 39: 'thirtynine', 40: 'forty',
41: 'fortyone', 42: 'fortytwo', 43: 'fortythree', 44: 'fortyfour', 45: 'fortyfive', 46: 'fortysix',
47: 'fortyseven', 48: 'fortyeight', 49: 'fortynine', 50: 'fifty', 51: 'fiftyone', 52: 'fiftytwo',
53: 'fiftythree', 54: 'fiftyfour', 55: 'fiftyfive', 56: 'fiftysix', 57: 'fiftyseven', 58: 'fiftyeight',
59: 'fiftynine', 60: 'sixty', 61: 'sixtyone', 62: 'sixtytwo', 63: 'sixtythree', 64: 'sixtyfour',
65: 'sixtyfive', 66: 'sixtysix', 67: 'sixtyseven', 68: 'sixtyeight', 69: 'sixtynine', 70: 'seventy',
71: 'seventyone', 72: 'seventytwo', 73: 'seventythree', 74: 'seventyfour', 75: 'seventyfive', 76: 'seventysix',
77: 'seventyseven', 78: 'seventyeight', 79: 'seventynine', 80: 'eighty', 81: 'eightyone', 82: 'eightytwo',
83: 'eightythree', 84: 'eightyfour', 85: 'eightyfive', 86: 'eightysix', 87: 'eightyseven', 88: 'eightyeight',
89: 'eightynine', 90: 'ninety', 91: 'ninetyone', 92: 'ninetytwo', 93: 'ninetythree', 94: 'ninetyfour',
95: 'ninetyfive', 96: 'ninetysix', 97: 'ninetyseven', 98: 'ninetyeight', 99: 'ninetynine', 100: 'hundred'}
vowels={'a', 'e', 'i', 'o', 'u'}
p=int(input())
ps = list(map(int, input().split()))
g=0
def shake(j)->int:
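    # Count the vowels in the English spelling of j (only defined for 0-100).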
if(j<0 or j>100):
        return 0  # out-of-range guard; returning None would break g + shake(a) below
pg=0
for i in sparta[j]:
if i in vowels:
pg=pg+1
return pg
def Set_add(g, ps):
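    # Consume ps and collect every pair of values that sums to g.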
pgl = []
while ps:
number = ps.pop()
difference = g - number
if difference in ps:
pgl.append([difference, number])
pgl.reverse()
return pgl
for a in ps:
g=g+shake(a)
print(sparta[len(Set_add(g, ps))])

language: none | language_score: 1 | edu_score: 1.54975 | edu_int_score: 2

python/hangman/hangman.py | sci-c0/exercism-learning | 0 stars | id 6630855

# Game status categories
# Change the values as you see fit
STATUS_WIN = "win"
STATUS_LOSE = "lose"
STATUS_ONGOING = "ongoing"
UNKNOWN = '_'
class Hangman:
def __init__(self, word: str):
self._remaining_guesses = 10
self.status = STATUS_ONGOING
self._answer = word
self._guess = ['_'] * len(self._answer)
self._done = set()
@property
def remaining_guesses(self):
return self._remaining_guesses - 1
def _has_letter(self, char):
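        # Record the guess and return every index at which char occurs in the answer.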
positions = []
pos = -1
while True:
try:
pos = self._answer.index(char, pos + 1)
except ValueError:
break
else:
positions.append(pos)
self._done.add(char)
return positions
def guess(self, char):
if self.status != STATUS_ONGOING:
raise ValueError("No guesses remaining !!")
if char in self._done or char not in self._answer:
self._remaining_guesses -= 1
positions = self._has_letter(char)
for pos in positions:
self._guess[pos] = char
self.update_status()
def get_masked_word(self):
return ''.join(self._guess)
def update_status(self):
if self._remaining_guesses == 0:
self.status = STATUS_LOSE
        elif set(self._answer).issubset(self._done):  # wrong guesses also land in _done, so test subset, not equality
self.status = STATUS_WIN
def get_status(self):
return self.status

language: en | language_score: 0.69838 | edu_score: 3.37835 | edu_int_score: 3

072-assembler-2/casm.py | gynvael/stream | 152 stars | id 6630856

#!/usr/bin/python
import sys
import os
import string
import struct
from enum import Enum
class TokenTypes(Enum):
LABEL_INDICATOR = 1 # : in label declaration.
LITERAL_INT = 2
LITERAL_STR = 4
IDENTIFIER = 5 # Labels, etc.
class OutputElement(Enum):
LABEL = 1
class AsmException(Exception):
pass
class StreamException(Exception):
pass
class Stream():
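    # Character stream over a single source line; push()/pop() save and restore
    # the read position so the recursive-descent parsers can backtrack.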
def __init__(self, data, ln_no):
self.data = data
self.i = 0
self.stack = []
self.ln_no = ln_no
def push(self):
self.stack.append(self.i)
def pop(self):
self.i = self.stack.pop()
def dis(self):
self.stack.pop()
def reset(self):
self.i = 0
def peek(self, size=1, quiet=False):
if not quiet and self.i + size - 1 >= len(self.data):
raise StreamException("out of chars")
return self.data[self.i:self.i + size]
def get(self):
ch = self.peek()
self.i += 1
return ch
def unget(self):
if self.i == 0:
raise StreamException("tried to unget on first char")
self.i -= 1
def empty(self):
return self.i == len(self.data)
def s_anything(data):
return True
def s_ws(s):
while not s.empty() and s.peek() in " \t":
s.get()
return True
def s_cname(s, t):
CNAME0 = string.ascii_letters + "_.!:"
CNAME1 = string.ascii_letters + string.digits + "_."
token = ""
if s.peek() not in CNAME0:
return False
token = s.get()
try:
while s.peek() in CNAME1:
token += s.get()
except StreamException:
pass
t.append((TokenTypes.IDENTIFIER, token)) # TODO: maybe add more info on type
return True
def s_comma(s):
if s.peek() == ',':
s.get()
return True
return False
def s_qstring(s, t):
if s.peek() != '"':
return False
s.get()
string = ""
while s.peek() != '"':
string += s.get()
s.get()
t.append((TokenTypes.LITERAL_STR, string))
return True
def s_declit(s, t):
lit = ""
while not s.empty() and s.peek() in string.digits:
lit += s.get()
if not lit:
return False
t.append((TokenTypes.LITERAL_INT, int(lit)))
return True
def s_hexlit(s, t):
if s.peek(2, True) != '0x':
return False
s.get()
s.get()
lit = ""
while not s.empty() and s.peek() in string.hexdigits:
lit += s.get()
if not lit:
return False
t.append((TokenTypes.LITERAL_INT, int(lit, 16)))
return True
def s_arg(s, t):
# "asdf"
# 0x1234
# 1234
    # identifier
s.push()
if s_qstring(s, t):
s.dis()
return True
s.pop()
s.push()
if s_cname(s, t):
s.dis()
return True
s.pop()
s.push()
if s_hexlit(s, t):
s.dis()
return True
s.pop()
s.push()
if s_declit(s, t):
s.dis()
return True
s.pop()
return False
def s_label(s, t):
# WS* : CNAME [WS* COMMENT]
s.reset()
s_ws(s)
if s.peek(1, True) != ':':
return False
s.get()
t.append((TokenTypes.LABEL_INDICATOR, ':'))
if not s_cname(s, t):
t.pop()
raise AsmException("Missing label name after : in line %i" % s.ln_no)
s_ws(s)
try:
res = s_comment(s)
if res:
return True
except StreamException:
return True
return False
def s_stmt(s, t):
# WS* CNAME WS+ (ARG WS* [, WS* ARG])? WS* COMMENT?
s.reset()
s_ws(s)
if not s_cname(s, t):
return False
s_ws(s)
if not s.empty():
while s_arg(s, t):
s_ws(s)
if s.empty() or not s_comma(s):
break
s_ws(s)
s_ws(s)
try:
res = s_comment(s)
if res:
return True
except StreamException:
return True
return False
def s_comment(s):
# WS* "#" WS* COMMENT
# s.reset()
s_ws(s)
if s.peek() == '#':
return True
return False
def s_parse(s):
t = []
if s_comment(s):
return t
if s_label(s, t):
return t
if s_stmt(s, t):
return t
return t
class Assembler():
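    # Two-pass assembler: phase1() parses each line into output bytes and label
    # offsets, phase2() patches label references with their final addresses (offset + org).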
def __init__(self):
self.output = []
self.output_sz = 0
self.labels = {}
self.current_ln_no = None
self.ins_set = {
"NOP",
"INT3"
}
self.ins_list = [
#INT3 CC 8 - - - - -
( "INT3", 0xcc, 8, None, None, None, None, None, ),
#NOP 90 8 - - - - -
( "NOP", 0x90, 8, None, None, None, None, None, )
]
def handle_data(self, tokens):
fmt = {
"db": ("B", 1),
"dw": ("H", 2),
"dd": ("I", 4),
"dq": ("Q", 8)
}[tokens[0][1]]
output = []
output_sz = 0
for t in tokens[1:]:
if t[0] == TokenTypes.LITERAL_STR:
output.append(t[1])
output_sz += len(t[1])
continue
if t[0] == TokenTypes.LITERAL_INT:
output.append(struct.pack(fmt[0], t[1]))
output_sz += fmt[1]
continue
if t[0] == TokenTypes.IDENTIFIER:
output.append((OutputElement.LABEL, t[1], fmt[1]))
output_sz += fmt[1]
continue
raise Exception("handle data failed")
#print output
return (output, output_sz)
def handle_org(self, tokens):
self.org = tokens[1][1]
return True
def handle_ins(self, tokens):
ins = tokens[0][1].upper()
for e in self.ins_list:
if e[0] == ins:
break
else:
raise AsmException("Instruction found but not found wtf '%s'" % ins)
self.output.append(chr(e[1]))
self.output_sz += e[2] / 8
return True
def handle_label(self, tokens):
if len(tokens) != 2:
raise AsmException(
"Unexpected extra characters in label name on line %i" % (
self.current_ln_no
))
if tokens[1][0] != TokenTypes.IDENTIFIER:
raise AsmException(
"Syntax error at line %i" % self.current_ln_no)
label_name = tokens[1][1]
if label_name in self.labels:
raise AsmException("Label redeclared in line %i" % self.current_ln_no)
self.labels[label_name] = self.output_sz
return True
def phase1(self, data):
for ln_no, ln in data:
res = self.phase1_worker(ln_no, ln)
if not res:
raise AsmException("Something went wrong in phase 1 at line %i" % ln_no)
def phase1_worker(self, ln_no, ln):
self.current_ln_no = ln_no
s = Stream(ln, ln_no)
try:
tokens = s_parse(s)
except StreamException as e:
print ln_no, ":", e
raise
if len(tokens) == 0:
return True
print tokens
if (tokens[0][0] == TokenTypes.IDENTIFIER and
tokens[0][1] in {"db", "dw", "dd", "dq"}):
output, output_sz = self.handle_data(tokens)
self.output += output
self.output_sz += output_sz
return True
if tokens[0][0] == TokenTypes.IDENTIFIER and tokens[0][1] in {"!org"}:
return self.handle_org(tokens)
if (tokens[0][0] == TokenTypes.IDENTIFIER and
tokens[0][1].upper() in self.ins_set):
return self.handle_ins(tokens)
if tokens[0][0] == TokenTypes.LABEL_INDICATOR:
return self.handle_label(tokens)
raise AsmException("Unknown directive/instruction/etc in line %i" % ln_no)
def phase2(self):
for i in xrange(len(self.output)):
if type(self.output[i]) is str:
continue
el_type = self.output[i][0]
el_args = self.output[i][1:]
if el_type == OutputElement.LABEL:
fmt = {
1: "B",
2: "H",
4: "I",
8: "Q"
}[el_args[1]]
label = el_args[0]
if label not in self.labels:
raise AsmException("Unknown label '%s'" % label)
self.output[i] = struct.pack(fmt, self.labels[label] + self.org)
continue
raise AsmException("Unsupported output element %s" % repr(self.output[i]))
def get_output(self):
return self.output
def main():
if len(sys.argv) != 2:
sys.exit("usage: casm.py <fname.asm>")
infname = sys.argv[1]
outfname = os.path.splitext(infname)[0] + ".bin"
with open(infname, "r") as f:
data = f.read().splitlines()
data = [(i + 1, ln) for i, ln in enumerate(data) if ln.strip()]
c = Assembler()
c.phase1(data)
c.phase2()
with open(outfname, "wb") as f:
f.write(''.join(c.get_output()))
if __name__ == "__main__":
main()

language: en | language_score: 0.460053 | edu_score: 2.864206 | edu_int_score: 3

flask_db.py | apanshi/graphene_test | 0 stars | id 6630857

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_graphql import GraphQLView
from graphene_sqlalchemy import SQLAlchemyObjectType
import graphene
app = Flask(__name__)
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://test:[email protected]:3306/study?charset=utf8'
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(app)
class Book(db.Model):
__tablename__ = 'book'
id = db.Column(db.INT, primary_key=True)
name = db.Column(db.String, default="")
def format(self):
return dict(id=self.id, name=self.name)
class BookQuery(SQLAlchemyObjectType):
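    # graphene-sqlalchemy derives the GraphQL fields for this type from the Book model.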
class Meta:
model = Book
class Query(graphene.ObjectType):
books = graphene.List(BookQuery)
book = graphene.Field(BookQuery, id = graphene.Int())
def resolve_books(self, info):
print("line")
print(info.context)
query = BookQuery.get_query(info) # SQLAlchemy query
return query.all()
def resolve_book(self, info, id):
print("ccc")
print(info)
query = BookQuery.get_query(info) # SQLAlchemy query
print(query.filter(Book.id == id))
return query.filter(Book.id == id).first()
schema = graphene.Schema(query=Query)
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql',
schema=schema, graphiql=True))
app.run(port=4901)

language: en | language_score: 0.265849 | edu_score: 2.80415 | edu_int_score: 3

pyvim/editor.py | lieryan/pyvim | 0 stars | id 6630858

"""
The main editor class.
Usage::
files_to_edit = ['file1.txt', 'file2.py']
e = Editor(files_to_edit)
e.run() # Runs the event loop, starts interaction.
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import Condition
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.styles import DynamicStyle
from .commands.completer import create_command_completer
from .commands.handler import handle_command
from .commands.preview import CommandPreviewer
from .help import HELP_TEXT
from .key_bindings import create_key_bindings
from .layout import EditorLayout, get_terminal_title
from .style import generate_built_in_styles, get_editor_style_by_name
from .window_arrangement import WindowArrangement
from .io import FileIO, DirectoryIO, HttpIO, GZipFileIO
import pygments
import os
__all__ = (
'Editor',
)
class Editor(object):
"""
The main class. Containing the whole editor.
:param config_directory: Place where configuration is stored.
:param input: (Optionally) `prompt_toolkit.input.Input` object.
:param output: (Optionally) `prompt_toolkit.output.Output` object.
"""
def __init__(self, config_directory='~/.pyvim', input=None, output=None):
self.input = input
self.output = output
# Vi options.
self.show_line_numbers = True
self.highlight_search = True
self.paste_mode = False
self.show_ruler = True
self.show_wildmenu = True
self.expand_tab = True # Insect spaces instead of tab characters.
self.tabstop = 4 # Number of spaces that a tab character represents.
self.incsearch = True # Show matches while typing search string.
self.ignore_case = False # Ignore case while searching.
self.enable_mouse_support = True
self.display_unprintable_characters = True # ':set list'
self.enable_jedi = True # ':set jedi', for Python Jedi completion.
self.scroll_offset = 0 # ':set scrolloff'
self.relative_number = False # ':set relativenumber'
self.wrap_lines = True # ':set wrap'
self.break_indent = False # ':set breakindent'
self.cursorline = False # ':set cursorline'
self.cursorcolumn = False # ':set cursorcolumn'
self.colorcolumn = [] # ':set colorcolumn'. List of integers.
# Ensure config directory exists.
self.config_directory = os.path.abspath(os.path.expanduser(config_directory))
if not os.path.exists(self.config_directory):
os.mkdir(self.config_directory)
self.window_arrangement = WindowArrangement(self)
self.message = None
# Load styles. (Mapping from name to Style class.)
self.styles = generate_built_in_styles()
self.current_style = get_editor_style_by_name('vim')
# I/O backends.
self.io_backends = [
DirectoryIO(),
HttpIO(),
GZipFileIO(), # Should come before FileIO.
FileIO(),
]
# Create history and search buffers.
def handle_action(buff):
' When enter is pressed in the Vi command line. '
text = buff.text # Remember: leave_command_mode resets the buffer.
# First leave command mode. We want to make sure that the working
# pane is focussed again before executing the command handlers.
self.leave_command_mode(append_to_history=True)
# Execute command.
handle_command(self, text)
commands_history = FileHistory(os.path.join(self.config_directory, 'commands_history'))
self.command_buffer = Buffer(
accept_handler=handle_action,
enable_history_search=True,
completer=create_command_completer(self),
history=commands_history,
multiline=False)
search_buffer_history = FileHistory(os.path.join(self.config_directory, 'search_history'))
self.search_buffer = Buffer(
history=search_buffer_history,
enable_history_search=True,
multiline=False)
# Create key bindings registry.
self.key_bindings = create_key_bindings(self)
# Create layout and CommandLineInterface instance.
self.editor_layout = EditorLayout(self, self.window_arrangement)
self.application = self._create_application()
# Hide message when a key is pressed.
def key_pressed(_):
self.message = None
self.application.key_processor.before_key_press += key_pressed
# Command line previewer.
self.previewer = CommandPreviewer(self)
def load_initial_files(self, locations, in_tab_pages=False, hsplit=False, vsplit=False):
"""
Load a list of files.
"""
assert in_tab_pages + hsplit + vsplit <= 1 # Max one of these options.
# When no files were given, open at least one empty buffer.
locations2 = locations or [None]
# First file
self.window_arrangement.open_buffer(locations2[0])
for f in locations2[1:]:
if in_tab_pages:
self.window_arrangement.create_tab(f)
elif hsplit:
self.window_arrangement.hsplit(location=f)
elif vsplit:
self.window_arrangement.vsplit(location=f)
else:
self.window_arrangement.open_buffer(f)
self.window_arrangement.active_tab_index = 0
if locations and len(locations) > 1:
self.show_message('%i files loaded.' % len(locations))
def _create_application(self):
"""
Create CommandLineInterface instance.
"""
# Create Application.
application = Application(
input=self.input,
output=self.output,
editing_mode=EditingMode.VI,
layout=self.editor_layout.layout,
key_bindings=self.key_bindings,
# get_title=lambda: get_terminal_title(self),
style=DynamicStyle(lambda: self.current_style),
paste_mode=Condition(lambda: self.paste_mode),
# ignore_case=Condition(lambda: self.ignore_case), # TODO
include_default_pygments_style=False,
mouse_support=Condition(lambda: self.enable_mouse_support),
full_screen=True,
enable_page_navigation_bindings=True)
# Handle command line previews.
# (e.g. when typing ':colorscheme blue', it should already show the
# preview before pressing enter.)
def preview(_):
if self.application.layout.has_focus(self.command_buffer):
self.previewer.preview(self.command_buffer.text)
self.command_buffer.on_text_changed += preview
return application
@property
def current_editor_buffer(self):
"""
Return the `EditorBuffer` that is currently active.
"""
current_buffer = self.application.current_buffer
# Find/return the EditorBuffer with this name.
for b in self.window_arrangement.editor_buffers:
if b.buffer == current_buffer:
return b
@property
def add_key_binding(self):
"""
Shortcut for adding new key bindings.
(Mostly useful for a pyvimrc file, that receives this Editor instance
as input.)
"""
return self.key_bindings.add
def show_message(self, message):
"""
Set a warning message. The layout will render it as a "pop-up" at the
bottom.
"""
self.message = message
def use_colorscheme(self, name='default'):
"""
Apply new colorscheme. (By name.)
"""
try:
self.current_style = get_editor_style_by_name(name)
except pygments.util.ClassNotFound:
pass
def sync_with_prompt_toolkit(self):
"""
Update the prompt-toolkit Layout and FocusStack.
"""
# After executing a command, make sure that the layout of
# prompt-toolkit matches our WindowArrangement.
self.editor_layout.update()
# Make sure that the focus stack of prompt-toolkit has the current
# page.
window = self.window_arrangement.active_pt_window
if window:
self.application.layout.focus(window)
def show_help(self):
"""
Show help in new window.
"""
self.window_arrangement.hsplit(text=HELP_TEXT)
self.sync_with_prompt_toolkit() # Show new window.
def run(self):
"""
Run the event loop for the interface.
This starts the interaction.
"""
# Make sure everything is in sync, before starting.
self.sync_with_prompt_toolkit()
def pre_run():
# Start in navigation mode.
self.application.vi_state.input_mode = InputMode.NAVIGATION
# Run eventloop of prompt_toolkit.
self.application.run(pre_run=pre_run)
def enter_command_mode(self):
"""
Go into command mode.
"""
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save()
def leave_command_mode(self, append_to_history=False):
"""
Leave command mode. Focus document window again.
"""
self.previewer.restore()
self.application.layout.focus_last()
self.application.vi_state.input_mode = InputMode.NAVIGATION
self.command_buffer.reset(append_to_history=append_to_history)
| """
The main editor class.
Usage::
files_to_edit = ['file1.txt', 'file2.py']
e = Editor(files_to_edit)
e.run() # Runs the event loop, starts interaction.
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import Condition
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.styles import DynamicStyle
from .commands.completer import create_command_completer
from .commands.handler import handle_command
from .commands.preview import CommandPreviewer
from .help import HELP_TEXT
from .key_bindings import create_key_bindings
from .layout import EditorLayout, get_terminal_title
from .style import generate_built_in_styles, get_editor_style_by_name
from .window_arrangement import WindowArrangement
from .io import FileIO, DirectoryIO, HttpIO, GZipFileIO
import pygments
import os
__all__ = (
'Editor',
)
class Editor(object):
"""
The main class. Containing the whole editor.
:param config_directory: Place where configuration is stored.
:param input: (Optionally) `prompt_toolkit.input.Input` object.
:param output: (Optionally) `prompt_toolkit.output.Output` object.
"""
def __init__(self, config_directory='~/.pyvim', input=None, output=None):
self.input = input
self.output = output
# Vi options.
self.show_line_numbers = True
self.highlight_search = True
self.paste_mode = False
self.show_ruler = True
self.show_wildmenu = True
self.expand_tab = True # Insect spaces instead of tab characters.
self.tabstop = 4 # Number of spaces that a tab character represents.
self.incsearch = True # Show matches while typing search string.
self.ignore_case = False # Ignore case while searching.
self.enable_mouse_support = True
self.display_unprintable_characters = True # ':set list'
self.enable_jedi = True # ':set jedi', for Python Jedi completion.
self.scroll_offset = 0 # ':set scrolloff'
self.relative_number = False # ':set relativenumber'
self.wrap_lines = True # ':set wrap'
self.break_indent = False # ':set breakindent'
self.cursorline = False # ':set cursorline'
self.cursorcolumn = False # ':set cursorcolumn'
self.colorcolumn = [] # ':set colorcolumn'. List of integers.
# Ensure config directory exists.
self.config_directory = os.path.abspath(os.path.expanduser(config_directory))
if not os.path.exists(self.config_directory):
os.mkdir(self.config_directory)
self.window_arrangement = WindowArrangement(self)
self.message = None
# Load styles. (Mapping from name to Style class.)
self.styles = generate_built_in_styles()
self.current_style = get_editor_style_by_name('vim')
# I/O backends.
self.io_backends = [
DirectoryIO(),
HttpIO(),
GZipFileIO(), # Should come before FileIO.
FileIO(),
]
# Create history and search buffers.
def handle_action(buff):
' When enter is pressed in the Vi command line. '
text = buff.text # Remember: leave_command_mode resets the buffer.
# First leave command mode. We want to make sure that the working
# pane is focussed again before executing the command handlers.
self.leave_command_mode(append_to_history=True)
# Execute command.
handle_command(self, text)
commands_history = FileHistory(os.path.join(self.config_directory, 'commands_history'))
self.command_buffer = Buffer(
accept_handler=handle_action,
enable_history_search=True,
completer=create_command_completer(self),
history=commands_history,
multiline=False)
search_buffer_history = FileHistory(os.path.join(self.config_directory, 'search_history'))
self.search_buffer = Buffer(
history=search_buffer_history,
enable_history_search=True,
multiline=False)
# Create key bindings registry.
self.key_bindings = create_key_bindings(self)
        # Create layout and Application instance.
self.editor_layout = EditorLayout(self, self.window_arrangement)
self.application = self._create_application()
# Hide message when a key is pressed.
def key_pressed(_):
self.message = None
self.application.key_processor.before_key_press += key_pressed
# Command line previewer.
self.previewer = CommandPreviewer(self)
def load_initial_files(self, locations, in_tab_pages=False, hsplit=False, vsplit=False):
"""
Load a list of files.
"""
assert in_tab_pages + hsplit + vsplit <= 1 # Max one of these options.
# When no files were given, open at least one empty buffer.
locations2 = locations or [None]
# First file
self.window_arrangement.open_buffer(locations2[0])
for f in locations2[1:]:
if in_tab_pages:
self.window_arrangement.create_tab(f)
elif hsplit:
self.window_arrangement.hsplit(location=f)
elif vsplit:
self.window_arrangement.vsplit(location=f)
else:
self.window_arrangement.open_buffer(f)
self.window_arrangement.active_tab_index = 0
if locations and len(locations) > 1:
self.show_message('%i files loaded.' % len(locations))
def _create_application(self):
"""
        Create the prompt_toolkit Application instance.
"""
# Create Application.
application = Application(
input=self.input,
output=self.output,
editing_mode=EditingMode.VI,
layout=self.editor_layout.layout,
key_bindings=self.key_bindings,
# get_title=lambda: get_terminal_title(self),
style=DynamicStyle(lambda: self.current_style),
paste_mode=Condition(lambda: self.paste_mode),
# ignore_case=Condition(lambda: self.ignore_case), # TODO
include_default_pygments_style=False,
mouse_support=Condition(lambda: self.enable_mouse_support),
full_screen=True,
enable_page_navigation_bindings=True)
# Handle command line previews.
# (e.g. when typing ':colorscheme blue', it should already show the
# preview before pressing enter.)
def preview(_):
if self.application.layout.has_focus(self.command_buffer):
self.previewer.preview(self.command_buffer.text)
self.command_buffer.on_text_changed += preview
return application
@property
def current_editor_buffer(self):
"""
Return the `EditorBuffer` that is currently active.
"""
current_buffer = self.application.current_buffer
# Find/return the EditorBuffer with this name.
for b in self.window_arrangement.editor_buffers:
if b.buffer == current_buffer:
return b
@property
def add_key_binding(self):
"""
Shortcut for adding new key bindings.
(Mostly useful for a pyvimrc file, that receives this Editor instance
as input.)
"""
return self.key_bindings.add
def show_message(self, message):
"""
Set a warning message. The layout will render it as a "pop-up" at the
bottom.
"""
self.message = message
def use_colorscheme(self, name='default'):
"""
Apply new colorscheme. (By name.)
"""
try:
self.current_style = get_editor_style_by_name(name)
except pygments.util.ClassNotFound:
pass
def sync_with_prompt_toolkit(self):
"""
Update the prompt-toolkit Layout and FocusStack.
"""
# After executing a command, make sure that the layout of
# prompt-toolkit matches our WindowArrangement.
self.editor_layout.update()
# Make sure that the focus stack of prompt-toolkit has the current
# page.
window = self.window_arrangement.active_pt_window
if window:
self.application.layout.focus(window)
def show_help(self):
"""
Show help in new window.
"""
self.window_arrangement.hsplit(text=HELP_TEXT)
self.sync_with_prompt_toolkit() # Show new window.
def run(self):
"""
Run the event loop for the interface.
This starts the interaction.
"""
# Make sure everything is in sync, before starting.
self.sync_with_prompt_toolkit()
def pre_run():
# Start in navigation mode.
self.application.vi_state.input_mode = InputMode.NAVIGATION
# Run eventloop of prompt_toolkit.
self.application.run(pre_run=pre_run)
def enter_command_mode(self):
"""
Go into command mode.
"""
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save()
def leave_command_mode(self, append_to_history=False):
"""
Leave command mode. Focus document window again.
"""
self.previewer.restore()
self.application.layout.focus_last()
self.application.vi_state.input_mode = InputMode.NAVIGATION
self.command_buffer.reset(append_to_history=append_to_history)
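A minimal usage sketch for the Editor class above (not part of the original module). It assumes the package is importable as pyvim, as the ~/.pyvim defaults suggest; the file names are placeholders.
from pyvim.editor import Editor

editor = Editor(config_directory='~/.pyvim')
editor.load_initial_files(['notes.txt', 'todo.py'], in_tab_pages=True)
editor.run()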
mayan/apps/acls/migrations/0003_auto_20180402_0339.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 2 | 6630859 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('acls', '0002_auto_20150703_0513'),
]
operations = [
migrations.AlterModelOptions(
name='accesscontrollist',
options={
'ordering': ('pk',), 'verbose_name': 'Access entry',
'verbose_name_plural': 'Access entries'
},
),
]
app/__init__.py | calcutec/flask-burtonblog | 0 | 6630860 | import os
from flask.ext.socketio import SocketIO
from .momentjs import momentjs
from flask import Flask
from jinja2 import FileSystemLoader
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.mail import Mail
from flask.ext.assets import Environment, Bundle
from config import ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD, SQLALCHEMY_DATABASE_URI
from flask_wtf.csrf import CsrfProtect
staticdirectory = "static"
class MyFlask(Flask):
@property
def static_folder(self):
if self.config.get('STATIC_FOLDER') is not None:
return os.path.join(self.root_path, self.config.get('STATIC_FOLDER'))
@static_folder.setter
def static_folder(self, value):
self.config['STATIC_FOLDER'] = value
# Now these are equivalent:
app = Flask(__name__, static_folder=staticdirectory)
app.config['STATIC_FOLDER'] = staticdirectory
async_mode = 'eventlet'
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
socketio = SocketIO(app, async_mode=async_mode)
base_dir = os.path.dirname(os.path.realpath(__file__))
app.jinja_loader = FileSystemLoader(os.path.join(base_dir, staticdirectory, 'templates'))
app.jinja_env.globals['momentjs'] = momentjs
app.config.from_object('config')
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
assets = Environment(app)
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
lm.login_message = 'Please log in to access this page.'
mail = Mail(app)
CsrfProtect(app)
js_templates = Bundle('templates/archive_entry.js', 'templates/member.js', 'templates/header.js', 'templates/nav.js',
'templates/main_entry.js', 'templates/person.js', 'templates/photo_detail.js',
'templates/home_page.js', 'templates/comments.js', 'templates/comment.js',
'templates/upload_form.js', 'templates/photo_text_form.js', 'templates/photo_inputs.js',
'templates/comment_form.js', 'templates/story_detail.js', 'templates/votes.js',
'templates/followers.js', output='templates/templates.js')
assets.register('js_templates', js_templates)
app.config['OAUTH_CREDENTIALS'] = {
'facebook': {
'id': os.environ['FACEBOOK_AUTH'],
'secret': os.environ['FACEBOOK_AUTH_SECRET']
},
'google': {
'id': os.environ['GOOGLE_AUTH'],
'secret': os.environ['GOOGLE_AUTH_SECRET']
}
}
if not app.debug and MAIL_SERVER != '':
import logging
from logging.handlers import SMTPHandler
credentials = None
if MAIL_USERNAME or MAIL_PASSWORD:
credentials = (MAIL_USERNAME, MAIL_PASSWORD)
mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT),
'no-reply@' + MAIL_SERVER, ADMINS,
'burtonblog failure', credentials)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not app.debug and os.environ.get('HEROKU') is None:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('tmp/burtonblog.log', 'a',
1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('burtonblog startup')
if os.environ.get('HEROKU') is not None:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('burtonblog startup')
app.config["S3_LOCATION"] = 'https://s3.amazonaws.com/aperturus/'
app.config["S3_UPLOAD_DIRECTORY"] = 'user_imgs'
app.config["S3_BUCKET"] = 'aperturus'
app.config["S3_REGION"] = 'us-east-1'
app.config["AWS_ACCESS_KEY_ID"] = os.environ['AWS_ACCESS_KEY_ID']
app.config["AWS_SECRET_ACCESS_KEY"] = os.environ['AWS_SECRET_ACCESS_KEY']
from app import views, models
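This module only builds the application and SocketIO objects; the server itself is presumably started from a separate entry point. A hedged sketch of such an entry point (host and port are placeholders):
from app import app, socketio

if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=5000)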
lsp/lwm2m_object.py | MoatyX/lwm2m-objects-generator | 1 | 6630861 |
import xml.etree.ElementTree as ET
import lsp.util
from lsp.lwm2m_resource import Lwm2mResource
class Lwm2mObject:
def __init__(self, xml_path: str):
self.xml_path = xml_path
self.RESOURCES = []
self.MULTI_INSTANCE = None
self.RES_COUNT = None
self.HEADER_GUARD = None
self.OBJ_DESC = None
self.OBJ_INST = None
self.OBJ_NAME = None
self.OBJ_ID = None
def parse(self):
tree = ET.parse(self.xml_path)
if tree is None:
print(f"failed to parse {self.xml_path}")
return
root = tree.getroot()[0] # LWM2M(actual root) -> OBJECT(use this as the "root")
self.OBJ_ID = root.find("ObjectID").text
self.OBJ_NAME = str(root.find("Name").text).replace(' ', '_').replace('-', '_').replace('/', '_').replace('.', '_')
self.OBJ_DESC = root.find("Description1").text
self.HEADER_GUARD = "NX_GENERATED_" + self.OBJ_NAME.upper() + "_ID_" + self.OBJ_ID + "_H_"
self.OBJ_INST = root.find("MultipleInstances").text
self.MULTI_INSTANCE = lsp.util.obj_is_multiple(self.OBJ_INST)
resources_raw = root.find("Resources")
for res_raw in resources_raw:
res = Lwm2mResource(res_raw)
res.parse()
self.RESOURCES.append(res)
pass
self.RES_COUNT = len(self.RESOURCES)
pass
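A short driver sketch for the parser above (not part of the original file); the XML path is a placeholder for one of the OMA LwM2M object definition files this generator consumes.
from lsp.lwm2m_object import Lwm2mObject

obj = Lwm2mObject("objects/3303.xml")   # placeholder path
obj.parse()
print(obj.OBJ_ID, obj.OBJ_NAME, obj.RES_COUNT, obj.MULTI_INSTANCE)
for res in obj.RESOURCES:
    print(res.__dict__)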
xmas-requests/script.py | lhzaccarelli/thm-scripts | 1 | 6630862 |
import requests
#Host on port 3000
host = "http://10.10.169.100:3000/"
#Original path /
path = ""
#Variable for flag
flag = ""
print ("Running")
#Run the script while path is not equal to 'end'
while (path != "end"):
#Get request to server
response = requests.get(host+path)
#Convert the response to json
json = response.json()
#Get value and next path
value = json['value']
path = json['next']
#Check if value is different to 'end'
if (value != "end"):
#Copy value char to the end of flag
flag += value
#When loop is over, print flag.
print("Flag: " + flag) | import requests
#Host on port 3000
host = "http://10.10.169.100:3000/"
#Original path /
path = ""
#Variable for flag
flag = ""
print ("Running")
#Run the script while path is not equal to 'end'
while (path != "end"):
#Get request to server
response = requests.get(host+path)
#Convert the response to json
json = response.json()
#Get value and next path
value = json['value']
path = json['next']
#Check if value is different to 'end'
if (value != "end"):
#Copy value char to the end of flag
flag += value
#When loop is over, print flag.
print("Flag: " + flag) | en | 0.803653 | #Host on port 3000 #Original path / #Variable for flag #Run the script while path is not equal to 'end' #Get request to server #Convert the response to json #Get value and next path #Check if value is different to 'end' #Copy value char to the end of flag #When loop is over, print flag. | 3.056008 | 3 |
wit_core/tests/test_http_server.py | LucasOliveiraS/wit-core | 1 | 6630863 |
import pytest
from fastapi.testclient import TestClient
from wit_core.api.http_server import app
client = TestClient(app)
def test_http_message_response(mocker):
process_intent = mocker.patch("wit_core.api.http_server.process_intent", return_value="Message processed")
response = client.post("/message", json={"message": "Message input"})
process_intent.assert_called_with("Message input")
assert response.status_code == 200
assert response.json() == {"res": "Message processed"}
def test_http_message_response_error(mocker):
mocker.patch("wit_core.api.http_server.process_intent", side_effect=Exception("No intents were found for the question"))
response = client.post("/message", json={"message": "Message input"})
assert response.status_code == 500
assert response.json() == {"detail": "Error processing message"}
def test_http_message_response_empty_string(mocker):
response = client.post("/message", json={"message": ""})
assert response.status_code == 500
assert response.json() == {"detail": "Not allowed empty string"}
ext/pixler/tools/tmx2bin.py | slembcke/GGJ20-template | 2 | 6630864 | import sys
import struct
import base64
import xml.etree.ElementTree
filename = sys.argv[1]
map = xml.etree.ElementTree.parse(filename).getroot()
data = map.find('layer/data')
data_u32 = base64.b64decode(data.text).strip()
format = "{0}I".format(len(data_u32)/4)
values = struct.unpack(format, data_u32)
# TODO is the padding weird?
attribs = [(e - 1) >> 8 for e in values] + [0]*64
values = [(e - 1) & 0xFF for e in values]
def attr_byte(i):
i0 = 4*(i % 8) + 128*(i/8)
return (attribs[i0] << 0) | (attribs[i0 + 2] << 2) | (attribs[i0 + 64] << 4) | (attribs[i0 + 66] << 6)
if len(values) == 960:
values += [attr_byte(i) for i in range(64)]
format = "{0}B".format(len(values))
data_u8 = struct.pack(format, *values)
sys.stdout.write(data_u8)
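The script takes the .tmx path as its only argument and writes the packed bytes to stdout (e.g. python tmx2bin.py level1.tmx > level1.bin). A sketch of reading that output back, assuming a full 32x30 map so the 64 attribute bytes are present; the file name is a placeholder:
with open("level1.bin", "rb") as f:      # placeholder file name
    data = f.read()

tiles = data[:960]    # one byte per tile of the name table
attrs = data[960:]    # 64 packed attribute bytes (only written for 960-tile maps)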
thaniya_server_archive/src/thaniya_server_archive/archive/BackupDataFile.py | jkpubsrc/Thaniya | 1 | 6630865 |
import typing
import re
import time
import os
import random
import jk_utils
import jk_json
#
# This class represents a backup data file. Backup data files form the main data content of a backup.
#
class BackupDataFile(object):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, fe):
self.__fileName = fe.name
self.__filePath = fe.path
self.__size = jk_utils.AmountOfBytes.parse(fe.stat().st_size)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@property
def filePath(self) -> str:
return self.__filePath
#
@property
def fileName(self) -> str:
return self.__fileName
#
@property
def sizeInBytes(self) -> jk_utils.AmountOfBytes:
return self.__size
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
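The constructor expects an os.DirEntry-like object (it uses .name, .path and .stat()), so scanning a backup directory is the natural way to build these wrappers. A sketch, with an assumed import path derived from the file location and a placeholder directory:
import os

from thaniya_server_archive.archive.BackupDataFile import BackupDataFile  # assumed import path

for fe in os.scandir("/var/backups/thaniya"):    # placeholder directory
    if fe.is_file():
        bdf = BackupDataFile(fe)
        print(bdf.fileName, bdf.sizeInBytes)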
list/3.py | EmilianStoyanov/python_exercises_practice_solution | 0 | 6630866 | def max_num_in_list(items):
tot = max(items)
return tot
print(max_num_in_list([1, 2, -8, 0]))
#
# def max_num_in_list(list):
# max = list[0]
# for a in list:
# if a > max:
# max = a
# return max
#
#
# print(max_num_in_list([1, 2, -8, 0]))
"""
Write a Python program to get the largest number from a list.
"""
compression/compress.py | oxixes/bad-apple-cg50 | 1 | 6630867 | import numpy as np
import cv2
import math
import sys
if len(sys.argv) < 2:
print("Usage: compress.py [video]")
print("Note: video must have a resolution lower than or equal to 384x216 px.")
exit()
totalBytes = 0
video = cv2.VideoCapture(sys.argv[1])
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(video.get(cv2.CAP_PROP_FPS))
scale = int(216 / h)
if w > 384 or h > 216:
print("Video must have a resolution lower than or equal to 384x216 px.")
exit()
print(f"H:{h}, W:{w}, S:{scale}, FPS:{fps}")
frameCounter = 0
continuing = True
f = open("data.bin", "wb")
f.write(frameCount.to_bytes(2, 'big'))
f.write(w.to_bytes(2, 'big'))
f.write(h.to_bytes(2, 'big'))
f.write(fps.to_bytes(2, 'big'))
f.write(scale.to_bytes(1, 'big'))
while (frameCounter < frameCount and continuing):
continuing, frame = video.read()
frameCounter += 1
frameArray = []
for y in range(h):
for x in range(w):
brightness = int(np.sum(frame[y, x])/3)
white = True if brightness > 127 else False
frameArray.append(white)
frameArray.append(None)
rep = []
previous = None
numRep = 0
for i in frameArray:
if i != previous and previous != None:
rep.append(numRep)
numRep = 0
previous = i
if i == previous:
numRep += 1
if previous == None:
numRep = 0
previous = i
rep[len(rep) - 1] = rep[len(rep) - 1] + 1
f.write(len(rep).to_bytes(2, 'big'))
for i in range(len(rep)):
f.write(rep[i].to_bytes(2, 'big'))
if frameArray[0] == True:
f.write(int(1).to_bytes(1, 'big'))
else:
f.write(int(0).to_bytes(1, 'big'))
print(f"Frame #{frameCounter} -> {len(rep) * 2} + 3 bytes")
totalBytes += (len(rep) * 2) + 3
print(f"TOTAL BYTES: {totalBytes}. Output at data.bin.")
video.release()
f.close()
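For reference, a decoder sketch that mirrors the format written above: an 8-byte header of big-endian 16-bit frame count, width, height and fps, a 1-byte scale, then per frame a 16-bit run count, that many 16-bit run lengths, and one byte telling whether the first run is white. This is not part of the original script.
import struct

def decode(path):
    with open(path, "rb") as f:
        frame_count, w, h, fps = struct.unpack(">HHHH", f.read(8))
        scale = f.read(1)[0]
        frames = []
        for _ in range(frame_count):
            n_runs = struct.unpack(">H", f.read(2))[0]
            runs = struct.unpack(">%dH" % n_runs, f.read(2 * n_runs))
            white = bool(f.read(1)[0])
            pixels = []
            for run in runs:
                pixels.extend([white] * run)
                white = not white
            frames.append(pixels[:w * h])   # row-major booleans, True = white pixel
        return w, h, fps, scale, frames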
lib/galaxy/model/view/utils.py | maikenp/galaxy | 1 | 6630868 | """
View wrappers
"""
from inspect import getmembers
from sqlalchemy import (
Column,
MetaData,
Table,
)
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
class View:
"""Base class for Views."""
@staticmethod
def _make_table(name, selectable, pkeys):
""" Create a view.
:param name: The name of the view.
:param selectable: SQLAlchemy selectable.
:param pkeys: set of primary keys for the selectable.
"""
columns = [
Column(
c.name,
c.type,
primary_key=(c.name in pkeys)
)
for c in selectable.c
]
# We do not use the metadata object from model.mapping.py that contains all the Table objects
# because that would create a circular import (create_view is called from View objects
# in model.view; but those View objects are imported into model.mapping.py where the
# metadata object we need is defined). Thus, we do not use the after_create/before_drop
# hooks to automate creating/dropping views. Instead, this is taken care of in install_views().
# The metadata object passed to Table() should be empty: this table is internal to a View
# object and is not intended to be created in the database.
return Table(name, MetaData(), *columns)
class CreateView(DDLElement):
def __init__(self, name, selectable):
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name):
self.name = name
@compiler.compiles(CreateView)
def compile_create_view(element, compiler, **kw):
compiled_selectable = compiler.sql_compiler.process(element.selectable, literal_binds=True)
return f'CREATE VIEW {element.name} AS {compiled_selectable}'
@compiler.compiles(DropView)
def compile_drop_view(element, compiler, **kw):
return f'DROP VIEW IF EXISTS {element.name}'
def is_view_model(o):
return hasattr(o, '__view__') and issubclass(o, View)
def install_views(engine):
import galaxy.model.view
views = getmembers(galaxy.model.view, is_view_model)
for _, view in views:
# adding DropView here because our unit-testing calls this function when
# it mocks the app and CreateView will attempt to rebuild an existing
# view in a database that is already made, the right answer is probably
        # to change the sql that gets emitted when CreateView is rendered.
engine.execute(DropView(view.name))
engine.execute(CreateView(view.name, view.__view__))
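A hypothetical View subclass to show how the helpers above fit together; the source table and column names are invented, install_views() would only discover such a class if it were defined in galaxy.model.view, and SQLAlchemy 1.x select([...]) syntax is assumed.
from sqlalchemy import Column, Integer, MetaData, String, Table, select

from galaxy.model.view.utils import View

# An invented source table, purely for illustration.
_metadata = MetaData()
_widget = Table(
    'widget', _metadata,
    Column('id', Integer, primary_key=True),
    Column('state', String(32)),
)

class WidgetStateView(View):
    name = 'widget_state_view'
    __view__ = select([_widget.c.id, _widget.c.state]).where(_widget.c.state != 'deleted')
    pkeys = {'id'}
    table = View._make_table(name, __view__, pkeys)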
| """
View wrappers
"""
from inspect import getmembers
from sqlalchemy import (
Column,
MetaData,
Table,
)
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
class View:
"""Base class for Views."""
@staticmethod
def _make_table(name, selectable, pkeys):
""" Create a view.
:param name: The name of the view.
:param selectable: SQLAlchemy selectable.
:param pkeys: set of primary keys for the selectable.
"""
columns = [
Column(
c.name,
c.type,
primary_key=(c.name in pkeys)
)
for c in selectable.c
]
# We do not use the metadata object from model.mapping.py that contains all the Table objects
# because that would create a circular import (create_view is called from View objects
# in model.view; but those View objects are imported into model.mapping.py where the
# metadata object we need is defined). Thus, we do not use the after_create/before_drop
# hooks to automate creating/dropping views. Instead, this is taken care of in install_views().
# The metadata object passed to Table() should be empty: this table is internal to a View
# object and is not intended to be created in the database.
return Table(name, MetaData(), *columns)
class CreateView(DDLElement):
def __init__(self, name, selectable):
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name):
self.name = name
@compiler.compiles(CreateView)
def compile_create_view(element, compiler, **kw):
compiled_selectable = compiler.sql_compiler.process(element.selectable, literal_binds=True)
return f'CREATE VIEW {element.name} AS {compiled_selectable}'
@compiler.compiles(DropView)
def compile_drop_view(element, compiler, **kw):
return f'DROP VIEW IF EXISTS {element.name}'
def is_view_model(o):
return hasattr(o, '__view__') and issubclass(o, View)
def install_views(engine):
import galaxy.model.view
views = getmembers(galaxy.model.view, is_view_model)
for _, view in views:
# adding DropView here because our unit-testing calls this function when
# it mocks the app and CreateView will attempt to rebuild an existing
# view in a database that is already made, the right answer is probably
# to change the sql that gest emitted when CreateView is rendered.
engine.execute(DropView(view.name))
engine.execute(CreateView(view.name, view.__view__))
| en | 0.860576 | View wrappers Base class for Views. Create a view. :param name: The name of the view. :param selectable: SQLAlchemy selectable. :param pkeys: set of primary keys for the selectable. # We do not use the metadata object from model.mapping.py that contains all the Table objects # because that would create a circular import (create_view is called from View objects # in model.view; but those View objects are imported into model.mapping.py where the # metadata object we need is defined). Thus, we do not use the after_create/before_drop # hooks to automate creating/dropping views. Instead, this is taken care of in install_views(). # The metadata object passed to Table() should be empty: this table is internal to a View # object and is not intended to be created in the database. # adding DropView here because our unit-testing calls this function when # it mocks the app and CreateView will attempt to rebuild an existing # view in a database that is already made, the right answer is probably # to change the sql that gest emitted when CreateView is rendered. | 2.656724 | 3 |
scripts/set_sample_data.py | CM-haruna-oka/serverlss-etl-sample | 0 | 6630869 | import boto3
import yaml
with open('scripts/config.yml', 'r') as yml:
config = yaml.safe_load(yml)
DEVICES_TABLE_NAME = 'Devices'
def set_test_data(table_name, data):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table_name)
for i in data:
table.put_item(Item=i)
set_test_data(DEVICES_TABLE_NAME, config['devices'])
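The script assumes scripts/config.yml carries a devices list whose entries are complete DynamoDB items; the attribute names below are invented for illustration. The equivalent in-memory structure after yaml.safe_load would be:
config = {
    "devices": [
        {"device_id": "dev-001", "model": "sensor-a", "status": "active"},
        {"device_id": "dev-002", "model": "sensor-b", "status": "inactive"},
    ]
}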
tests/backend/mongodb/test_admin.py | ldmberman/bigchaindb | 0 | 6630870 | """Tests for the :mod:`bigchaindb.backend.mongodb.admin` module."""
import copy
from unittest import mock
import pytest
from pymongo.database import Database
from pymongo.errors import OperationFailure
@pytest.fixture
def mock_replicaset_config():
return {
'config': {
'_id': 'bigchain-rs',
'members': [
{
'_id': 0,
'arbiterOnly': False,
'buildIndexes': True,
'hidden': False,
'host': 'localhost:27017',
'priority': 1.0,
'slaveDelay': 0,
'tags': {},
'votes': 1
}
],
'version': 1
}
}
@pytest.fixture
def connection():
from bigchaindb.backend import connect
connection = connect()
# connection is a lazy object. It only actually creates a connection to
# the database when its first used.
# During the setup of a MongoDBConnection some `Database.command` are
# executed to make sure that the replica set is correctly initialized.
# Here we force the the connection setup so that all required
# `Database.command` are executed before we mock them it in the tests.
connection.connect()
return connection
def test_add_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
expected_config['config']['version'] += 1
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
add_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_add_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
add_replicas(connection, ['localhost:27018'])
def test_remove_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['version'] += 1
# add some hosts to the configuration to remove
mock_replicaset_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
remove_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_remove_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
remove_replicas(connection, ['localhost:27018'])
| """Tests for the :mod:`bigchaindb.backend.mongodb.admin` module."""
import copy
from unittest import mock
import pytest
from pymongo.database import Database
from pymongo.errors import OperationFailure
@pytest.fixture
def mock_replicaset_config():
return {
'config': {
'_id': 'bigchain-rs',
'members': [
{
'_id': 0,
'arbiterOnly': False,
'buildIndexes': True,
'hidden': False,
'host': 'localhost:27017',
'priority': 1.0,
'slaveDelay': 0,
'tags': {},
'votes': 1
}
],
'version': 1
}
}
@pytest.fixture
def connection():
from bigchaindb.backend import connect
connection = connect()
# connection is a lazy object. It only actually creates a connection to
# the database when its first used.
# During the setup of a MongoDBConnection some `Database.command` are
# executed to make sure that the replica set is correctly initialized.
# Here we force the the connection setup so that all required
# `Database.command` are executed before we mock them it in the tests.
connection.connect()
return connection
def test_add_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
expected_config['config']['version'] += 1
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
add_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_add_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
add_replicas(connection, ['localhost:27018'])
def test_remove_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['version'] += 1
# add some hosts to the configuration to remove
mock_replicaset_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
remove_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_remove_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
remove_replicas(connection, ['localhost:27018'])
| en | 0.871185 | Tests for the :mod:`bigchaindb.backend.mongodb.admin` module. # connection is a lazy object. It only actually creates a connection to # the database when its first used. # During the setup of a MongoDBConnection some `Database.command` are # executed to make sure that the replica set is correctly initialized. # Here we force the the connection setup so that all required # `Database.command` are executed before we mock them it in the tests. # add some hosts to the configuration to remove | 2.406196 | 2 |
tests/integration/test_cpt.py | POFK/LensFinder | 0 | 6630871 | #!/usr/bin/env python
# coding=utf-8
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F
from lens.checkpoint import save, load
import unittest
from unittest import mock
class TestCpt(unittest.TestCase):
"""Checkpoint test"""
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch("torch.save")
def test_save(self, torch_save):
path = 'path_to_save'
epoch = 1
model = mock.MagicMock()
loss = mock.MagicMock()
device = torch.device('cpu')
save_dict = {}
save_dict['epoch'] = epoch
save_dict['model_state_dict'] = model.state_dict()
save_dict['loss'] = loss
save_dict['device'] = device
save(path, epoch, model, loss, device, opt=None)
# torch_save.assert_called_once_with(save_dict, path)
@mock.patch("torch.load")
def test_load(self, torch_load):
torch_load = mock.MagicMock()
path = 'path_to_save'
model = mock.MagicMock()
opt = mock.MagicMock()
args = mock.MagicMock()
args.use_cuda=False
load(path, model, args=args, opt=None)
if __name__ == "__main__":
unittest.main()
website/ttkom/forms.py | CharlesRigal/projet_website | 0 | 6630872 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import Comment
class ChangeForm(UserChangeForm):
password = None
class Meta:
model = User
fields = ("username", "first_name", "last_name", "email")
class RegisterForm(UserCreationForm):
email = forms.EmailField(max_length=200, help_text="Requis")
class Meta:
model = User
fields = ("username", "email", "<PASSWORD>", "<PASSWORD>")
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ["content"]
labels = {"content": "commentaire"}
tests/test_mock.py | SuminAndrew/tornado-mocked-httpclient | 7 | 6630873 | # coding=utf-8
from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, asynchronous, RequestHandler
from tornado_mock.httpclient import get_response_stub, patch_http_client, set_stub
class TestHandler(RequestHandler):
@asynchronous
def get(self, *args, **kwargs):
def callback(response):
self.write('{} : '.format(response.code))
if 'Content-Type' in response.headers:
self.write('{} : '.format(response.headers.get('Content-Type')))
self.finish(response.body)
self_uri = 'http://' + self.request.host + self.request.path
self.application.http_client.fetch(self_uri + '?arg1=val1&arg2=val2', method='POST', body='', callback=callback)
def post(self, *args, **kwargs):
self.write('NO MOCK')
self.set_header('Content-Type', 'text/html')
class _BaseHTTPClientMockTest(AsyncHTTPTestCase):
def get_app(self):
app = Application([
('/simple_fetch', TestHandler)
])
self.app_http_client = app.http_client = self.get_app_http_client()
return app
def get_app_http_client(self):
raise NotImplementedError()
def test_missing_mock_fail_on_unknown_true(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=val1&arg2=val2&arg3=val3'), request_method='POST',
response_body='GET MOCK'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'599 : ')
def test_not_matching_mock(self):
patch_http_client(self.app_http_client)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'599 : ')
def test_missing_mock_fail_on_unknown_false(self):
patch_http_client(self.app_http_client, fail_on_unknown=False)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'200 : text/html : NO MOCK')
def test_mock_response_body(self):
patch_http_client(self.app_http_client)
# Test that mock for GET request is not used
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=val1'), response_body='GET MOCK'
)
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=val1'), request_method='POST',
response_body='POST MOCK', response_code=400, response_headers={'Content-Type': 'text/plain'}
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'400 : text/plain : POST MOCK')
def test_mock_response_file_xml(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=val1&arg2=val2'), request_method='POST',
response_file='tests/response_stub.xml', response_code=400
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'400 : application/xml : <response>$data_tpl</response>')
def test_mock_response_body_processor(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=$val_tpl'), request_method='POST',
response_file='tests/response_stub.xml', response_code=400,
val_tpl='val1', data_tpl='MOCK DATA'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'400 : application/xml : <response>MOCK DATA</response>')
def test_mock_response_body_no_processor(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch?arg1=$val_tpl'), request_method='POST',
response_file='tests/response_stub.xml', response_code=400, response_body_processor=None,
val_tpl='val1', data_tpl='MOCK DATA'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'400 : application/xml : <response>$data_tpl</response>')
def test_mock_response_function(self):
patch_http_client(self.app_http_client)
def _response_function(request):
return get_response_stub(
request, code=404, buffer='RESPONSE FUNCTION', headers={'Content-Type': 'text/plain'}
)
set_stub(
self.app_http_client, self.get_url('/simple_fetch'), request_method='POST',
response_function=_response_function
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'404 : text/plain : RESPONSE FUNCTION')
def test_mock_response_file_json(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch'), request_method='POST',
response_file='tests/response_stub.json', response_code=400,
data_tpl='MOCK DATA'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'400 : application/json : {"response": "MOCK DATA"}')
def test_mock_response_file_other(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch'), request_method='POST',
response_file='tests/response_stub.txt', response_code=401,
data_tpl='MOCK DATA'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'401 : text/plain : RESPONSE : MOCK DATA')
def test_identical_mocks(self):
patch_http_client(self.app_http_client)
set_stub(
self.app_http_client, self.get_url('/simple_fetch'), request_method='POST',
response_body='FIRST'
)
set_stub(
self.app_http_client, self.get_url('/simple_fetch'), request_method='POST',
response_body='SECOND'
)
response = self.fetch('/simple_fetch')
self.assertEqual(response.body, b'200 : SECOND')
class SimpleAsyncHTTPClientMockTest(_BaseHTTPClientMockTest):
def get_app_http_client(self):
AsyncHTTPClient.configure('tornado.simple_httpclient.SimpleAsyncHTTPClient')
return AsyncHTTPClient(force_instance=True)
class CurlAsyncHTTPClientMockTest(_BaseHTTPClientMockTest):
def get_app_http_client(self):
AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
return AsyncHTTPClient(force_instance=True)
app/badges.py | AstroAntics/throat | 0 | 6630874 | """ Here we store badges. """
from .storage import FILE_NAMESPACE, mtype_from_file, calculate_file_hash, store_file
from peewee import JOIN
from .models import Badge, UserMetadata, SubMod
from flask_babel import lazy_gettext as _l
import uuid
class Badges:
"""
Badge exposes a stable API for dealing with user badges.
We need to be able to look up a badge by id and name, along with the ability to
iterate through all of the badges.
We also want to be able to create badges.
    For backwards compatibility we will allow "fetching" of old_badges but only by ID.
    This will also create an interface for Triggers, as Badges and Triggers are interlinked.
"""
def __iter__(self):
"""
Returns a list of all badges in the database.
"""
badge_query = Badge.select(Badge.bid, Badge.name, Badge.alt, Badge.icon, Badge.score, Badge.trigger, Badge.rank).order_by(Badge.rank, Badge.name)
return (x for x in badge_query)
def __getitem__(self, bid):
"""
Returns a badge from the database.
"""
try:
return Badge.get(Badge.bid == bid)
except Badge.DoesNotExist:
return None
def update_badge(self, bid, name, alt, icon, score, rank, trigger):
"""
Updates the information related to a badge, updates icon if provided.
"""
if icon:
icon = gen_icon(icon)
else:
icon = self[bid].icon
Badge.update(name=name, alt=alt, icon=icon, score=score, rank=rank, trigger=trigger).where(Badge.bid == bid).execute()
@staticmethod
def new_badge(name, alt, icon, score, rank, trigger=None):
"""
Creates a new badge with an optional trigger.
"""
icon = gen_icon(icon)
Badge.create(name=name, alt=alt, icon=icon, score=score, rank=rank, trigger=trigger)
@staticmethod
def delete_badge(bid):
"""
Deletes a badge by ID
"""
Badge.delete().where(Badge.bid == bid).execute()
UserMetadata.delete().where((UserMetadata.key == "badge") & (UserMetadata.value == bid)).execute()
@staticmethod
def assign_userbadge(uid, bid):
"""
Gives a badge to a user
"""
UserMetadata.get_or_create(key="badge", uid=uid, value=bid)
    @staticmethod
    def revoke_badge(uid, bid):
        """
        Removes a badge from a user (same operation as unassign_userbadge).
        """
        UserMetadata.delete().where((UserMetadata.key == "badge") & (UserMetadata.uid == uid) & (UserMetadata.value == str(bid))).execute()
@staticmethod
def unassign_userbadge(uid, bid):
"""
Removes a badge from a user
"""
UserMetadata.delete().where((UserMetadata.key == "badge") & (UserMetadata.uid == uid) & (UserMetadata.value == str(bid))).execute()
@staticmethod
def triggers():
"""
Lists available triggers that can be attached to a badge.
"""
return triggers.keys()
@staticmethod
def badges_for_user(uid):
"""
Returns a list of badges associated with a user.
"""
return (Badge.select(Badge.bid, Badge.name, Badge.icon, Badge.score, Badge.alt, Badge.rank).
join(UserMetadata, JOIN.LEFT_OUTER, on=(UserMetadata.value.cast("int") == Badge.bid),
).where((UserMetadata.uid == uid) & (UserMetadata.key == "badge")).order_by(Badge.rank, Badge.name))
def gen_icon(icon):
mtype = mtype_from_file(icon, allow_video_formats=False)
if mtype is None:
raise Exception(_l("Invalid file type. Only jpg, png and gif allowed."))
fhash = calculate_file_hash(icon)
basename = str(uuid.uuid5(FILE_NAMESPACE, fhash))
f_name = store_file(icon, basename, mtype, remove_metadata=True)
return f_name
badges = Badges()
def admin(bid):
"""
Auto assigns badges to admins.
"""
for user in UserMetadata.select().where(
(UserMetadata.key == "admin") & (UserMetadata.value == "1")
):
print("Giving ", bid, " to:", user.uid)
badges.assign_userbadge(user.uid, bid)
def mod(bid):
"""
Auto assigns badges to mods.
"""
for user in SubMod.select().where((~SubMod.invite)):
print("Giving ", bid, " to:", user.uid)
badges.assign_userbadge(user.uid, bid)
# TODO actually hook these up
triggers = {
"admin": admin,
"mod": mod,
}
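# --- Hypothetical usage sketch (not part of the original module) ---
# `example_uid` and `example_icon` are placeholders: in the app the icon is an
# uploaded file object and the uid comes from the current user.
#
#   badges.new_badge(name="Moderator", alt="Moderates a sub", icon=example_icon,
#                    score=0, rank=1, trigger="mod")
#   for badge in badges:                          # ordered by rank, then name
#       badges.assign_userbadge(example_uid, badge.bid)
#   owned = badges.badges_for_user(example_uid)   # peewee query of Badge rows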
| """ Here we store badges. """
from .storage import FILE_NAMESPACE, mtype_from_file, calculate_file_hash, store_file
from peewee import JOIN
from .models import Badge, UserMetadata, SubMod
from flask_babel import lazy_gettext as _l
import uuid
class Badges:
"""
Badge exposes a stable API for dealing with user badges.
We need to be able to look up a badge by id and name, along with the ability to
iterate through all of the badges.
We also want to be able to create badges.
For backwards compatability we will allow "fetching" of old_badges but only by ID.
This will also create an interfact for Triggers, as Badges and Triggers are interlinked.
"""
def __iter__(self):
"""
Returns a list of all badges in the database.
"""
badge_query = Badge.select(Badge.bid, Badge.name, Badge.alt, Badge.icon, Badge.score, Badge.trigger, Badge.rank).order_by(Badge.rank, Badge.name)
return (x for x in badge_query)
def __getitem__(self, bid):
"""
Returns a badge from the database.
"""
try:
return Badge.get(Badge.bid == bid)
except Badge.DoesNotExist:
return None
def update_badge(self, bid, name, alt, icon, score, rank, trigger):
"""
Updates the information related to a badge, updates icon if provided.
"""
if icon:
icon = gen_icon(icon)
else:
icon = self[bid].icon
Badge.update(name=name, alt=alt, icon=icon, score=score, rank=rank, trigger=trigger).where(Badge.bid == bid).execute()
@staticmethod
def new_badge(name, alt, icon, score, rank, trigger=None):
"""
Creates a new badge with an optional trigger.
"""
icon = gen_icon(icon)
Badge.create(name=name, alt=alt, icon=icon, score=score, rank=rank, trigger=trigger)
@staticmethod
def delete_badge(bid):
"""
Deletes a badge by ID
"""
Badge.delete().where(Badge.bid == bid).execute()
UserMetadata.delete().where((UserMetadata.key == "badge") & (UserMetadata.value == bid)).execute()
@staticmethod
def assign_userbadge(uid, bid):
"""
Gives a badge to a user
"""
UserMetadata.get_or_create(key="badge", uid=uid, value=bid)
@staticmethod
# Removes a badge from a user
def revoke_badge(uid, bid):
UserMetadata.delete().where(UserMetadata.key =="badge") & (UserMetadata.uid == uid) & (UserMetadata.value == str(bid))).execute()
@staticmethod
def unassign_userbadge(uid, bid):
"""
Removes a badge from a user
"""
UserMetadata.delete().where((UserMetadata.key == "badge") & (UserMetadata.uid == uid) & (UserMetadata.value == str(bid))).execute()
@staticmethod
def triggers():
"""
Lists available triggers that can be attached to a badge.
"""
return triggers.keys()
@staticmethod
def badges_for_user(uid):
"""
Returns a list of badges associated with a user.
"""
return (Badge.select(Badge.bid, Badge.name, Badge.icon, Badge.score, Badge.alt, Badge.rank).
join(UserMetadata, JOIN.LEFT_OUTER, on=(UserMetadata.value.cast("int") == Badge.bid),
).where((UserMetadata.uid == uid) & (UserMetadata.key == "badge")).order_by(Badge.rank, Badge.name))
def gen_icon(icon):
mtype = mtype_from_file(icon, allow_video_formats=False)
if mtype is None:
raise Exception(_l("Invalid file type. Only jpg, png and gif allowed."))
fhash = calculate_file_hash(icon)
basename = str(uuid.uuid5(FILE_NAMESPACE, fhash))
f_name = store_file(icon, basename, mtype, remove_metadata=True)
return f_name
badges = Badges()
def admin(bid):
"""
Auto assigns badges to admins.
"""
for user in UserMetadata.select().where(
(UserMetadata.key == "admin") & (UserMetadata.value == "1")
):
print("Giving ", bid, " to:", user.uid)
badges.assign_userbadge(user.uid, bid)
def mod(bid):
"""
Auto assigns badges to mods.
"""
for user in SubMod.select().where((~SubMod.invite)):
print("Giving ", bid, " to:", user.uid)
badges.assign_userbadge(user.uid, bid)
# TODO actually hook these up
triggers = {
"admin": admin,
"mod": mod,
}
Domain/models/GetPackagesDomainModel.py | arashmjr/ClubHouseFollowers | 0 | 6630875 |
class GetPackagesDomainModel:
package_id: str
package_name: str
price: int
def __init__(self, package_id: str, package_name: str, price: int):
self.package_id = package_id
self.package_name = package_name
self.price = price
def to_dict(self):
return {"package_id": self.package_id,
"package_name": self.package_name,
"price": self.price,
}
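# Hypothetical usage sketch (values are made up, not part of the original file):
#
#   package = GetPackagesDomainModel(package_id="p-1", package_name="Starter", price=1000)
#   package.to_dict()
#   # -> {"package_id": "p-1", "package_name": "Starter", "price": 1000}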
notebooks/drive-download-20190731T104457Z-001/20190710/20190710/code/dummy_02a/deeplab3_256x256_01/data.py | pyaf/severstal-steel-defect-detection | 0 | 6630876 | <reponame>pyaf/severstal-steel-defect-detection
from common import *
import pydicom
# https://www.kaggle.com/adkarhe/dicom-images
# https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview/resources
# https://www.kaggle.com/abhishek/train-your-own-mask-rcnn
# https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/98478#latest-568453
# Dataset Update: Non-annotated instances/images
# def run_fix_kaggle_data_error():
# csv_file = '/root/share/project/kaggle/2019/chest/data/__download__/train-rle_old.csv'
# remove_file = '/root/share/project/kaggle/2019/chest/data/__download__/non-diagnostic-train'
# dicom_dir = '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-train'
#
#
# # csv_file = '/root/share/project/kaggle/2019/chest/data/__download__/test-rle_old.csv'
# # remove_file = '/root/share/project/kaggle/2019/chest/data/__download__/non-diagnostic-test'
# # dicom_dir = '/root/share/project/kaggle/2019/chest/data/dicom/dicom-images-test'
# #---
# dicom_file = get_dicom_file(dicom_dir)
# dicom_id = set(dicom_file.keys())
#
# df = pd.read_csv(csv_file)
# df_id = set(df.ImageId.values)
#
# remove_id = []
# non_diagnostic = read_list_from_file(remove_file)
# for k,v in dicom_file.items():
# #print(k,v)
# for s in non_diagnostic:
# if s in v:
# print (v)
# remove_id.append(k)
#
# remove_id=set(remove_id)
#
# #----
# print('remove_id :',len(remove_id))
# print('df_id :',len(df_id))
# print('dicom_id :',len(dicom_id))
# print('')
# print('dicom_id ∩ df_id :',len(set(dicom_id).intersection(df_id)))
# print('dicom_id ∩ remove_id :',len(set(dicom_id).intersection(remove_id)))
# print('df_id ∩ remove_id :',len(set(df_id).intersection(remove_id)))
# exit(0)
'''
You should be expecting 10712 images in the train set
and 1377 images in the public test set.
for test *.dcm files:
remove_id : 4
df_id : 1372
dicom_id : 1377
dicom_id ∩ df_id : 1372
dicom_id ∩ remove_id : 4
df_id ∩ remove_id : 0
for train *.dcm files:
remove_id : 33
df_id : 10675
dicom_id : 10712
dicom_id ∩ df_id : 10675
dicom_id ∩ remove_id : 33
df_id ∩ remove_id : 0
'''
# ----
def get_dicom_file(folder):
dicom_file = glob.glob(folder + '/**/**/*.dcm')
dicom_file = sorted(dicom_file)
image_id = [f.split('/')[-1][:-4] for f in dicom_file]
dicom_file = dict(zip(image_id, dicom_file))
return dicom_file
# ----
# https://www.kaggle.com/mnpinto/pneumothorax-fastai-starter-u-net-128x128
def run_length_decode(rle, height=1024, width=1024, fill_value=1):
component = np.zeros((height, width), np.float32)
component = component.reshape(-1)
rle = np.array([int(s) for s in rle.split(' ')])
rle = rle.reshape(-1, 2)
start = 0
for index, length in rle:
start = start+index
end = start+length
component[start: end] = fill_value
start = end
component = component.reshape(width, height).T
return component
# 1.2.276.0.7230010.3.1.4.8323329.10005.1517875220.958951
# 209126 1 1019 6 1015 10 1012 13 1010 14 1008 16 1007 16 1006 18 1004 20 1003 20 1002 22
def run_length_encode(component):
component = component.T.flatten()
start = np.where(component[1:] > component[:-1])[0]+1
end = np.where(component[:-1] > component[1:])[0]+1
length = end-start
rle = []
for i in range(len(length)):
if i == 0:
rle.extend([start[0], length[0]])
else:
rle.extend([start[i]-end[i-1], length[i]])
rle = ' '.join([str(r) for r in rle])
return rle
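# Hypothetical round-trip check (not part of the original file). It assumes `np`
# is re-exported by `from common import *`, as the rest of this module does.
def _rle_round_trip_demo():
    mask = np.zeros((1024, 1024), np.float32)
    mask[10:20, 5:9] = 1  # one small rectangular component
    rle = run_length_encode(mask)  # column-major runs: '5130 10 1014 10 1014 10 1014 10'
    decoded = run_length_decode(rle, height=1024, width=1024)
    assert np.array_equal(decoded, mask)  # encode/decode invert each other on binary masks
    return rle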
def gb_to_component(df, height=1024, width=1024):
rle = df['EncodedPixels'].values
if np.all(rle == '-1'):
component = np.zeros((1, height, width), np.float32)
return component, 0
component = np.array([run_length_decode(r, height, width, 1) for r in rle])
num_component = len(component)
return component, num_component
def component_to_mask(component):
mask = component.sum(0)
mask = (mask > 0.5).astype(np.float32)
return mask
def mask_to_component(mask, threshold=0.5):
H, W = mask.shape
binary = cv2.threshold(mask, threshold, 1, cv2.THRESH_BINARY)[1]
num_component, label = cv2.connectedComponents(binary.astype(np.uint8))
num_component = num_component-1
component = np.zeros((num_component, H, W), np.float32)
for i in range(0, num_component):
#component[i][label==(i+1)] = mask[label==(i+1)]
component[i] = label == (i+1)
return component, num_component
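# Hypothetical example (not part of the original file): split a two-blob mask into
# connected components and merge them back; `np` and `cv2` are assumed to come from `common`.
def _component_demo():
    mask = np.zeros((64, 64), np.float32)
    mask[5:15, 5:15] = 1
    mask[40:50, 30:45] = 1
    component, num_component = mask_to_component(mask)  # num_component == 2
    merged = component_to_mask(component)               # same pixels as `mask`
    return num_component, np.array_equal(merged, mask)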
### draw ############################################
def draw_input_overlay(image):
overlay = cv2.applyColorMap(image, cv2.COLORMAP_BONE)
return overlay
def draw_mask_overlay(mask):
height, width = mask.shape
overlay = np.zeros((height, width, 3), np.uint8)
overlay[mask > 0] = (0, 0, 255)
return overlay
def draw_truth_overlay(image, component, alpha=0.5):
component = component*255
overlay = image.astype(np.float32)
overlay[:, :, 2] += component*alpha
overlay = np.clip(overlay, 0, 255)
overlay = overlay.astype(np.uint8)
return overlay
def draw_predict_overlay(image, component, alpha=0.5):
component = component*255
overlay = image.astype(np.float32)
overlay[:, :, 1] += component*alpha
overlay = np.clip(overlay, 0, 255)
overlay = overlay.astype(np.uint8)
return overlay
# ------
def mask_to_inner_contour(component):
component = component > 0.5
pad = np.lib.pad(component, ((1, 1), (1, 1)), 'reflect')
contour = component & (
(pad[1:-1, 1:-1] != pad[:-2, 1:-1])
| (pad[1:-1, 1:-1] != pad[2:, 1:-1])
| (pad[1:-1, 1:-1] != pad[1:-1, :-2])
| (pad[1:-1, 1:-1] != pad[1:-1, 2:])
)
return contour
def draw_contour_overlay(image, component, thickness=1):
contour = mask_to_inner_contour(component)
if thickness == 1:
image[contour] = (0, 0, 255)
else:
for y, x in np.stack(np.where(contour)).T:
cv2.circle(image, (x, y), thickness,
(0, 0, 255), lineType=cv2.LINE_4)
return image
###- combined results --#
def draw_result_overlay(input, truth, probability):
input = draw_input_overlay(input)
input1 = cv2.resize(input, dsize=(512, 512))
if truth.shape != (512, 512):
truth1 = cv2.resize(truth, dsize=(512, 512))
probability1 = cv2.resize(probability, dsize=(512, 512))
else:
truth1 = truth
probability1 = probability
# ---
overlay1 = draw_truth_overlay(input1.copy(), truth1, 0.5)
overlay2 = draw_predict_overlay(input1.copy(), probability1, 0.5)
overlay3 = np.zeros((512, 512, 3), np.uint8)
overlay3 = draw_truth_overlay(overlay3, truth1, 1.0)
overlay3 = draw_predict_overlay(overlay3, probability1, 1.0)
draw_shadow_text(overlay3, 'truth', (2, 12), 0.5, (0, 0, 255), 1)
draw_shadow_text(overlay3, 'predict', (2, 24), 0.5, (0, 255, 0), 1)
    # <todo> results after post process ...
overlay4 = np.zeros((512, 512, 3), np.uint8)
overlay = np.hstack([
input,
np.hstack([
np.vstack([overlay1, overlay2]),
np.vstack([overlay4, overlay3]),
])
])
return overlay
### check #######################################################################################
# lstrip
def run_process_0():
df = pd.read_csv(
'/root/share/project/kaggle/2019/chest/data/train-rle.csv')
df.rename(columns={' EncodedPixels': 'EncodedPixels', }, inplace=True)
df['EncodedPixels'] = df['EncodedPixels'].str.lstrip(to_strip=None)
df.to_csv('/root/share/project/kaggle/2019/chest/data/train-rle.csv',
columns=['ImageId', 'EncodedPixels'], index=False)
zz = 0
def run_split_dataset():
df = pd.read_csv(
'/root/share/project/kaggle/2019/chest/data/train-rle.more.csv')
gb = df.groupby('ImageId')
uid = list(gb.groups.keys())
num_component = []
for i in uid:
df = gb.get_group(i)
num_component.append(df['count'].values[0]) # count= num of instances
num_component = np.array(num_component, np.int32)
neg_index = np.where(num_component == 0)[0]
pos_index = np.where(num_component >= 1)[0] # those which have more than one instances
print('num_component==0 : %d' % (len(neg_index)))
print('num_component>=1 : %d' % (len(pos_index)))
print('len(uid) : %d' % (len(uid)))
np.random.shuffle(neg_index)
np.random.shuffle(pos_index)
train_split = np.concatenate([neg_index[300:], pos_index[300:], ])
valid_split = np.concatenate([neg_index[:300], pos_index[:300], ])
uid = np.array(uid, np.object)
train_split = uid[train_split]
valid_split = uid[valid_split]
np.save('/root/share/project/kaggle/2019/chest/data/split/train_%d' %
len(train_split), train_split)
np.save('/root/share/project/kaggle/2019/chest/data/split/valid_%d' %
len(valid_split), valid_split)
zz = 0
# main #################################################################
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
run_split_dataset()
    print('\nsuccess!')
oauthlib/oauth2/rfc6749/grant_types/implicit.py | ButchershopCreative/oauthlib | 1 | 6630877 | <filename>oauthlib/oauth2/rfc6749/grant_types/implicit.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
from oauthlib import common
from oauthlib.common import log
from oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
class ImplicitGrant(GrantTypeBase):
"""`Implicit Grant`_
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
Note: The lines illustrating steps (A) and (B) are broken into two
parts as they pass through the user-agent.
Figure 4: Implicit Grant Flow
The flow illustrated in Figure 4 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier. The redirection URI includes
the access token in the URI fragment.
(D) The user-agent follows the redirection instructions by making a
request to the web-hosted client resource (which does not
include the fragment per [RFC2616]). The user-agent retains the
fragment information locally.
(E) The web-hosted client resource returns a web page (typically an
HTML document with an embedded script) capable of accessing the
full redirection URI including the fragment retained by the
user-agent, and extracting the access token (and other
parameters) contained in the fragment.
(F) The user-agent executes the script provided by the web-hosted
client resource locally, which extracts the access token.
(G) The user-agent passes the access token to the client.
See `Section 10.3`_ and `Section 10.16`_ for important security considerations
when using the implicit grant.
.. _`Implicit Grant`: http://tools.ietf.org/html/rfc6749#section-4.2
.. _`Section 10.3`: http://tools.ietf.org/html/rfc6749#section-10.3
.. _`Section 10.16`: http://tools.ietf.org/html/rfc6749#section-10.16
"""
def __init__(self, request_validator=None):
self.request_validator = request_validator or RequestValidator()
def create_authorization_response(self, request, token_handler):
"""Create an authorization response.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "token".
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The authorization server validates the request to ensure that all
required parameters are present and valid. The authorization server
MUST verify that the redirection URI to which it will redirect the
access token matches a redirection URI registered by the client as
described in `Section 3.1.2`_.
.. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
"""
return self.create_token_response(request, token_handler)
def create_token_response(self, request, token_handler):
"""Return token or error embedded in the URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format, per
`Appendix B`_:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by `Section 3.3`_.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
"""
try:
# request.scopes is only mandated in post auth and both pre and
# post auth use validate_authorization_request
if not request.scopes:
raise ValueError('Scopes must be set on post auth.')
self.validate_token_request(request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# http://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
fragment=True)}, None, 302
token = token_handler.create_token(request, refresh_token=False)
return {'Location': common.add_params_to_uri(request.redirect_uri, token.items(),
fragment=True)}, None, 302
def validate_authorization_request(self, request):
return self.validate_token_request(request)
def validate_token_request(self, request):
"""Check the token request for normal and fatal errors.
This method is very similar to validate_authorization_request in
        the AuthorizationCodeGrant but differs in a few subtle areas.
A normal error could be a missing response_type parameter or the client
attempting to access scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
missing. These must be caught by the provider and handled, how this
is done is outside of the scope of OAuthLib but showing an error
page describing the issue is a good idea.
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# REQUIRED. The client identifier as described in Section 2.2.
# http://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(state=request.state, request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(state=request.state, request=request)
# OPTIONAL. As described in Section 3.1.2.
# http://tools.ietf.org/html/rfc6749#section-3.1.2
if request.redirect_uri is not None:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(state=request.state, request=request)
# The authorization server MUST verify that the redirection URI
# to which it will redirect the access token matches a
# redirection URI registered by the client as described in
# Section 3.1.2.
# http://tools.ietf.org/html/rfc6749#section-3.1.2
if not self.request_validator.validate_redirect_uri(
request.client_id, request.redirect_uri, request):
raise errors.MismatchingRedirectURIError(state=request.state, request=request)
else:
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
request.using_default_redirect_uri = True
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(state=request.state, request=request)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(state=request.state, request=request)
# Then check for normal errors.
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# http://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions.
if request.response_type is None:
raise errors.InvalidRequestError(state=request.state,
description='Missing response_type parameter.',
request=request)
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(state=request.state,
description='Duplicate %s parameter.' % param, request=request)
# REQUIRED. Value MUST be set to "token".
if request.response_type != 'token':
raise errors.UnsupportedResponseTypeError(state=request.state, request=request)
log.debug('Validating use of response_type token for client %r (%r).',
request.client_id, request.client)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type, request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# OPTIONAL. The scope of the access request as described by Section 3.3
# http://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
return request.scopes, {
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
}
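# --- Illustrative example (values are made up; not part of the original module) ---
# Authorization request the user-agent sends to the provider:
#   GET /authorize?response_type=token&client_id=s6BhdRkqt3
#       &redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb&scope=profile&state=xyz
# On success, create_token_response() returns a 302 whose Location carries the token
# in the URI fragment (never the query string), e.g.:
#   https://client.example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
#       &token_type=Bearer&expires_in=3600&scope=profile&state=xyz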
| <filename>oauthlib/oauth2/rfc6749/grant_types/implicit.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
from oauthlib import common
from oauthlib.common import log
from oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
class ImplicitGrant(GrantTypeBase):
"""`Implicit Grant`_
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
Note: The lines illustrating steps (A) and (B) are broken into two
parts as they pass through the user-agent.
Figure 4: Implicit Grant Flow
The flow illustrated in Figure 4 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier. The redirection URI includes
the access token in the URI fragment.
(D) The user-agent follows the redirection instructions by making a
request to the web-hosted client resource (which does not
include the fragment per [RFC2616]). The user-agent retains the
fragment information locally.
(E) The web-hosted client resource returns a web page (typically an
HTML document with an embedded script) capable of accessing the
full redirection URI including the fragment retained by the
user-agent, and extracting the access token (and other
parameters) contained in the fragment.
(F) The user-agent executes the script provided by the web-hosted
client resource locally, which extracts the access token.
(G) The user-agent passes the access token to the client.
See `Section 10.3`_ and `Section 10.16`_ for important security considerations
when using the implicit grant.
.. _`Implicit Grant`: http://tools.ietf.org/html/rfc6749#section-4.2
.. _`Section 10.3`: http://tools.ietf.org/html/rfc6749#section-10.3
.. _`Section 10.16`: http://tools.ietf.org/html/rfc6749#section-10.16
"""
def __init__(self, request_validator=None):
self.request_validator = request_validator or RequestValidator()
def create_authorization_response(self, request, token_handler):
"""Create an authorization response.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "token".
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The authorization server validates the request to ensure that all
required parameters are present and valid. The authorization server
MUST verify that the redirection URI to which it will redirect the
access token matches a redirection URI registered by the client as
described in `Section 3.1.2`_.
.. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
"""
return self.create_token_response(request, token_handler)
def create_token_response(self, request, token_handler):
"""Return token or error embedded in the URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format, per
`Appendix B`_:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by `Section 3.3`_.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
"""
try:
# request.scopes is only mandated in post auth and both pre and
# post auth use validate_authorization_request
if not request.scopes:
raise ValueError('Scopes must be set on post auth.')
self.validate_token_request(request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# http://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
fragment=True)}, None, 302
token = token_handler.create_token(request, refresh_token=False)
return {'Location': common.add_params_to_uri(request.redirect_uri, token.items(),
fragment=True)}, None, 302
def validate_authorization_request(self, request):
return self.validate_token_request(request)
def validate_token_request(self, request):
"""Check the token request for normal and fatal errors.
This method is very similar to validate_authorization_request in
        the AuthorizationCodeGrant but differs in a few subtle areas.
A normal error could be a missing response_type parameter or the client
attempting to access scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
missing. These must be caught by the provider and handled, how this
is done is outside of the scope of OAuthLib but showing an error
page describing the issue is a good idea.
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# REQUIRED. The client identifier as described in Section 2.2.
# http://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(state=request.state, request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(state=request.state, request=request)
# OPTIONAL. As described in Section 3.1.2.
# http://tools.ietf.org/html/rfc6749#section-3.1.2
if request.redirect_uri is not None:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(state=request.state, request=request)
# The authorization server MUST verify that the redirection URI
# to which it will redirect the access token matches a
# redirection URI registered by the client as described in
# Section 3.1.2.
# http://tools.ietf.org/html/rfc6749#section-3.1.2
if not self.request_validator.validate_redirect_uri(
request.client_id, request.redirect_uri, request):
raise errors.MismatchingRedirectURIError(state=request.state, request=request)
else:
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
request.using_default_redirect_uri = True
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(state=request.state, request=request)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(state=request.state, request=request)
# Then check for normal errors.
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# http://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions.
if request.response_type is None:
raise errors.InvalidRequestError(state=request.state,
description='Missing response_type parameter.',
request=request)
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(state=request.state,
description='Duplicate %s parameter.' % param, request=request)
# REQUIRED. Value MUST be set to "token".
if request.response_type != 'token':
raise errors.UnsupportedResponseTypeError(state=request.state, request=request)
log.debug('Validating use of response_type token for client %r (%r).',
request.client_id, request.client)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type, request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# OPTIONAL. The scope of the access request as described by Section 3.3
# http://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
return request.scopes, {
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
}
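# ---------------------------------------------------------------------------
# Illustrative sketch (not part of oauthlib itself): the docstrings above say
# the implicit grant delivers the token in the fragment component of the
# redirect URI, "application/x-www-form-urlencoded" encoded per Appendix B.
# The helper below shows only that encoding step using the standard library;
# the parameter values echo the RFC 6749 examples and are not real tokens.
from urllib.parse import urlencode

def _example_fragment_redirect(redirect_uri, token_params):
    """Return redirect_uri with token_params encoded into its fragment."""
    return '{}#{}'.format(redirect_uri, urlencode(token_params))

# _example_fragment_redirect('https://client.example.com/cb',
#     {'access_token': '2YotnFZFEjr1zCsicMWpAA', 'token_type': 'example',
#      'expires_in': 3600, 'state': 'xyz'})
# -> 'https://client.example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA&token_type=example&expires_in=3600&state=xyz'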
| en | 0.745231 | # -*- coding: utf-8 -*- oauthlib.oauth2.rfc6749.grant_types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Implicit Grant`_ The implicit grant type is used to obtain access tokens (it does not support the issuance of refresh tokens) and is optimized for public clients known to operate a particular redirection URI. These clients are typically implemented in a browser using a scripting language such as JavaScript. Unlike the authorization code grant type, in which the client makes separate requests for authorization and for an access token, the client receives the access token as the result of the authorization request. The implicit grant type does not include client authentication, and relies on the presence of the resource owner and the registration of the redirection URI. Because the access token is encoded into the redirection URI, it may be exposed to the resource owner and other applications residing on the same device:: +----------+ | Resource | | Owner | | | +----------+ ^ | (B) +----|-----+ Client Identifier +---------------+ | -+----(A)-- & Redirection URI --->| | | User- | | Authorization | | Agent -|----(B)-- User authenticates -->| Server | | | | | | |<---(C)--- Redirection URI ----<| | | | with Access Token +---------------+ | | in Fragment | | +---------------+ | |----(D)--- Redirection URI ---->| Web-Hosted | | | without Fragment | Client | | | | Resource | | (F) |<---(E)------- Script ---------<| | | | +---------------+ +-|--------+ | | (A) (G) Access Token | | ^ v +---------+ | | | Client | | | +---------+ Note: The lines illustrating steps (A) and (B) are broken into two parts as they pass through the user-agent. Figure 4: Implicit Grant Flow The flow illustrated in Figure 4 includes the following steps: (A) The client initiates the flow by directing the resource owner's user-agent to the authorization endpoint. The client includes its client identifier, requested scope, local state, and a redirection URI to which the authorization server will send the user-agent back once access is granted (or denied). (B) The authorization server authenticates the resource owner (via the user-agent) and establishes whether the resource owner grants or denies the client's access request. (C) Assuming the resource owner grants access, the authorization server redirects the user-agent back to the client using the redirection URI provided earlier. The redirection URI includes the access token in the URI fragment. (D) The user-agent follows the redirection instructions by making a request to the web-hosted client resource (which does not include the fragment per [RFC2616]). The user-agent retains the fragment information locally. (E) The web-hosted client resource returns a web page (typically an HTML document with an embedded script) capable of accessing the full redirection URI including the fragment retained by the user-agent, and extracting the access token (and other parameters) contained in the fragment. (F) The user-agent executes the script provided by the web-hosted client resource locally, which extracts the access token. (G) The user-agent passes the access token to the client. See `Section 10.3`_ and `Section 10.16`_ for important security considerations when using the implicit grant. .. _`Implicit Grant`: http://tools.ietf.org/html/rfc6749#section-4.2 .. _`Section 10.3`: http://tools.ietf.org/html/rfc6749#section-10.3 .. _`Section 10.16`: http://tools.ietf.org/html/rfc6749#section-10.16 Create an authorization response. 
The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format, per `Appendix B`_: response_type REQUIRED. Value MUST be set to "token". client_id REQUIRED. The client identifier as described in `Section 2.2`_. redirect_uri OPTIONAL. As described in `Section 3.1.2`_. scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in `Section 10.12`_. The authorization server validates the request to ensure that all required parameters are present and valid. The authorization server MUST verify that the redirection URI to which it will redirect the access token matches a redirection URI registered by the client as described in `Section 3.1.2`_. .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2 .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2 .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3 .. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12 .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B Return token or error embedded in the URI fragment. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the "application/x-www-form-urlencoded" format, per `Appendix B`_: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in `Section 7.1`_. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. scope OPTIONAL, if identical to the scope requested by the client; otherwise, REQUIRED. The scope of the access token as described by `Section 3.3`_. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. The authorization server MUST NOT issue a refresh token. .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3 .. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1 # request.scopes is only mandated in post auth and both pre and # post auth use validate_authorization_request # If the request fails due to a missing, invalid, or mismatching # redirection URI, or if the client identifier is missing or invalid, # the authorization server SHOULD inform the resource owner of the # error and MUST NOT automatically redirect the user-agent to the # invalid redirection URI. 
# If the resource owner denies the access request or if the request # fails for reasons other than a missing or invalid redirection URI, # the authorization server informs the client by adding the following # parameters to the fragment component of the redirection URI using the # "application/x-www-form-urlencoded" format, per Appendix B: # http://tools.ietf.org/html/rfc6749#appendix-B Check the token request for normal and fatal errors. This method is very similar to validate_authorization_request in the AuthorizationCodeGrant but differ in a few subtle areas. A normal error could be a missing response_type parameter or the client attempting to access scope it is not allowed to ask authorization for. Normal errors can safely be included in the redirection URI and sent back to the client. Fatal errors occur when the client_id or redirect_uri is invalid or missing. These must be caught by the provider and handled, how this is done is outside of the scope of OAuthLib but showing an error page describing the issue is a good idea. # First check for fatal errors # If the request fails due to a missing, invalid, or mismatching # redirection URI, or if the client identifier is missing or invalid, # the authorization server SHOULD inform the resource owner of the # error and MUST NOT automatically redirect the user-agent to the # invalid redirection URI. # REQUIRED. The client identifier as described in Section 2.2. # http://tools.ietf.org/html/rfc6749#section-2.2 # OPTIONAL. As described in Section 3.1.2. # http://tools.ietf.org/html/rfc6749#section-3.1.2 # The authorization server MUST verify that the redirection URI # to which it will redirect the access token matches a # redirection URI registered by the client as described in # Section 3.1.2. # http://tools.ietf.org/html/rfc6749#section-3.1.2 # Then check for normal errors. # If the resource owner denies the access request or if the request # fails for reasons other than a missing or invalid redirection URI, # the authorization server informs the client by adding the following # parameters to the fragment component of the redirection URI using the # "application/x-www-form-urlencoded" format, per Appendix B. # http://tools.ietf.org/html/rfc6749#appendix-B # Note that the correct parameters to be added are automatically # populated through the use of specific exceptions. # REQUIRED. Value MUST be set to "token". # OPTIONAL. The scope of the access request as described by Section 3.3 # http://tools.ietf.org/html/rfc6749#section-3.3 | 2.082274 | 2 |
jtr/load/ls2jtr.py | mitchelljeff/SUMMAD4.3 | 1 | 6630878 | """
This script converts data from the SemEval-2007 Task 10 on English Lexical
Substitution to the jtr format.
"""
import json
import xmltodict
import re
import os
def load_substitituons(path):
subs = {}
with open(path, "r") as f:
for line in f.readlines()[1:]:
splits = line.split(" :: ")
id = splits[0].split(" ")[1]
sub = [x[:-2] for x in splits[1].split(";")][:-1]
print(id, sub)
subs[id] = sub
return subs
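# Illustrative note (assumption inferred from the parsing above, not taken from
# the original repository): a gold file line is expected to look roughly like
#     "bright.a 1 :: intelligent 3;clever 2;smart 1;"
# which load_substitituons() maps to {"1": ["intelligent", "clever", "smart"]}.
# The minimal sketch below mirrors the same per-line parsing (including the
# original's assumption that each weight is a single character) for quick checks.
def _parse_gold_line_example(line):
    head, subs_part = line.split(" :: ")
    sentence_id = head.split(" ")[1]
    substitutes = [entry[:-2] for entry in subs_part.split(";")][:-1]
    return sentence_id, substitutes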
if __name__ == "__main__":
CLOZE_STYLE = False
for corpus_name in ["trial"]:
file_path = "./jtr/data/LS/%s/lexsub_%s_cleaned.xml" % (
corpus_name, corpus_name)
subs_path = "./jtr/data/LS/%s/gold.%s" % (corpus_name, corpus_name)
subs = load_substitituons(subs_path)
with open(file_path) as fd:
            file_text = fd.read().replace("&", "&amp;")
corpus = xmltodict.parse(file_text)["corpus"]
jtr = {"meta": "SemEval-2007 Task 10: Lexical Substitution"}
instances = []
for lexelt in corpus["lexelt"]:
for instance in lexelt["instance"]:
# fixme: not sure what is happening here
if str(instance) != "@id" and str(instance) != "context":
context = instance["context"]
id = instance["@id"]
word = re.search('_START_\w+_END_', context).group(0)[
7:-5]
context_masked = re.sub('_START_\w+_END_', 'XXXXX',
context)
context_recovered = re.sub('_START_\w+_END_', word,
context)
context_tokenized = context.split(" ")
word_position = [i for i, word in enumerate(context_tokenized) if word.startswith('_START_')][0]
# print("%s\t%s\t%s" % (id, word, context_masked))
if CLOZE_STYLE:
queb = {'id': id, 'support': [], 'questions': [
{'question': context_masked,
'answers': [
{'text': word}
]}
]}
else:
queb = {'id': id,
'support': [{'text': context_recovered}],
'questions': [
{'question': str(word_position),
'answers': [
{'text': sub} for sub in subs[id]
]}
]}
instances.append(queb)
jtr["instances"] = instances
with open("./jtr/data/LS/%s/lexsub_%s_cleaned.jsonl" % \
(corpus_name, corpus_name), 'w') as outfile:
json.dump(jtr, outfile, indent=2)
# create snippet
jtr['instances'] = jtr['instances'][:10]
def save_debug(directory_path, file_name):
if not os.path.exists(directory_path):
os.makedirs(directory_path)
with open(directory_path + "/" + file_name, 'w') as outfile:
json.dump(jtr, outfile, indent=2)
save_debug("./data/LS/debug", "lexsub_debug_cleaned.jsonl")
save_debug("./data/LS", "snippet.jtr.json")
| """
This script converts data from the SemEval-2007 Task 10 on English Lexical
Substitution to the jtr format.
"""
import json
import xmltodict
import re
import os
def load_substitituons(path):
subs = {}
with open(path, "r") as f:
for line in f.readlines()[1:]:
splits = line.split(" :: ")
id = splits[0].split(" ")[1]
sub = [x[:-2] for x in splits[1].split(";")][:-1]
print(id, sub)
subs[id] = sub
return subs
if __name__ == "__main__":
CLOZE_STYLE = False
for corpus_name in ["trial"]:
file_path = "./jtr/data/LS/%s/lexsub_%s_cleaned.xml" % (
corpus_name, corpus_name)
subs_path = "./jtr/data/LS/%s/gold.%s" % (corpus_name, corpus_name)
subs = load_substitituons(subs_path)
with open(file_path) as fd:
file_text = fd.read().replace("&", "&")
corpus = xmltodict.parse(file_text)["corpus"]
jtr = {"meta": "SemEval-2007 Task 10: Lexical Substitution"}
instances = []
for lexelt in corpus["lexelt"]:
for instance in lexelt["instance"]:
# fixme: not sure what is happening here
if str(instance) != "@id" and str(instance) != "context":
context = instance["context"]
id = instance["@id"]
word = re.search('_START_\w+_END_', context).group(0)[
7:-5]
context_masked = re.sub('_START_\w+_END_', 'XXXXX',
context)
context_recovered = re.sub('_START_\w+_END_', word,
context)
context_tokenized = context.split(" ")
word_position = [i for i, word in enumerate(context_tokenized) if word.startswith('_START_')][0]
# print("%s\t%s\t%s" % (id, word, context_masked))
if CLOZE_STYLE:
queb = {'id': id, 'support': [], 'questions': [
{'question': context_masked,
'answers': [
{'text': word}
]}
]}
else:
queb = {'id': id,
'support': [{'text': context_recovered}],
'questions': [
{'question': str(word_position),
'answers': [
{'text': sub} for sub in subs[id]
]}
]}
instances.append(queb)
jtr["instances"] = instances
with open("./jtr/data/LS/%s/lexsub_%s_cleaned.jsonl" % \
(corpus_name, corpus_name), 'w') as outfile:
json.dump(jtr, outfile, indent=2)
# create snippet
jtr['instances'] = jtr['instances'][:10]
def save_debug(directory_path, file_name):
if not os.path.exists(directory_path):
os.makedirs(directory_path)
with open(directory_path + "/" + file_name, 'w') as outfile:
json.dump(jtr, outfile, indent=2)
save_debug("./data/LS/debug", "lexsub_debug_cleaned.jsonl")
save_debug("./data/LS", "snippet.jtr.json")
| en | 0.654081 | This script converts data from the SemEval-2007 Task 10 on English Lexical Substitution to the jtr format. #038;") # fixme: not sure what is happening here # print("%s\t%s\t%s" % (id, word, context_masked)) # create snippet | 2.761166 | 3 |
puma/helpers/testing/mixin/not_a_test_case_enum.py | gift-surg/puma | 0 | 6630879 | from enum import Enum
class NotATestCaseEnum(Enum):
"""Base Enum to prevent a class with 'Test' in the name being detected as a TestCase"""
__test__ = False
| from enum import Enum
class NotATestCaseEnum(Enum):
"""Base Enum to prevent a class with 'Test' in the name being detected as a TestCase"""
__test__ = False
| en | 0.939194 | Base Enum to prevent a class with 'Test' in the name being detected as a TestCase | 2.268447 | 2 |
others/dp/dpl-2.py | c-yan/atcoder | 1 | 6630880 | from sys import setrecursionlimit
setrecursionlimit(10 ** 6)
N, *a = map(int, open(0).read().split())
M = 3001
INF = M * 10 ** 9
def f(l, r):
i = l * M + r
if dp[i] != INF:
return dp[i]
if l == r:
dp[i] = a[l]
return dp[i]
dp[i] = max(a[l] - f(l + 1, r), a[r] - f(l, r - 1))
return dp[i]
dp = [INF] * (M * M)
print(f(0, N - 1))
| from sys import setrecursionlimit
setrecursionlimit(10 ** 6)
N, *a = map(int, open(0).read().split())
M = 3001
INF = M * 10 ** 9
def f(l, r):
i = l * M + r
if dp[i] != INF:
return dp[i]
if l == r:
dp[i] = a[l]
return dp[i]
dp[i] = max(a[l] - f(l + 1, r), a[r] - f(l, r - 1))
return dp[i]
dp = [INF] * (M * M)
print(f(0, N - 1))
| none | 1 | 2.480069 | 2 |
|
euler5.py | healyshane/Python-Scripts | 0 | 6630881 | <reponame>healyshane/Python-Scripts<filename>euler5.py
#<NAME>, 25-FEB
# 2,520 is the smallest number that can be divided by
# each of the numbers from 1 to 10 without any
# remainder. Write a Python program using for and
# range to calculate the smallest positive number
# that is evenly divisible by all of the numbers
# from 1 to 20.
n = 0
y = 1
while y != 0: # continue loop until y=0
n = n + 2520 # increase by smallest number divisible 1:10
for i in range(1,21,1):
y = n % (i)
if y != 0:
break
print("The smallest positive number that is evenly divisible by all of the numbers from 1 to 20 is ", n)
| #<NAME>, 25-FEB
# 2,520 is the smallest number that can be divided by
# each of the numbers from 1 to 10 without any
# remainder. Write a Python program using for and
# range to calculate the smallest positive number
# that is evenly divisible by all of the numbers
# from 1 to 20.
n = 0
y = 1
while y != 0: # continue loop until y=0
n = n + 2520 # increase by smallest number divisible 1:10
for i in range(1,21,1):
y = n % (i)
if y != 0:
break
print("The smallest positive number that is evenly divisible by all of the numbers from 1 to 20 is ", n) | en | 0.905703 | #<NAME>, 25-FEB # 2,520 is the smallest number that can be divided by # each of the numbers from 1 to 10 without any # remainder. Write a Python program using for and # range to calculate the smallest positive number # that is evenly divisible by all of the numbers # from 1 to 20. # continue loop until y=0 # increase by smallest number divisible 1:10 | 3.959981 | 4 |
mydeep-lib/mydeep_api/dataset/image_path_column.py | flegac/deep-experiments | 0 | 6630882 | <gh_stars>0
import os
from typing import Iterator, Tuple, List
import cv2
from mydeep_api.dataset.column import Column
from mydeep_api.tensor import Tensor
from surili_core.workspace import Workspace
class ImagePathColumn(Column):
@staticmethod
def from_folder_tree(path: str, shape: Tuple[int, int] = None):
images = (Workspace.from_path(path)
.folders
.flatmap(lambda fs: fs.files)
.map(lambda p: os.path.relpath(p, start=path))
.to_list())
return ImagePathColumn(images, path, shape)
def __init__(self, images: List[str],
root_path: str = None,
shape: Tuple[int, int] = None):
self.images = images
self.root_path = root_path
self._shape = shape
def __iter__(self) -> Iterator[Tensor]:
for filename in self.images:
if self.root_path:
filename = os.path.join(self.root_path, filename)
yield (cv2.imread(filename))
def __len__(self):
return len(self.images)
@property
def shape(self) -> Tuple[int, int, int]:
if self._shape:
return next(iter(self)).shape
return None
| import os
from typing import Iterator, Tuple, List
import cv2
from mydeep_api.dataset.column import Column
from mydeep_api.tensor import Tensor
from surili_core.workspace import Workspace
class ImagePathColumn(Column):
@staticmethod
def from_folder_tree(path: str, shape: Tuple[int, int] = None):
images = (Workspace.from_path(path)
.folders
.flatmap(lambda fs: fs.files)
.map(lambda p: os.path.relpath(p, start=path))
.to_list())
return ImagePathColumn(images, path, shape)
def __init__(self, images: List[str],
root_path: str = None,
shape: Tuple[int, int] = None):
self.images = images
self.root_path = root_path
self._shape = shape
def __iter__(self) -> Iterator[Tensor]:
for filename in self.images:
if self.root_path:
filename = os.path.join(self.root_path, filename)
yield (cv2.imread(filename))
def __len__(self):
return len(self.images)
@property
def shape(self) -> Tuple[int, int, int]:
if self._shape:
return next(iter(self)).shape
return None | none | 1 | 2.67999 | 3 |
|
apps/modules/post/process/post.py | singod/flask-osroom | 1 | 6630883 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : <NAME>
from bson.objectid import ObjectId
from flask import request
from flask_babel import gettext
from flask_login import current_user
from apps.app import mdbs
from apps.modules.post.process.post_statistical import post_pv
from apps.modules.post.process.post_process import get_posts_pr, get_post_pr
from apps.core.utils.get_config import get_config
from apps.utils.format.obj_format import json_to_pyseq, str_to_num
def get_post(post_id=None):
if not post_id:
post_id = request.argget.all('post_id')
post_pv(post_id)
data = get_post_pr(post_id=post_id)
return data
def get_posts(page=None):
if not page:
page = str_to_num(request.argget.all('page', 1))
pre = str_to_num(request.argget.all('pre', get_config("post", "NUM_PAGE")))
sort = json_to_pyseq(request.argget.all('sort'))
status = request.argget.all('status', 'is_issued')
matching_rec = request.argget.all('matching_rec')
time_range = int(request.argget.all('time_range', 0))
keyword = request.argget.all('keyword', '').strip()
fields = json_to_pyseq(request.argget.all('fields'))
unwanted_fields = json_to_pyseq(request.argget.all('unwanted_fields'))
user_id = request.argget.all('user_id')
category_id = request.argget.all('category_id')
tag = request.argget.all('tag')
    # fields and unwanted_fields cannot be used at the same time
temp_field = {}
if fields:
for f in fields:
temp_field[f] = 1
elif unwanted_fields:
for f in unwanted_fields:
temp_field[f] = 0
other_filter = {}
if user_id:
        # get the specified user's posts
other_filter["user_id"] = user_id
    # if category_id is None, get posts from all categories
if category_id:
try:
ObjectId(category_id)
            # specified category
other_filter["category"] = category_id
except BaseException:
            # default collection
other_filter["category"] = None
if tag:
other_filter["tags"] = tag
data = get_posts_pr(
page=page,
field=temp_field,
pre=pre,
sort=sort,
status=status,
time_range=time_range,
matching_rec=matching_rec,
keyword=keyword,
other_filter=other_filter)
return data
def post_like():
tid = request.argget.all('id')
like = mdbs["user"].db.user_like.find_one(
{"user_id": current_user.str_id, "type": "post"})
if not like:
user_like = {
"values": [],
"type": "post",
"user_id": current_user.str_id
}
mdbs["user"].db.user_like.insert_one(user_like)
r1 = mdbs["user"].db.user_like.update_one(
{"user_id": current_user.str_id, "type": "post"}, {"$addToSet": {"values": tid}})
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": 1}, "$addToSet": {"like_user_id": current_user.str_id}})
else:
if tid in like["values"]:
like["values"].remove(tid)
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": -1}, "$pull": {"like_user_id": current_user.str_id}})
else:
like["values"].append(tid)
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": 1}, "$addToSet": {"like_user_id": current_user.str_id}})
r1 = mdbs["user"].db.user_like.update_one({"user_id": current_user.str_id, "type": "post"},
{"$set": {"values": like["values"]}})
if r1.modified_count and r2.modified_count:
data = {"msg": gettext("Success"), "msg_type": "s", "custom_status": 201}
else:
data = {"msg": gettext("Failed"), "msg_type": "w", "custom_status": 400}
return data
| #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : <NAME>
from bson.objectid import ObjectId
from flask import request
from flask_babel import gettext
from flask_login import current_user
from apps.app import mdbs
from apps.modules.post.process.post_statistical import post_pv
from apps.modules.post.process.post_process import get_posts_pr, get_post_pr
from apps.core.utils.get_config import get_config
from apps.utils.format.obj_format import json_to_pyseq, str_to_num
def get_post(post_id=None):
if not post_id:
post_id = request.argget.all('post_id')
post_pv(post_id)
data = get_post_pr(post_id=post_id)
return data
def get_posts(page=None):
if not page:
page = str_to_num(request.argget.all('page', 1))
pre = str_to_num(request.argget.all('pre', get_config("post", "NUM_PAGE")))
sort = json_to_pyseq(request.argget.all('sort'))
status = request.argget.all('status', 'is_issued')
matching_rec = request.argget.all('matching_rec')
time_range = int(request.argget.all('time_range', 0))
keyword = request.argget.all('keyword', '').strip()
fields = json_to_pyseq(request.argget.all('fields'))
unwanted_fields = json_to_pyseq(request.argget.all('unwanted_fields'))
user_id = request.argget.all('user_id')
category_id = request.argget.all('category_id')
tag = request.argget.all('tag')
    # fields and unwanted_fields cannot be used at the same time
temp_field = {}
if fields:
for f in fields:
temp_field[f] = 1
elif unwanted_fields:
for f in unwanted_fields:
temp_field[f] = 0
other_filter = {}
if user_id:
        # get the specified user's posts
other_filter["user_id"] = user_id
    # if category_id is None, get posts from all categories
if category_id:
try:
ObjectId(category_id)
            # specified category
other_filter["category"] = category_id
except BaseException:
            # default collection
other_filter["category"] = None
if tag:
other_filter["tags"] = tag
data = get_posts_pr(
page=page,
field=temp_field,
pre=pre,
sort=sort,
status=status,
time_range=time_range,
matching_rec=matching_rec,
keyword=keyword,
other_filter=other_filter)
return data
def post_like():
tid = request.argget.all('id')
like = mdbs["user"].db.user_like.find_one(
{"user_id": current_user.str_id, "type": "post"})
if not like:
user_like = {
"values": [],
"type": "post",
"user_id": current_user.str_id
}
mdbs["user"].db.user_like.insert_one(user_like)
r1 = mdbs["user"].db.user_like.update_one(
{"user_id": current_user.str_id, "type": "post"}, {"$addToSet": {"values": tid}})
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": 1}, "$addToSet": {"like_user_id": current_user.str_id}})
else:
if tid in like["values"]:
like["values"].remove(tid)
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": -1}, "$pull": {"like_user_id": current_user.str_id}})
else:
like["values"].append(tid)
r2 = mdbs["web"].db.post.update_one({"_id": ObjectId(tid)}, {
"$inc": {"like": 1}, "$addToSet": {"like_user_id": current_user.str_id}})
r1 = mdbs["user"].db.user_like.update_one({"user_id": current_user.str_id, "type": "post"},
{"$set": {"values": like["values"]}})
if r1.modified_count and r2.modified_count:
data = {"msg": gettext("Success"), "msg_type": "s", "custom_status": 201}
else:
data = {"msg": gettext("Failed"), "msg_type": "w", "custom_status": 400}
return data
| zh | 0.740143 | #!/usr/bin/env python # -*-coding:utf-8-*- # @Time : 2017/11/1 ~ 2019/9/1 # @Author : <NAME> # 不能同时使用fields 和 unwanted_fields # 获取指定用户的post # 如果category_id为None, 则获取全部分类文章 # 指定分类 # 默认文集 | 2.136978 | 2 |
src/bs_processors/utils/file_util.py | RaduW/bs-processors | 1 | 6630884 | """
Utilities for applying processors to files
"""
import shutil
from fnmatch import fnmatch
from typing import Callable, List, Any, Sequence
from bs4 import BeautifulSoup
import os
from os import path, walk
import logging
import re
_log = logging.getLogger("bs-processors")
def process_directory(processor: Callable[[List[Any]], List[Any]], parser_type: str,
input_dir: str, output_dir: str,
file_selector):
"""
Processes a directory with the specified processor
* **processor**: a file processor
* **parser_type**: processor 'html.parser', 'html', 'xml' ( BeautifulSoup parser)
* **input_dir**: the input directory
* **output_dir**: the output directory
* **file_selector**: something that can be transformed into a file_name predicate
      if the predicate is true then the file will be processed; if not, the file will be
copied from input dir to output dir, see `to_file_selector_predicate` for details
about the file selector.
"""
file_selector = to_file_selector_predicate(file_selector)
for dirpath, dirnames, filenames in walk(input_dir):
rel_path = dirpath[len(input_dir):]
if len(rel_path) > 0 and rel_path[0] == path.sep:
rel_path= rel_path[1:] # remove start '/'
current_output_dir = path.join(output_dir, rel_path)
if not path.exists(current_output_dir):
os.makedirs(current_output_dir)
for fname in filenames:
input_fname = path.join(dirpath, fname)
output_fname = path.join(current_output_dir, fname)
if file_selector(input_fname):
_log.debug(f"processing '{input_fname}' into '{output_fname}'")
process_file(processor, parser_type, input_fname, output_fname)
else:
_log.debug(f"copying '{input_fname}' into '{output_fname}'")
shutil.copy(input_fname, output_fname)
def process_file(processor: Callable[[List[Any]], List[Any]], parser_type: str, input_file: str, output_file: str):
"""
Processes a file with the passed processor and saves the result in the output file
* **processor**: the processor to be applied
* **parser_type**: BeautifulSoup parser type ( 'html', 'xml', 'html.parser', etc)
* **input_file**: the input file name
* **output_file**: the result file name
"""
with open(input_file, "rt") as f:
soup = BeautifulSoup(f, parser_type)
result = processor([soup])
output_len = len(result)
if output_len == 0:
_log.warning(f"processing '{input_file}' did NOT generate any output")
return
if output_len > 1:
_log.warning(f"processing '{input_file}' generated multiple output elements saving only the first one")
result = result[0]
if result.name != '[document]':
_log.warning(f"processing '{input_file}' did not yield a beautiful soup element creating one")
soup = BeautifulSoup(features=parser_type)
        soup.append(result)  # Tag.append returns None, so keep a reference to the soup itself
        result = soup
directory_name, f_name = path.split(output_file)
if not path.exists(directory_name):
os.makedirs(directory_name)
with open(output_file, "wt") as f:
f.write(result.prettify())
def process_html_file(processor: Callable[[List[Any]], List[Any]], input_file: str, output_file: str):
process_file(processor, 'html.parser', input_file, output_file)
def to_file_selector_predicate(pred):
"""
Creates a file selector predicate from a variety of arguments
* **pred**: something that can be transformed in a file name predicate
* None: will match everything
* a str: will be interpreted as a unix file pattern (e.g. *.txt )
* a sequence: will be interpreted as a sequence of unix file patterns
(e.g. [*.txt, *.py]
* a regular expression, will create a predicate with re.fullmath (i.e. full match
of the full file name)
* a predicate that takes a string (the full file name)
* **return**: a file name predicate
>>> pm = to_file_selector_predicate('*.txt')
>>> pm('abc/def.txt')
True
>>> pm('abc/def.doc')
False
>>> pm = to_file_selector_predicate(['*.txt', '*.doc'])
>>> pm('abc/def.doc')
True
>>> pm('abc/def.txt')
True
>>> pm('abc/def.tt')
False
>>> pm = to_file_selector_predicate(re.compile("(abc)|(def+)"))
>>> pm("abc")
True
>>> pm("abcd")
False
>>> pm("def")
True
>>> pm("deffff")
True
>>> pm("something")
False
>>> pm = to_file_selector_predicate(lambda x: x.endswith("txt"))
>>> pm("abc.txt")
True
>>> pm("abc.tt")
False
"""
if pred is None:
        return lambda fname: True  # select everything (callers expect a callable predicate)
# pred is a string
if isinstance(pred, str):
return pattern_match_pred([pred])
# pred is a list like object
elif isinstance(pred, (tuple, list, set, frozenset)):
return pattern_match_pred(pred)
# pred is a regex
elif isinstance(pred, re.Pattern):
return lambda fname: pred.fullmatch(fname) is not None
# pred must be a predicate, use it as is
else:
return pred
def pattern_match_pred(patterns: Sequence[str]) -> Callable[[str], bool]:
"""
Creates a unix file pattern match predicate from a sequence of patterns
* **patterns**: sequence of patterns
* **return**: predicate
>>> pm = pattern_match_pred(["*.exe", "*.txt", "*.do?"])
>>> pm( "abc.txt")
True
>>> pm( "User/bubu/xyz.txt")
True
>>> pm( "abc.txta")
False
>>> pm('abc.exe')
True
>>> pm('abc.ex')
False
>>> pm('abc.doc')
True
"""
def inner(file_name: str) -> bool:
for pattern in patterns:
if fnmatch(file_name, pattern):
return True
return False
return inner
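# Illustrative usage (sketch; the directory paths and the identity processor are
# made-up examples, not part of this module): process_directory() walks the
# input tree, runs the processor on files matched by the selector and copies
# everything else verbatim.
def _example_usage():
    def identity(elements):
        return elements  # leave parsed documents unchanged
    process_directory(identity, 'html.parser',
                      input_dir='in_pages', output_dir='out_pages',
                      file_selector='*.html')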
| """
Utilities for applying processors to files
"""
import shutil
from fnmatch import fnmatch
from typing import Callable, List, Any, Sequence
from bs4 import BeautifulSoup
import os
from os import path, walk
import logging
import re
_log = logging.getLogger("bs-processors")
def process_directory(processor: Callable[[List[Any]], List[Any]], parser_type: str,
input_dir: str, output_dir: str,
file_selector):
"""
Processes a directory with the specified processor
* **processor**: a file processor
* **parser_type**: processor 'html.parser', 'html', 'xml' ( BeautifulSoup parser)
* **input_dir**: the input directory
* **output_dir**: the output directory
* **file_selector**: something that can be transformed into a file_name predicate
      if the predicate is true then the file will be processed; if not, the file will be
copied from input dir to output dir, see `to_file_selector_predicate` for details
about the file selector.
"""
file_selector = to_file_selector_predicate(file_selector)
for dirpath, dirnames, filenames in walk(input_dir):
rel_path = dirpath[len(input_dir):]
if len(rel_path) > 0 and rel_path[0] == path.sep:
rel_path= rel_path[1:] # remove start '/'
current_output_dir = path.join(output_dir, rel_path)
if not path.exists(current_output_dir):
os.makedirs(current_output_dir)
for fname in filenames:
input_fname = path.join(dirpath, fname)
output_fname = path.join(current_output_dir, fname)
if file_selector(input_fname):
_log.debug(f"processing '{input_fname}' into '{output_fname}'")
process_file(processor, parser_type, input_fname, output_fname)
else:
_log.debug(f"copying '{input_fname}' into '{output_fname}'")
shutil.copy(input_fname, output_fname)
def process_file(processor: Callable[[List[Any]], List[Any]], parser_type: str, input_file: str, output_file: str):
"""
Processes a file with the passed processor and saves the result in the output file
* **processor**: the processor to be applied
* **parser_type**: BeautifulSoup parser type ( 'html', 'xml', 'html.parser', etc)
* **input_file**: the input file name
* **output_file**: the result file name
"""
with open(input_file, "rt") as f:
soup = BeautifulSoup(f, parser_type)
result = processor([soup])
output_len = len(result)
if output_len == 0:
_log.warning(f"processing '{input_file}' did NOT generate any output")
return
if output_len > 1:
_log.warning(f"processing '{input_file}' generated multiple output elements saving only the first one")
result = result[0]
if result.name != '[document]':
_log.warning(f"processing '{input_file}' did not yield a beautiful soup element creating one")
soup = BeautifulSoup(features=parser_type)
        soup.append(result)  # Tag.append returns None, so keep a reference to the soup itself
        result = soup
directory_name, f_name = path.split(output_file)
if not path.exists(directory_name):
os.makedirs(directory_name)
with open(output_file, "wt") as f:
f.write(result.prettify())
def process_html_file(processor: Callable[[List[Any]], List[Any]], input_file: str, output_file: str):
process_file(processor, 'html.parser', input_file, output_file)
def to_file_selector_predicate(pred):
"""
Creates a file selector predicate from a variety of arguments
* **pred**: something that can be transformed in a file name predicate
* None: will match everything
* a str: will be interpreted as a unix file pattern (e.g. *.txt )
* a sequence: will be interpreted as a sequence of unix file patterns
(e.g. [*.txt, *.py]
* a regular expression, will create a predicate with re.fullmath (i.e. full match
of the full file name)
* a predicate that takes a string (the full file name)
* **return**: a file name predicate
>>> pm = to_file_selector_predicate('*.txt')
>>> pm('abc/def.txt')
True
>>> pm('abc/def.doc')
False
>>> pm = to_file_selector_predicate(['*.txt', '*.doc'])
>>> pm('abc/def.doc')
True
>>> pm('abc/def.txt')
True
>>> pm('abc/def.tt')
False
>>> pm = to_file_selector_predicate(re.compile("(abc)|(def+)"))
>>> pm("abc")
True
>>> pm("abcd")
False
>>> pm("def")
True
>>> pm("deffff")
True
>>> pm("something")
False
>>> pm = to_file_selector_predicate(lambda x: x.endswith("txt"))
>>> pm("abc.txt")
True
>>> pm("abc.tt")
False
"""
if pred is None:
        return lambda fname: True  # select everything (callers expect a callable predicate)
# pred is a string
if isinstance(pred, str):
return pattern_match_pred([pred])
# pred is a list like object
elif isinstance(pred, (tuple, list, set, frozenset)):
return pattern_match_pred(pred)
# pred is a regex
elif isinstance(pred, re.Pattern):
return lambda fname: pred.fullmatch(fname) is not None
# pred must be a predicate, use it as is
else:
return pred
def pattern_match_pred(patterns: Sequence[str]) -> Callable[[str], bool]:
"""
Creates a unix file pattern match predicate from a sequence of patterns
* **patterns**: sequence of patterns
* **return**: predicate
>>> pm = pattern_match_pred(["*.exe", "*.txt", "*.do?"])
>>> pm( "abc.txt")
True
>>> pm( "User/bubu/xyz.txt")
True
>>> pm( "abc.txta")
False
>>> pm('abc.exe')
True
>>> pm('abc.ex')
False
>>> pm('abc.doc')
True
"""
def inner(file_name: str) -> bool:
for pattern in patterns:
if fnmatch(file_name, pattern):
return True
return False
return inner
| en | 0.665602 | Utilities for applying processors to files Processes a directory with the specified processor * **processor**: a file processor * **parser_type**: processor 'html.parser', 'html', 'xml' ( BeautifulSoup parser) * **input_dir**: the input directory * **output_dir**: the output directory * **file_selector**: something that can be transformed into a file_name predicate if the predicate is true than the file will be processed if not the file will be copied from input dir to output dir, see `to_file_selector_predicate` for details about the file selector. # remove start '/' Processes a file with the passed processor and saves the result in the output file * **processor**: the processor to be applied * **parser_type**: BeautifulSoup parser type ( 'html', 'xml', 'html.parser', etc) * **input_file**: the input file name * **output_file**: the result file name Creates a file selector predicate from a variety of arguments * **pred**: something that can be transformed in a file name predicate * None: will match everything * a str: will be interpreted as a unix file pattern (e.g. *.txt ) * a sequence: will be interpreted as a sequence of unix file patterns (e.g. [*.txt, *.py] * a regular expression, will create a predicate with re.fullmath (i.e. full match of the full file name) * a predicate that takes a string (the full file name) * **return**: a file name predicate >>> pm = to_file_selector_predicate('*.txt') >>> pm('abc/def.txt') True >>> pm('abc/def.doc') False >>> pm = to_file_selector_predicate(['*.txt', '*.doc']) >>> pm('abc/def.doc') True >>> pm('abc/def.txt') True >>> pm('abc/def.tt') False >>> pm = to_file_selector_predicate(re.compile("(abc)|(def+)")) >>> pm("abc") True >>> pm("abcd") False >>> pm("def") True >>> pm("deffff") True >>> pm("something") False >>> pm = to_file_selector_predicate(lambda x: x.endswith("txt")) >>> pm("abc.txt") True >>> pm("abc.tt") False # select everything # pred is a string # pred is a list like object # pred is a regex # pred must be a predicate, use it as is Creates a unix file pattern match predicate from a sequence of patterns * **patterns**: sequence of patterns * **return**: predicate >>> pm = pattern_match_pred(["*.exe", "*.txt", "*.do?"]) >>> pm( "abc.txt") True >>> pm( "User/bubu/xyz.txt") True >>> pm( "abc.txta") False >>> pm('abc.exe') True >>> pm('abc.ex') False >>> pm('abc.doc') True | 3.236342 | 3 |
Problemset/beautiful-arrangement-ii/beautiful-arrangement-ii.py | KivenCkl/LeetCode | 7 | 6630885 |
# @Title: 优美的排列 II (Beautiful Arrangement II)
# @Author: KivenC
# @Date: 2018-08-05 20:45:40
# @Runtime: 88 ms
# @Memory: N/A
class Solution:
def constructArray(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
if n <= 1 or k < 1 or n <= k:
return []
res = [1]
for i, val in enumerate(range(k, 0, -1)):
res.append(res[-1]+(-1)**i*val)
for i in range(k+2, n+1):
res.append(i)
return res
|
# @Title: 优美的排列 II (Beautiful Arrangement II)
# @Author: KivenC
# @Date: 2018-08-05 20:45:40
# @Runtime: 88 ms
# @Memory: N/A
class Solution:
def constructArray(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
if n <= 1 or k < 1 or n <= k:
return []
res = [1]
for i, val in enumerate(range(k, 0, -1)):
res.append(res[-1]+(-1)**i*val)
for i in range(k+2, n+1):
res.append(i)
return res
| en | 0.331698 | # @Title: 优美的排列 II (Beautiful Arrangement II) # @Author: KivenC # @Date: 2018-08-05 20:45:40 # @Runtime: 88 ms # @Memory: N/A :type n: int :type k: int :rtype: List[int] | 3.011672 | 3 |
setup.py | sonthonaxrk/flexmock | 0 | 6630886 | import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='flexmock',
version='0.10.4',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='http://flexmock.readthedocs.org',
license='BSD License',
py_modules=['flexmock'],
description='flexmock is a testing library for Python that makes it easy to create mocks,'
'stubs and fakes.',
long_description=codecs.open('README.rst', encoding='utf8').read(),
keywords='flexmock mock stub test unittest pytest nose',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
],
)
| import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='flexmock',
version='0.10.4',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='http://flexmock.readthedocs.org',
license='BSD License',
py_modules=['flexmock'],
description='flexmock is a testing library for Python that makes it easy to create mocks,'
'stubs and fakes.',
long_description=codecs.open('README.rst', encoding='utf8').read(),
keywords='flexmock mock stub test unittest pytest nose',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
],
)
| none | 1 | 1.473581 | 1 |
|
3 Multi Face Recognition using a WebCam.py | cyanamous/Computer-Vision-Playground | 1 | 6630887 | import cv2
video = cv2.VideoCapture(0)
#Cascade
face_cascade = cv2.CascadeClassifier("./haarcascade_frontalface_default.xml")
a = 1
while True:
a = a + 1
check, frame = video.read()
print(frame)
print(check)
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces= face_cascade.detectMultiScale(gray_img, scaleFactor = 1.05, minNeighbors=5)
    img = frame  # fall back to the raw frame when no faces are detected
for x,y,w,h in faces:
img = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)
# x,y,w,h = faces[0]
# img = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3)
cv2.imshow("Video",img)
key = cv2.waitKey(1)
if key == ord('q'):
break
print(a)
video.release()
cv2.destroyAllWindows()
| import cv2
video = cv2.VideoCapture(0)
#Cascade
face_cascade = cv2.CascadeClassifier("./haarcascade_frontalface_default.xml")
a = 1
while True:
a = a + 1
check, frame = video.read()
print(frame)
print(check)
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces= face_cascade.detectMultiScale(gray_img, scaleFactor = 1.05, minNeighbors=5)
    img = frame  # fall back to the raw frame when no faces are detected
for x,y,w,h in faces:
img = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)
# x,y,w,h = faces[0]
# img = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3)
cv2.imshow("Video",img)
key = cv2.waitKey(1)
if key == ord('q'):
break
print(a)
video.release()
cv2.destroyAllWindows()
| en | 0.348867 | #Cascade # x,y,w,h = faces[0] # img = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3) | 2.895485 | 3 |
Code/hash-table/hashtable.py | abrusebas1997/CS-1.3-Core-Data-Structures | 0 | 6630888 | <filename>Code/hash-table/hashtable.py
#!python
from linkedlist import LinkedList
class HashTable(object):
def __init__(self, init_size=8):
"""Initialize this hash table with the given initial size."""
self.buckets = [LinkedList() for i in range(init_size)]
self.size = 0 # Number of key-value entries
def __str__(self):
"""Return a formatted string representation of this hash table."""
items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]
return '{' + ', '.join(items) + '}'
def __repr__(self):
"""Return a string representation of this hash table."""
return 'HashTable({!r})'.format(self.items())
def _bucket_index(self, key):
"""Return the bucket index where the given key would be stored."""
return hash(key) % len(self.buckets)
def load_factor(self):
"""Return the load factor, the ratio of number of entries to buckets.
Best and worst case running time: ??? under what conditions? [TODO]"""
# TODO: Calculate load factor
# return ...
# load factor = entries/ buckets
return self.size / len(self.buckets)
def keys(self):
"""Return a list of all keys in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all keys in each of the buckets
all_keys = []
for bucket in self.buckets:
for key, value in bucket.items():
all_keys.append(key)
return all_keys
def values(self):
"""Return a list of all values in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all values in each of the buckets
all_values = []
for bucket in self.buckets:
for key, value in bucket.items():
all_values.append(value)
return all_values
def items(self):
"""Return a list of all entries (key-value pairs) in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all pairs of key-value entries in each of the buckets
all_items = []
for bucket in self.buckets:
all_items.extend(bucket.items())
return all_items
def length(self):
"""Return the number of key-value entries by traversing its buckets.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Count number of key-value entries in each of the buckets
item_count = 0
for bucket in self.buckets:
item_count += bucket.length()
return item_count
# Equivalent to this list comprehension:
return sum(bucket.length() for bucket in self.buckets)
def contains(self, key):
"""Return True if this hash table contains the given key, or False.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
return entry is not None # True or False
def get(self, key):
"""Return the value associated with the given key, or raise KeyError.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Return the given key's associated value
assert isinstance(entry, tuple)
assert len(entry) == 2
return entry[1]
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def set(self, key, value):
"""Insert or update the given key with its associated value.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# In this case, the given key's value is being updated
# Remove the old key-value entry from the bucket first
bucket.delete(entry)
self.size -= 1
# Insert the new key-value entry into the bucket in either case
bucket.append((key, value))
self.size += 1
# TODO: Check if the load factor exceeds a threshold such as 0.75
# ...
if self.load_factor() > 0.75:
# TODO: If so, automatically resize to reduce the load factor
# ...
self._resize()
def delete(self, key):
"""Delete the given key and its associated value, or raise KeyError.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Remove the key-value entry from the bucket
bucket.delete(entry)
self.size -= 1
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def _resize(self, new_size=None):
"""Resize this hash table's buckets and rehash all key-value entries.
Should be called automatically when load factor exceeds a threshold
such as 0.75 after an insertion (when set is called with a new key).
Best and worst case running time: ??? under what conditions? [TODO]
Best and worst case space usage: ??? what uses this memory? [TODO]"""
# If unspecified, choose new size dynamically based on current size
if new_size is None:
new_size = len(self.buckets) * 2 # Double size
# Option to reduce size if buckets are sparsely filled (low load factor)
        elif new_size == 0:
            new_size = len(self.buckets) // 2  # Half size
        # TODO: Get a list to temporarily hold all current key-value entries
        # ...
        old_items = self.items()
# TODO: Create a new list of new_size total empty linked list buckets
# ...
self.buckets = [LinkedList() for i in range(new_size)]
self.size = 0
# TODO: Insert each key-value entry into the new list of buckets,
# which will rehash them into a new bucket index based on the new size
# ...
for key, value in old_items:
self.set(key, value)
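# Worked example (sketch, not part of the assignment starter code): with the
# default 8 buckets, load factor = size / buckets, so the threshold check in
# set() (load factor > 0.75) first fires on the 7th insertion (7/8 = 0.875),
# which doubles the bucket count to 16 and rehashes every entry.
def demo_resize_threshold():
    table = HashTable(8)
    for i in range(7):
        table.set('key-{}'.format(i), i)
    return len(table.buckets), table.load_factor()  # (16, 7/16) after the resize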
def test_hash_table():
ht = HashTable(4)
print('HashTable: ' + str(ht))
print('Setting entries:')
ht.set('I', 1)
print('set(I, 1): ' + str(ht))
ht.set('V', 5)
print('set(V, 5): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
ht.set('X', 10)
print('set(X, 10): ' + str(ht))
ht.set('L', 50) # Should trigger resize
print('set(L, 50): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
print('Getting entries:')
print('get(I): ' + str(ht.get('I')))
print('get(V): ' + str(ht.get('V')))
print('get(X): ' + str(ht.get('X')))
print('get(L): ' + str(ht.get('L')))
print('contains(X): ' + str(ht.contains('X')))
print('contains(Z): ' + str(ht.contains('Z')))
print('Deleting entries:')
ht.delete('I')
print('delete(I): ' + str(ht))
ht.delete('V')
print('delete(V): ' + str(ht))
ht.delete('X')
print('delete(X): ' + str(ht))
ht.delete('L')
print('delete(L): ' + str(ht))
print('contains(X): ' + str(ht.contains('X')))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
if __name__ == '__main__':
test_hash_table()
| <filename>Code/hash-table/hashtable.py
#!python
from linkedlist import LinkedList
class HashTable(object):
def __init__(self, init_size=8):
"""Initialize this hash table with the given initial size."""
self.buckets = [LinkedList() for i in range(init_size)]
self.size = 0 # Number of key-value entries
def __str__(self):
"""Return a formatted string representation of this hash table."""
items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]
return '{' + ', '.join(items) + '}'
def __repr__(self):
"""Return a string representation of this hash table."""
return 'HashTable({!r})'.format(self.items())
def _bucket_index(self, key):
"""Return the bucket index where the given key would be stored."""
return hash(key) % len(self.buckets)
def load_factor(self):
"""Return the load factor, the ratio of number of entries to buckets.
Best and worst case running time: ??? under what conditions? [TODO]"""
# TODO: Calculate load factor
# return ...
# load factor = entries/ buckets
return self.size / len(self.buckets)
def keys(self):
"""Return a list of all keys in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all keys in each of the buckets
all_keys = []
for bucket in self.buckets:
for key, value in bucket.items():
all_keys.append(key)
return all_keys
def values(self):
"""Return a list of all values in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all values in each of the buckets
all_values = []
for bucket in self.buckets:
for key, value in bucket.items():
all_values.append(value)
return all_values
def items(self):
"""Return a list of all entries (key-value pairs) in this hash table.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Collect all pairs of key-value entries in each of the buckets
all_items = []
for bucket in self.buckets:
all_items.extend(bucket.items())
return all_items
def length(self):
"""Return the number of key-value entries by traversing its buckets.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Count number of key-value entries in each of the buckets
item_count = 0
for bucket in self.buckets:
item_count += bucket.length()
return item_count
# Equivalent to this list comprehension:
return sum(bucket.length() for bucket in self.buckets)
def contains(self, key):
"""Return True if this hash table contains the given key, or False.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
return entry is not None # True or False
def get(self, key):
"""Return the value associated with the given key, or raise KeyError.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Return the given key's associated value
assert isinstance(entry, tuple)
assert len(entry) == 2
return entry[1]
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def set(self, key, value):
"""Insert or update the given key with its associated value.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
# Check if an entry with the given key exists in that bucket
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# In this case, the given key's value is being updated
# Remove the old key-value entry from the bucket first
bucket.delete(entry)
self.size -= 1
# Insert the new key-value entry into the bucket in either case
bucket.append((key, value))
self.size += 1
# TODO: Check if the load factor exceeds a threshold such as 0.75
# ...
if self.load_factor() > 0.75:
# TODO: If so, automatically resize to reduce the load factor
# ...
self._resize()
def delete(self, key):
"""Delete the given key and its associated value, or raise KeyError.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# Remove the key-value entry from the bucket
bucket.delete(entry)
self.size -= 1
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def _resize(self, new_size=None):
"""Resize this hash table's buckets and rehash all key-value entries.
Should be called automatically when load factor exceeds a threshold
such as 0.75 after an insertion (when set is called with a new key).
Best and worst case running time: ??? under what conditions? [TODO]
Best and worst case space usage: ??? what uses this memory? [TODO]"""
        # If unspecified, choose new size dynamically based on current size
        if new_size is None:
            new_size = len(self.buckets) * 2  # Double size
        # Option to reduce size if buckets are sparsely filled (low load factor)
        elif new_size == 0:
            new_size = len(self.buckets) // 2  # Half size
        # Get a list to temporarily hold all current key-value entries
        old_items = self.items()
        # Create a new list of new_size total empty linked list buckets
        self.buckets = [LinkedList() for _ in range(new_size)]
        self.size = 0
        # Insert each key-value entry into the new list of buckets, which will
        # rehash them into a new bucket index based on the new size
        for key, value in old_items:
            self.set(key, value)
def test_hash_table():
ht = HashTable(4)
print('HashTable: ' + str(ht))
print('Setting entries:')
ht.set('I', 1)
print('set(I, 1): ' + str(ht))
ht.set('V', 5)
print('set(V, 5): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
ht.set('X', 10)
print('set(X, 10): ' + str(ht))
ht.set('L', 50) # Should trigger resize
print('set(L, 50): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
print('Getting entries:')
print('get(I): ' + str(ht.get('I')))
print('get(V): ' + str(ht.get('V')))
print('get(X): ' + str(ht.get('X')))
print('get(L): ' + str(ht.get('L')))
print('contains(X): ' + str(ht.contains('X')))
print('contains(Z): ' + str(ht.contains('Z')))
print('Deleting entries:')
ht.delete('I')
print('delete(I): ' + str(ht))
ht.delete('V')
print('delete(V): ' + str(ht))
ht.delete('X')
print('delete(X): ' + str(ht))
ht.delete('L')
print('delete(L): ' + str(ht))
print('contains(X): ' + str(ht.contains('X')))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
if __name__ == '__main__':
test_hash_table()
| en | 0.825442 | #!python Initialize this hash table with the given initial size. # Number of key-value entries Return a formatted string representation of this hash table. Return a string representation of this hash table. Return the bucket index where the given key would be stored. Return the load factor, the ratio of number of entries to buckets. Best and worst case running time: ??? under what conditions? [TODO] # TODO: Calculate load factor # return ... # load factor = entries/ buckets Return a list of all keys in this hash table. Best and worst case running time: ??? under what conditions? [TODO] # Collect all keys in each of the buckets Return a list of all values in this hash table. Best and worst case running time: ??? under what conditions? [TODO] # Collect all values in each of the buckets Return a list of all entries (key-value pairs) in this hash table. Best and worst case running time: ??? under what conditions? [TODO] # Collect all pairs of key-value entries in each of the buckets Return the number of key-value entries by traversing its buckets. Best and worst case running time: ??? under what conditions? [TODO] # Count number of key-value entries in each of the buckets # Equivalent to this list comprehension: Return True if this hash table contains the given key, or False. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO] # Find the bucket the given key belongs in # Check if an entry with the given key exists in that bucket # True or False Return the value associated with the given key, or raise KeyError. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO] # Find the bucket the given key belongs in # Find the entry with the given key in that bucket, if one exists # Found # Return the given key's associated value # Not found Insert or update the given key with its associated value. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO] # Find the bucket the given key belongs in # Find the entry with the given key in that bucket, if one exists # Check if an entry with the given key exists in that bucket # Found # In this case, the given key's value is being updated # Remove the old key-value entry from the bucket first # Insert the new key-value entry into the bucket in either case # TODO: Check if the load factor exceeds a threshold such as 0.75 # ... # TODO: If so, automatically resize to reduce the load factor # ... Delete the given key and its associated value, or raise KeyError. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO] # Find the bucket the given key belongs in # Find the entry with the given key in that bucket, if one exists # Found # Remove the key-value entry from the bucket # Not found Resize this hash table's buckets and rehash all key-value entries. Should be called automatically when load factor exceeds a threshold such as 0.75 after an insertion (when set is called with a new key). Best and worst case running time: ??? under what conditions? [TODO] Best and worst case space usage: ??? what uses this memory? [TODO] # If unspecified, choose new size dynamically based on current size # Double size # Option to reduce size if buckets are sparsely filled (low load factor) # Half size # TODO: Get a list to temporarily hold all current key-value entries # ... 
# TODO: Create a new list of new_size total empty linked list buckets # ... # TODO: Insert each key-value entry into the new list of buckets, # which will rehash them into a new bucket index based on the new size # ... # Should trigger resize | 4.082914 | 4 |
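A small illustrative sketch (not part of the dataset row above) of the two ideas the HashTable entry leans on: a key is mapped to a bucket with hash-modulo indexing, and the load factor (entries / buckets) is what set() compares against 0.75 before resizing. Python's built-in hash() stands in here for whatever hash function the original class uses.

def bucket_index(key, num_buckets):
    # Same modulo scheme as _bucket_index: any hashable key lands in one of num_buckets slots
    return hash(key) % num_buckets

num_buckets = 4
for key in ('I', 'V', 'X', 'L'):
    print(key, '->', bucket_index(key, num_buckets))
# After inserting all four keys the load factor is 4 / 4 = 1.0, which exceeds
# the 0.75 threshold, so a table like the one above would double its buckets.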
malaya/segmentation.py | ahmed3991/malaya | 1 | 6630889 | import json
import re
from functools import lru_cache
from math import log10
from malaya.text.regex import _expressions
from malaya.model.tf import Segmentation
from malaya.path import PATH_PREPROCESSING, S3_PATH_PREPROCESSING
from malaya.supervised import transformer as load_transformer
from malaya.function import check_file
from herpetologist import check_type
from typing import List
_transformer_availability = {
'small': {
'Size (MB)': 42.7,
'Quantized Size (MB)': 13.1,
'Sequence Accuracy': 0.8217,
},
'base': {
'Size (MB)': 234,
'Quantized Size (MB)': 63.8,
'Sequence Accuracy': 0.8759,
},
}
REGEX_TOKEN = re.compile(r'\b[a-z]{2,}\b')
NGRAM_SEP = '_'
def _read_stats(gram = 1):
try:
with open(PATH_PREPROCESSING[gram]['model']) as fopen:
return json.load(fopen)
except Exception as e:
raise Exception(
f"{e}, file corrupted due to some reasons, please run `malaya.clear_cache('preprocessing')` and try again"
)
class _Pdist(dict):
@staticmethod
def default_unk_func(key, total):
return 1.0 / total
def __init__(self, data = None, total = None, unk_func = None, **kwargs):
super().__init__(**kwargs)
data = data or {}
for key, count in data.items():
self[key] = self.get(key, 0) + int(count)
self.total = float(total or sum(self.values()))
self.unk_prob = unk_func or self.default_unk_func
def __call__(self, key):
if key in self:
return self[key] / self.total
else:
return self.unk_prob(key, self.total)
class Segmenter:
def __init__(self, max_split_length = 20):
self.unigrams = _read_stats(1)
self.bigrams = _read_stats(2)
self.N = sum(self.unigrams.values())
self.L = max_split_length
self.Pw = _Pdist(self.unigrams, self.N, self.unk_probability)
self.P2w = _Pdist(self.bigrams, self.N)
self.case_split = re.compile(_expressions['camel_split'])
def condProbWord(self, word, prev):
try:
return self.P2w[prev + NGRAM_SEP + word] / float(self.Pw[prev])
except KeyError:
return self.Pw(word)
@staticmethod
def unk_probability(key, total):
return 10.0 / (total * 10 ** len(key))
@staticmethod
def combine(first, rem):
(first_prob, first_word) = first
(rem_prob, rem_words) = rem
return first_prob + rem_prob, [first_word] + rem_words
def splits(self, text):
return [
(text[: i + 1], text[i + 1 :])
for i in range(min(len(text), self.L))
]
@lru_cache(maxsize = 65536)
def find_segment(self, text, prev = '<S>'):
if not text:
return 0.0, []
candidates = [
self.combine(
(log10(self.condProbWord(first, prev)), first),
self.find_segment(rem, first),
)
for first, rem in self.splits(text)
]
return max(candidates)
@lru_cache(maxsize = 65536)
def _segment(self, word):
if word.islower():
return ' '.join(self.find_segment(word)[1])
else:
return self.case_split.sub(r' \1', word)
@check_type
def segment(self, strings: List[str]):
"""
Segment strings.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
results = []
for string in strings:
string = string.split()
result = []
for word in string:
result.append(self._segment(word))
results.append(' '.join(result))
return results
def viterbi(max_split_length: int = 20, **kwargs):
"""
Load Segmenter class using viterbi algorithm.
Parameters
----------
max_split_length: int, (default=20)
max length of words in a sentence to segment
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
result : malaya.segmentation.Segmenter class
"""
check_file(PATH_PREPROCESSING[1], S3_PATH_PREPROCESSING[1], **kwargs)
check_file(PATH_PREPROCESSING[2], S3_PATH_PREPROCESSING[2], **kwargs)
return Segmenter(max_split_length = max_split_length)
def available_transformer():
"""
List available transformer models.
"""
from malaya.function import describe_availability
return describe_availability(_transformer_availability)
@check_type
def transformer(model: str = 'small', quantized: bool = False, **kwargs):
"""
Load transformer encoder-decoder model to Segmentize.
Parameters
----------
    model : str, optional (default='small')
Model architecture supported. Allowed values:
* ``'small'`` - Transformer SMALL parameters.
* ``'base'`` - Transformer BASE parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: malaya.model.tf.Segmentation class
"""
model = model.lower()
if model not in _transformer_availability:
raise Exception(
'model not supported, please check supported models from `malaya.segmentation.available_transformer()`.'
)
return load_transformer.load(
module = 'segmentation',
model = model,
encoder = 'yttm',
model_class = Segmentation,
quantized = quantized,
**kwargs,
)
| import json
import re
from functools import lru_cache
from math import log10
from malaya.text.regex import _expressions
from malaya.model.tf import Segmentation
from malaya.path import PATH_PREPROCESSING, S3_PATH_PREPROCESSING
from malaya.supervised import transformer as load_transformer
from malaya.function import check_file
from herpetologist import check_type
from typing import List
_transformer_availability = {
'small': {
'Size (MB)': 42.7,
'Quantized Size (MB)': 13.1,
'Sequence Accuracy': 0.8217,
},
'base': {
'Size (MB)': 234,
'Quantized Size (MB)': 63.8,
'Sequence Accuracy': 0.8759,
},
}
REGEX_TOKEN = re.compile(r'\b[a-z]{2,}\b')
NGRAM_SEP = '_'
def _read_stats(gram = 1):
try:
with open(PATH_PREPROCESSING[gram]['model']) as fopen:
return json.load(fopen)
except Exception as e:
raise Exception(
f"{e}, file corrupted due to some reasons, please run `malaya.clear_cache('preprocessing')` and try again"
)
class _Pdist(dict):
@staticmethod
def default_unk_func(key, total):
return 1.0 / total
def __init__(self, data = None, total = None, unk_func = None, **kwargs):
super().__init__(**kwargs)
data = data or {}
for key, count in data.items():
self[key] = self.get(key, 0) + int(count)
self.total = float(total or sum(self.values()))
self.unk_prob = unk_func or self.default_unk_func
def __call__(self, key):
if key in self:
return self[key] / self.total
else:
return self.unk_prob(key, self.total)
class Segmenter:
def __init__(self, max_split_length = 20):
self.unigrams = _read_stats(1)
self.bigrams = _read_stats(2)
self.N = sum(self.unigrams.values())
self.L = max_split_length
self.Pw = _Pdist(self.unigrams, self.N, self.unk_probability)
self.P2w = _Pdist(self.bigrams, self.N)
self.case_split = re.compile(_expressions['camel_split'])
def condProbWord(self, word, prev):
try:
return self.P2w[prev + NGRAM_SEP + word] / float(self.Pw[prev])
except KeyError:
return self.Pw(word)
@staticmethod
def unk_probability(key, total):
return 10.0 / (total * 10 ** len(key))
@staticmethod
def combine(first, rem):
(first_prob, first_word) = first
(rem_prob, rem_words) = rem
return first_prob + rem_prob, [first_word] + rem_words
def splits(self, text):
return [
(text[: i + 1], text[i + 1 :])
for i in range(min(len(text), self.L))
]
@lru_cache(maxsize = 65536)
def find_segment(self, text, prev = '<S>'):
if not text:
return 0.0, []
candidates = [
self.combine(
(log10(self.condProbWord(first, prev)), first),
self.find_segment(rem, first),
)
for first, rem in self.splits(text)
]
return max(candidates)
@lru_cache(maxsize = 65536)
def _segment(self, word):
if word.islower():
return ' '.join(self.find_segment(word)[1])
else:
return self.case_split.sub(r' \1', word)
@check_type
def segment(self, strings: List[str]):
"""
Segment strings.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
results = []
for string in strings:
string = string.split()
result = []
for word in string:
result.append(self._segment(word))
results.append(' '.join(result))
return results
def viterbi(max_split_length: int = 20, **kwargs):
"""
Load Segmenter class using viterbi algorithm.
Parameters
----------
max_split_length: int, (default=20)
max length of words in a sentence to segment
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
result : malaya.segmentation.Segmenter class
"""
check_file(PATH_PREPROCESSING[1], S3_PATH_PREPROCESSING[1], **kwargs)
check_file(PATH_PREPROCESSING[2], S3_PATH_PREPROCESSING[2], **kwargs)
return Segmenter(max_split_length = max_split_length)
def available_transformer():
"""
List available transformer models.
"""
from malaya.function import describe_availability
return describe_availability(_transformer_availability)
@check_type
def transformer(model: str = 'small', quantized: bool = False, **kwargs):
"""
Load transformer encoder-decoder model to Segmentize.
Parameters
----------
    model : str, optional (default='small')
Model architecture supported. Allowed values:
* ``'small'`` - Transformer SMALL parameters.
* ``'base'`` - Transformer BASE parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: malaya.model.tf.Segmentation class
"""
model = model.lower()
if model not in _transformer_availability:
raise Exception(
'model not supported, please check supported models from `malaya.segmentation.available_transformer()`.'
)
return load_transformer.load(
module = 'segmentation',
model = model,
encoder = 'yttm',
model_class = Segmentation,
quantized = quantized,
**kwargs,
)
| en | 0.37357 | Segment strings. Example, "sayasygkan negarasaya" -> "saya sygkan negara saya" Parameters ---------- strings : List[str] Returns ------- result: List[str] Load Segmenter class using viterbi algorithm. Parameters ---------- max_split_length: int, (default=20) max length of words in a sentence to segment validate: bool, optional (default=True) if True, malaya will check model availability and download if not available. Returns ------- result : malaya.segmentation.Segmenter class List available transformer models. Load transformer encoder-decoder model to Segmentize. Parameters ---------- model : str, optional (default='base') Model architecture supported. Allowed values: * ``'small'`` - Transformer SMALL parameters. * ``'base'`` - Transformer BASE parameters. quantized : bool, optional (default=False) if True, will load 8-bit quantized model. Quantized model not necessary faster, totally depends on the machine. Returns ------- result: malaya.model.tf.Segmentation class | 2.009169 | 2 |
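A hypothetical usage sketch for the segmentation module above, following its own docstrings; it assumes the `malaya` package is installed and that the pretrained statistics/model files can be fetched on first use.

import malaya

# Viterbi segmenter backed by the unigram/bigram statistics loaded by _read_stats
segmenter = malaya.segmentation.viterbi(max_split_length=20)
print(segmenter.segment(['sayasygkan negarasaya']))
# per the Segmenter.segment docstring: ['saya sygkan negara saya']

# transformer-based alternative; valid names come from available_transformer()
# model = malaya.segmentation.transformer(model='small', quantized=False)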
Mundo1/021.py | eliascastrosousa/exerciciospythonmundo1 | 0 | 6630890 | <filename>Mundo1/021.py
# desafio 021 - faça um programa em python que abra e reproduza o audio de um arquivo em MP3.
import pygame
pygame.init()
pygame.mixer.music.load('pedrapapel.mp3')
pygame.mixer.music.play()
while(pygame.mixer.music.get_busy()): pass | <filename>Mundo1/021.py
# desafio 021 - faça um programa em python que abra e reproduza o audio de um arquivo em MP3.
import pygame
pygame.init()
pygame.mixer.music.load('pedrapapel.mp3')
pygame.mixer.music.play()
while(pygame.mixer.music.get_busy()): pass | pt | 0.960386 | # desafio 021 - faça um programa em python que abra e reproduza o audio de um arquivo em MP3. | 3.463248 | 3 |
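The Portuguese comment in the entry above asks for a program that opens and plays the audio of an MP3 file. The sketch below shows the same idea with a gentler wait loop than the busy `while ...: pass` — it polls roughly ten times per second instead of spinning; it assumes any pygame build with the mixer available and a local 'pedrapapel.mp3'.

import pygame

pygame.init()
pygame.mixer.music.load('pedrapapel.mp3')
pygame.mixer.music.play()
clock = pygame.time.Clock()
while pygame.mixer.music.get_busy():
    clock.tick(10)  # sleep between polls instead of burning CPU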
rman_operators/__init__.py | N500/RenderManForBlender | 5 | 6630891 | <gh_stars>1-10
from . import rman_operators_printer
from . import rman_operators_view3d
from . import rman_operators_render
from . import rman_operators_rib
from . import rman_operators_nodetree
from . import rman_operators_collections
from . import rman_operators_editors
from . import rman_operators_stylized
from . import rman_operators_mesh
def register():
rman_operators_printer.register()
rman_operators_view3d.register()
rman_operators_render.register()
rman_operators_rib.register()
rman_operators_nodetree.register()
rman_operators_collections.register()
rman_operators_editors.register()
rman_operators_stylized.register()
rman_operators_mesh.register()
def unregister():
rman_operators_printer.unregister()
rman_operators_view3d.unregister()
rman_operators_render.unregister()
rman_operators_rib.unregister()
rman_operators_nodetree.unregister()
rman_operators_collections.unregister()
rman_operators_editors.unregister()
rman_operators_stylized.unregister()
rman_operators_mesh.unregister() | from . import rman_operators_printer
from . import rman_operators_view3d
from . import rman_operators_render
from . import rman_operators_rib
from . import rman_operators_nodetree
from . import rman_operators_collections
from . import rman_operators_editors
from . import rman_operators_stylized
from . import rman_operators_mesh
def register():
rman_operators_printer.register()
rman_operators_view3d.register()
rman_operators_render.register()
rman_operators_rib.register()
rman_operators_nodetree.register()
rman_operators_collections.register()
rman_operators_editors.register()
rman_operators_stylized.register()
rman_operators_mesh.register()
def unregister():
rman_operators_printer.unregister()
rman_operators_view3d.unregister()
rman_operators_render.unregister()
rman_operators_rib.unregister()
rman_operators_nodetree.unregister()
rman_operators_collections.unregister()
rman_operators_editors.unregister()
rman_operators_stylized.unregister()
rman_operators_mesh.unregister() | none | 1 | 1.148152 | 1 |
|
python/solar.py | patrickmoffitt/Local-Solar-Noon | 1 | 6630892 | <filename>python/solar.py
#
# Created by <NAME> on 2019-02-04.
#
from math import sin, trunc
from time import gmtime, localtime, struct_time
from datetime import datetime, timedelta, timezone
class Solar:
# Days since Jan 1, 2000 12:00 UT
def epoch_2k_day(self, _query):
q = _query
tz = self.get_utc_offset()
query_date = datetime(q['tm']['tm_year'],
q['tm']['tm_mon'],
q['tm']['tm_mday'],
q['tm']['tm_hour'],
tzinfo=timezone(timedelta(minutes=tz['minutes'])))
epoch_year = datetime(2000, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
return (query_date - epoch_year) / timedelta(days=1)
# System of equations based upon Fourier analysis of a large MICA data set.
# Only valid from 2000 to 2050.
@staticmethod
def equation_of_time(_days):
cycle = round(_days / 365.25)
theta = 0.0172024 * (_days - 365.25 * cycle)
amp1 = 7.36303 - cycle * 0.00009
amp2 = 9.92465 - cycle * 0.00014
rho1 = 3.07892 - cycle * 0.00019
rho2 = -1.38995 + cycle * 0.00013
# Equation Of Time (EOT)
eot1 = amp1 * sin(1 * (theta + rho1))
eot2 = amp2 * sin(2 * (theta + rho2))
eot3 = 0.31730 * sin(3 * (theta - 0.94686))
eot4 = 0.21922 * sin(4 * (theta - 0.60716))
_eot = 0.00526 + eot1 + eot2 + eot3 + eot4 # minutes
return _eot
@staticmethod
def float_to_fixed(_float, width=2):
return round(_float, width)
@staticmethod
def get_query_now(_longitude):
_tm = struct_time(localtime())
return {
'tm': {
'tm_year': _tm.tm_year,
'tm_mon': _tm.tm_mon,
'tm_mday': _tm.tm_mday,
'tm_hour': _tm.tm_hour
},
'longitude': _longitude
}
# Get the local offset from UTC.
@staticmethod
def get_utc_offset():
utc = struct_time(gmtime())
local = struct_time(localtime())
day_ahead = local.tm_yday - utc.tm_yday
_days = day_ahead * 24
hours = local.tm_hour - utc.tm_hour + _days
minutes = local.tm_min - utc.tm_min
return {
'hours': hours + (minutes / 60.0),
'minutes': (hours * 60) + minutes,
'seconds': ((hours * 60) + minutes) * 60
}
# Adjust EOT for longitude and timezone.
def longitude_offset(self, _eot, _longitude):
tz = self.get_utc_offset()
return -1 * (_eot + (4 * _longitude) - tz['minutes'])
# Format decimal minutes to hours, minutes, and seconds text.
def minutes_to_clock(self, _time):
hours = trunc(_time / 60)
minutes = trunc(_time - (hours * 60))
seconds = round(abs(_time - (hours * 60) - minutes) * 60)
return {
'hours': self.zero_fill(hours, 2),
'minutes': self.zero_fill(minutes, 2),
'seconds': self.zero_fill(seconds, 2)
}
def solar_noon(self, longitude_offset):
noon_minutes = 12 * 60
t = self.minutes_to_clock(noon_minutes + longitude_offset)
return t['hours'] + ':' + t['minutes'] + ':' + t['seconds']
# Pad a number with leading zeros.
@staticmethod
def zero_fill(number, _width):
pad = _width - len(str(trunc(abs(number)))) + 1
return '{0:{fill}{width}}'.format(number, fill=0, width=pad)
| <filename>python/solar.py
#
# Created by <NAME> on 2019-02-04.
#
from math import sin, trunc
from time import gmtime, localtime, struct_time
from datetime import datetime, timedelta, timezone
class Solar:
# Days since Jan 1, 2000 12:00 UT
def epoch_2k_day(self, _query):
q = _query
tz = self.get_utc_offset()
query_date = datetime(q['tm']['tm_year'],
q['tm']['tm_mon'],
q['tm']['tm_mday'],
q['tm']['tm_hour'],
tzinfo=timezone(timedelta(minutes=tz['minutes'])))
epoch_year = datetime(2000, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
return (query_date - epoch_year) / timedelta(days=1)
# System of equations based upon Fourier analysis of a large MICA data set.
# Only valid from 2000 to 2050.
@staticmethod
def equation_of_time(_days):
cycle = round(_days / 365.25)
theta = 0.0172024 * (_days - 365.25 * cycle)
amp1 = 7.36303 - cycle * 0.00009
amp2 = 9.92465 - cycle * 0.00014
rho1 = 3.07892 - cycle * 0.00019
rho2 = -1.38995 + cycle * 0.00013
# Equation Of Time (EOT)
eot1 = amp1 * sin(1 * (theta + rho1))
eot2 = amp2 * sin(2 * (theta + rho2))
eot3 = 0.31730 * sin(3 * (theta - 0.94686))
eot4 = 0.21922 * sin(4 * (theta - 0.60716))
_eot = 0.00526 + eot1 + eot2 + eot3 + eot4 # minutes
return _eot
@staticmethod
def float_to_fixed(_float, width=2):
return round(_float, width)
@staticmethod
def get_query_now(_longitude):
_tm = struct_time(localtime())
return {
'tm': {
'tm_year': _tm.tm_year,
'tm_mon': _tm.tm_mon,
'tm_mday': _tm.tm_mday,
'tm_hour': _tm.tm_hour
},
'longitude': _longitude
}
# Get the local offset from UTC.
@staticmethod
def get_utc_offset():
utc = struct_time(gmtime())
local = struct_time(localtime())
day_ahead = local.tm_yday - utc.tm_yday
_days = day_ahead * 24
hours = local.tm_hour - utc.tm_hour + _days
minutes = local.tm_min - utc.tm_min
return {
'hours': hours + (minutes / 60.0),
'minutes': (hours * 60) + minutes,
'seconds': ((hours * 60) + minutes) * 60
}
# Adjust EOT for longitude and timezone.
def longitude_offset(self, _eot, _longitude):
tz = self.get_utc_offset()
return -1 * (_eot + (4 * _longitude) - tz['minutes'])
# Format decimal minutes to hours, minutes, and seconds text.
def minutes_to_clock(self, _time):
hours = trunc(_time / 60)
minutes = trunc(_time - (hours * 60))
seconds = round(abs(_time - (hours * 60) - minutes) * 60)
return {
'hours': self.zero_fill(hours, 2),
'minutes': self.zero_fill(minutes, 2),
'seconds': self.zero_fill(seconds, 2)
}
def solar_noon(self, longitude_offset):
noon_minutes = 12 * 60
t = self.minutes_to_clock(noon_minutes + longitude_offset)
return t['hours'] + ':' + t['minutes'] + ':' + t['seconds']
# Pad a number with leading zeros.
@staticmethod
def zero_fill(number, _width):
pad = _width - len(str(trunc(abs(number)))) + 1
return '{0:{fill}{width}}'.format(number, fill=0, width=pad)
| en | 0.755692 | # # Created by <NAME> on 2019-02-04. # # Days since Jan 1, 2000 12:00 UT # System of equations based upon Fourier analysis of a large MICA data set. # Only valid from 2000 to 2050. # Equation Of Time (EOT) # minutes # Get the local offset from UTC. # Adjust EOT for longitude and timezone. # Format decimal minutes to hours, minutes, and seconds text. # Pad a number with leading zeros. | 2.880506 | 3 |
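An illustrative driver for the Solar class above (not part of the original repo): build a query for "now", get days since the J2000 epoch, evaluate the equation of time, fold in the longitude/timezone offset, and format local solar noon. The import path and the example longitude are assumptions.

from solar import Solar

solar = Solar()
query = solar.get_query_now(-71.06)        # longitude in degrees, negative west of Greenwich
days = solar.epoch_2k_day(query)
eot = solar.equation_of_time(days)         # minutes
offset = solar.longitude_offset(eot, query['longitude'])
print('Local solar noon today:', solar.solar_noon(offset))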
tests/unit/test_events.py | jgu2/jade | 15 | 6630893 | <reponame>jgu2/jade<filename>tests/unit/test_events.py
"""
Unit tests for job event object and methods
"""
import os
from jade.events import (
StructuredLogEvent,
StructuredErrorLogEvent,
EventsSummary,
EVENT_NAME_UNHANDLED_ERROR,
)
def test_structured_event__create():
"""Test class initialization and methods"""
event = StructuredLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
country="Canada",
foo="foo info",
bar="bar info",
)
assert "timestamp" in str(event)
assert "source" in str(event)
assert "foo" in event.data
assert "bar" in event.data
def test_structured_error_event__create():
"""Test class initialization and methods"""
try:
raise Exception("test")
except Exception:
event = StructuredErrorLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
)
assert "timestamp" in str(event)
assert "error" in event.data
assert "filename" in event.data
assert "lineno" in event.data
def test_event_summary__show_events(test_data_dir, capsys):
"""Should print tabular events in terminal"""
event_dir = os.path.join(test_data_dir, "events", "job-outputs", "australia")
event_summary = EventsSummary(event_dir)
event_summary.show_events(EVENT_NAME_UNHANDLED_ERROR)
captured = capsys.readouterr()
assert "Exception" in captured.out
assert "australia" in captured.out
assert "united_states" not in captured.out
| """
Unit tests for job event object and methods
"""
import os
from jade.events import (
StructuredLogEvent,
StructuredErrorLogEvent,
EventsSummary,
EVENT_NAME_UNHANDLED_ERROR,
)
def test_structured_event__create():
"""Test class initialization and methods"""
event = StructuredLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
country="Canada",
foo="foo info",
bar="bar info",
)
assert "timestamp" in str(event)
assert "source" in str(event)
assert "foo" in event.data
assert "bar" in event.data
def test_structured_error_event__create():
"""Test class initialization and methods"""
try:
raise Exception("test")
except Exception:
event = StructuredErrorLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
)
assert "timestamp" in str(event)
assert "error" in event.data
assert "filename" in event.data
assert "lineno" in event.data
def test_event_summary__show_events(test_data_dir, capsys):
"""Should print tabular events in terminal"""
event_dir = os.path.join(test_data_dir, "events", "job-outputs", "australia")
event_summary = EventsSummary(event_dir)
event_summary.show_events(EVENT_NAME_UNHANDLED_ERROR)
captured = capsys.readouterr()
assert "Exception" in captured.out
assert "australia" in captured.out
assert "united_states" not in captured.out | en | 0.857021 | Unit tests for job event object and methods Test class initialization and methods Test class initialization and methods Should print tabular events in terminal | 2.708672 | 3 |
stock_portfolio/models/stock.py | tyler-fishbone/new_stock_portfolio | 0 | 6630894 | <reponame>tyler-fishbone/new_stock_portfolio<gh_stars>0
from sqlalchemy import (
Column,
Integer,
String,
DateTime,
ForeignKey,
)
from .meta import Base
from sqlalchemy.orm import relationship
from .association import association_table
# need to change this to stock info
class Stock(Base):
__tablename__ = 'stocks'
id = Column(Integer, primary_key = True)
account_id = relationship('Account', secondary = association_table, back_populates = 'stock_id')
symbol = Column(String, nullable=False, unique=True)
companyName = Column(String)
exchange = Column(String)
industry = Column(String)
website = Column(String)
description = Column(String)
CEO = Column(String)
issueType = Column(String)
sector = Column(String)
date = Column(DateTime) | from sqlalchemy import (
Column,
Integer,
String,
DateTime,
ForeignKey,
)
from .meta import Base
from sqlalchemy.orm import relationship
from .association import association_table
# need to change this to stock info
class Stock(Base):
__tablename__ = 'stocks'
id = Column(Integer, primary_key = True)
account_id = relationship('Account', secondary = association_table, back_populates = 'stock_id')
symbol = Column(String, nullable=False, unique=True)
companyName = Column(String)
exchange = Column(String)
industry = Column(String)
website = Column(String)
description = Column(String)
CEO = Column(String)
issueType = Column(String)
sector = Column(String)
date = Column(DateTime) | en | 0.948038 | # need to change this to stock info | 2.623194 | 3 |
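A hypothetical usage sketch for the Stock model above; it assumes an already-configured SQLAlchemy session and an Account model wired through the same association table, neither of which is shown in this row.

from sqlalchemy.orm import Session

def add_stock(session: Session) -> Stock:
    # Only columns declared on the model above are used here
    stock = Stock(
        symbol='AAPL',
        companyName='Apple Inc.',
        exchange='NASDAQ',
        sector='Technology',
    )
    session.add(stock)
    session.commit()
    return stock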
mak-example/parse.py | ConnectedHomes/kafkatos3 | 6 | 6630895 | <reponame>ConnectedHomes/kafkatos3
#!/usr/bin/env python
import argparse
import sys
from kafkatos3.MessageArchiveKafka import MessageArchiveKafkaReader, MessageArchiveKafkaRecord, KafkaMessage
def main(argv):
parser = argparse.ArgumentParser(description='Example script to parse a MAK file', prog=argv[0])
parser.add_argument('file', help='filename to parse')
args = parser.parse_args(args=argv[1:])
bm = MessageArchiveKafkaReader(args.file)
header = bm.get_header()
print("File topic is " + header.get_topic())
print("File partition is " + str(header.get_partition()))
print("Staring offset is " + str(header.get_start_offset()))
print("File created at " + str(header.get_starttime()))
while bm.has_more_messages():
message = bm.read_message()
print("Processing message with offset: " + str(
message.offset) + ", key: " + message.key + ", value: " + message.value)
def entry_point():
"""Zero-argument entry point for use with setuptools/distribute."""
raise SystemExit(main(sys.argv))
if __name__ == '__main__':
entry_point()
| #!/usr/bin/env python
import argparse
import sys
from kafkatos3.MessageArchiveKafka import MessageArchiveKafkaReader, MessageArchiveKafkaRecord, KafkaMessage
def main(argv):
parser = argparse.ArgumentParser(description='Example script to parse a MAK file', prog=argv[0])
parser.add_argument('file', help='filename to parse')
args = parser.parse_args(args=argv[1:])
bm = MessageArchiveKafkaReader(args.file)
header = bm.get_header()
print("File topic is " + header.get_topic())
print("File partition is " + str(header.get_partition()))
print("Staring offset is " + str(header.get_start_offset()))
print("File created at " + str(header.get_starttime()))
while bm.has_more_messages():
message = bm.read_message()
print("Processing message with offset: " + str(
message.offset) + ", key: " + message.key + ", value: " + message.value)
def entry_point():
"""Zero-argument entry point for use with setuptools/distribute."""
raise SystemExit(main(sys.argv))
if __name__ == '__main__':
entry_point() | en | 0.413593 | #!/usr/bin/env python Zero-argument entry point for use with setuptools/distribute. | 2.561564 | 3 |
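A hypothetical way to drive the example parser above without a shell: main(argv) expects the program name at argv[0] and the .mak filename as the only positional argument. The module name and the archive filename below are assumptions.

import sys
from parse import main   # assumes the script above is importable as `parse`

sys.exit(main(['parse.py', 'topic-0.mak']))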
backend/data/jimm/models/__init__.py | MikeOwino/JittorVis | 139 | 6630896 | from .efficientnet import *
from .layers import *
from .resnet import *
from .resnetv2 import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .hrnet import *
from .swin_transformer import *
from .volo import *
| from .efficientnet import *
from .layers import *
from .resnet import *
from .resnetv2 import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .hrnet import *
from .swin_transformer import *
from .volo import *
| none | 1 | 1.02289 | 1 |
|
events/views.py | exenin/Django-CRM | 0 | 6630897 | <reponame>exenin/Django-CRM
from datetime import date, datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
TemplateView, UpdateView, View)
from common.models import Attachments, Comment, User
from common.tasks import send_email_user_mentions
from events.forms import EventAttachmentForm, EventCommentForm, EventForm
from events.models import Event
from events.tasks import send_email
from common.access_decorators_mixins import (
sales_access_required, marketing_access_required, SalesAccessRequiredMixin, MarketingAccessRequiredMixin)
from teams.models import Teams
@login_required
@sales_access_required
def events_list(request):
    if request.user.role == 'ADMIN' or request.user.is_superuser:
        users = User.objects.all()
    elif request.user.google.all():
        users = User.objects.none()
    elif request.user.role == 'USER':
        users = User.objects.filter(role='ADMIN')
    else:
        # Fall back to an empty queryset so `users` is always bound below
        users = User.objects.none()
if request.method == 'GET':
context = {}
if request.user.role == 'ADMIN' or request.user.is_superuser:
events = Event.objects.all().distinct()
else:
events = Event.objects.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
context['events'] = events.order_by('id')
# context['status'] = status
context['users'] = users
return render(request, 'events_list.html', context)
if request.method == 'POST':
context = {}
# context['status'] = status
context['users'] = users
events = Event.objects.filter()
if request.user.role == 'ADMIN' or request.user.is_superuser:
events = events
else:
events = events.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
if request.POST.get('event_name', None):
events = events.filter(
name__icontains=request.POST.get('event_name'))
if request.POST.get('created_by', None):
events = events.filter(
created_by__id=request.POST.get('created_by'))
if request.POST.getlist('assigned_to', None):
events = events.filter(
assigned_to__in=request.POST.getlist('assigned_to'))
context['assigned_to'] = request.POST.getlist('assigned_to')
if request.POST.get('date_of_meeting', None):
events = events.filter(
date_of_meeting=request.POST.get('date_of_meeting'))
context['events'] = events.distinct().order_by('id')
return render(request, 'events_list.html', context)
@login_required
@sales_access_required
def event_create(request):
if request.method == 'GET':
context = {}
context["form"] = EventForm(request_user=request.user)
return render(request, 'event_create.html', context)
if request.method == 'POST':
form = EventForm(request.POST, request_user=request.user)
if form.is_valid():
start_date = form.cleaned_data.get('start_date')
end_date = form.cleaned_data.get('end_date')
# recurring_days
recurring_days = request.POST.getlist('recurring_days')
if form.cleaned_data.get('event_type') == 'Non-Recurring':
event = form.save(commit=False)
event.date_of_meeting = start_date
event.created_by = request.user
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
if form.cleaned_data.get('event_type') == 'Recurring':
delta = end_date - start_date
all_dates = []
required_dates = []
for day in range(delta.days + 1):
each_date = (start_date + timedelta(days=day))
if each_date.strftime("%A") in recurring_days:
required_dates.append(each_date)
for each in required_dates:
each = datetime.strptime(str(each), '%Y-%m-%d').date()
data = form.cleaned_data
event = Event.objects.create(
created_by=request.user, start_date=start_date, end_date=end_date,
name=data['name'], event_type=data['event_type'],
description=data['description'], start_time=data['start_time'],
end_time=data['end_time'], date_of_meeting=each
)
event.contacts.add(*request.POST.getlist('contacts'))
event.assigned_to.add(*request.POST.getlist('assigned_to'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
return JsonResponse({'error': False, 'success_url': reverse('events:events_list')})
else:
return JsonResponse({'error': True, 'errors': form.errors, })
@login_required
@sales_access_required
def event_detail_view(request, event_id):
event = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event.created_by == request.user or request.user in event.assigned_to.all()):
raise PermissionDenied
if request.method == 'GET':
context = {}
context['event'] = event
context['attachments'] = event.events_attachment.all()
context['comments'] = event.events_comments.all()
if request.user.is_superuser or request.user.role == 'ADMIN':
context['users_mention'] = list(
User.objects.all().values('username'))
elif request.user != event.created_by:
context['users_mention'] = [
{'username': event.created_by.username}]
else:
context['users_mention'] = list(
event.assigned_to.all().values('username'))
return render(request, 'event_detail.html', context)
@login_required
@sales_access_required
def event_update(request, event_id):
event_obj = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event_obj.created_by == request.user or request.user in event_obj.assigned_to.all()):
raise PermissionDenied
if request.method == 'GET':
context = {}
context["event_obj"] = event_obj
context["form"] = EventForm(
instance=event_obj, request_user=request.user)
selected_recurring_days = Event.objects.filter(
name=event_obj.name).values_list('date_of_meeting', flat=True)
selected_recurring_days = [day.strftime(
'%A') for day in selected_recurring_days]
context['selected_recurring_days'] = selected_recurring_days
return render(request, 'event_create.html', context)
if request.method == 'POST':
form = EventForm(request.POST, instance=event_obj,
request_user=request.user)
if form.is_valid():
start_date = form.cleaned_data.get('start_date')
end_date = form.cleaned_data.get('end_date')
# recurring_days
# recurring_days = request.POST.getlist('days')
if form.cleaned_data.get('event_type') == 'Non-Recurring':
event = form.save(commit=False)
event.date_of_meeting = start_date
# event.created_by = request.user
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
if form.cleaned_data.get('event_type') == 'Recurring':
event = form.save(commit=False)
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
# event.contacts.add(*request.POST.getlist('contacts'))
# event.assigned_to.add(*request.POST.getlist('assigned_to'))
return JsonResponse({'error': False, 'success_url': reverse('events:events_list')})
else:
return JsonResponse({'error': True, 'errors': form.errors, })
@login_required
@sales_access_required
def event_delete(request, event_id):
event = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event.created_by == request.user):
raise PermissionDenied
event.delete()
return redirect('events:events_list')
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
form_class = EventCommentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.event = get_object_or_404(
Event, id=request.POST.get('event_id'))
if (
request.user == self.event.created_by or request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to comment for this account."}
return JsonResponse(data)
def form_valid(self, form):
comment = form.save(commit=False)
comment.commented_by = self.request.user
comment.event = self.event
comment.save()
comment_id = comment.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'events', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": comment.id, "comment": comment.comment,
"commented_on": comment.commented_on,
"commented_by": comment.commented_by.email
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.comment_obj = get_object_or_404(
Comment, id=request.POST.get("commentid"))
if request.user == self.comment_obj.commented_by:
form = EventCommentForm(request.POST, instance=self.comment_obj)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {'error': "You don't have permission to edit this comment."}
return JsonResponse(data)
def form_valid(self, form):
self.comment_obj.comment = form.cleaned_data.get("comment")
self.comment_obj.save(update_fields=["comment"])
comment_id = self.comment_obj.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'events', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": self.comment_obj.id,
"comment": self.comment_obj.comment,
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Comment, id=request.POST.get("comment_id"))
if request.user == self.object.commented_by:
self.object.delete()
data = {"cid": request.POST.get("comment_id")}
return JsonResponse(data)
data = {'error': "You don't have permission to delete this comment."}
return JsonResponse(data)
class AddAttachmentView(LoginRequiredMixin, CreateView):
model = Attachments
form_class = EventAttachmentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.event = get_object_or_404(
Event, id=request.POST.get('event_id'))
if (
request.user == self.event.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to add attachment \
for this account."}
return JsonResponse(data)
def form_valid(self, form):
attachment = form.save(commit=False)
attachment.created_by = self.request.user
attachment.file_name = attachment.attachment.name
attachment.event = self.event
attachment.save()
return JsonResponse({
"attachment_id": attachment.id,
"attachment": attachment.file_name,
"attachment_url": attachment.attachment.url,
"download_url": reverse('common:download_attachment',
kwargs={'pk': attachment.id}),
"attachment_display": attachment.get_file_type_display(),
"created_on": attachment.created_on,
"created_by": attachment.created_by.email,
"file_type": attachment.file_type()
})
def form_invalid(self, form):
return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Attachments, id=request.POST.get("attachment_id"))
if (
request.user == self.object.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
self.object.delete()
data = {"acd": request.POST.get("attachment_id")}
return JsonResponse(data)
data = {
'error': "You don't have permission to delete this attachment."}
return JsonResponse(data)
| from datetime import date, datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
TemplateView, UpdateView, View)
from common.models import Attachments, Comment, User
from common.tasks import send_email_user_mentions
from events.forms import EventAttachmentForm, EventCommentForm, EventForm
from events.models import Event
from events.tasks import send_email
from common.access_decorators_mixins import (
sales_access_required, marketing_access_required, SalesAccessRequiredMixin, MarketingAccessRequiredMixin)
from teams.models import Teams
@login_required
@sales_access_required
def events_list(request):
    if request.user.role == 'ADMIN' or request.user.is_superuser:
        users = User.objects.all()
    elif request.user.google.all():
        users = User.objects.none()
    elif request.user.role == 'USER':
        users = User.objects.filter(role='ADMIN')
    else:
        # Fall back to an empty queryset so `users` is always bound below
        users = User.objects.none()
if request.method == 'GET':
context = {}
if request.user.role == 'ADMIN' or request.user.is_superuser:
events = Event.objects.all().distinct()
else:
events = Event.objects.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
context['events'] = events.order_by('id')
# context['status'] = status
context['users'] = users
return render(request, 'events_list.html', context)
if request.method == 'POST':
context = {}
# context['status'] = status
context['users'] = users
events = Event.objects.filter()
if request.user.role == 'ADMIN' or request.user.is_superuser:
events = events
else:
events = events.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
if request.POST.get('event_name', None):
events = events.filter(
name__icontains=request.POST.get('event_name'))
if request.POST.get('created_by', None):
events = events.filter(
created_by__id=request.POST.get('created_by'))
if request.POST.getlist('assigned_to', None):
events = events.filter(
assigned_to__in=request.POST.getlist('assigned_to'))
context['assigned_to'] = request.POST.getlist('assigned_to')
if request.POST.get('date_of_meeting', None):
events = events.filter(
date_of_meeting=request.POST.get('date_of_meeting'))
context['events'] = events.distinct().order_by('id')
return render(request, 'events_list.html', context)
@login_required
@sales_access_required
def event_create(request):
if request.method == 'GET':
context = {}
context["form"] = EventForm(request_user=request.user)
return render(request, 'event_create.html', context)
if request.method == 'POST':
form = EventForm(request.POST, request_user=request.user)
if form.is_valid():
start_date = form.cleaned_data.get('start_date')
end_date = form.cleaned_data.get('end_date')
# recurring_days
recurring_days = request.POST.getlist('recurring_days')
if form.cleaned_data.get('event_type') == 'Non-Recurring':
event = form.save(commit=False)
event.date_of_meeting = start_date
event.created_by = request.user
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
if form.cleaned_data.get('event_type') == 'Recurring':
delta = end_date - start_date
all_dates = []
required_dates = []
for day in range(delta.days + 1):
each_date = (start_date + timedelta(days=day))
if each_date.strftime("%A") in recurring_days:
required_dates.append(each_date)
for each in required_dates:
each = datetime.strptime(str(each), '%Y-%m-%d').date()
data = form.cleaned_data
event = Event.objects.create(
created_by=request.user, start_date=start_date, end_date=end_date,
name=data['name'], event_type=data['event_type'],
description=data['description'], start_time=data['start_time'],
end_time=data['end_time'], date_of_meeting=each
)
event.contacts.add(*request.POST.getlist('contacts'))
event.assigned_to.add(*request.POST.getlist('assigned_to'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
return JsonResponse({'error': False, 'success_url': reverse('events:events_list')})
else:
return JsonResponse({'error': True, 'errors': form.errors, })
@login_required
@sales_access_required
def event_detail_view(request, event_id):
event = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event.created_by == request.user or request.user in event.assigned_to.all()):
raise PermissionDenied
if request.method == 'GET':
context = {}
context['event'] = event
context['attachments'] = event.events_attachment.all()
context['comments'] = event.events_comments.all()
if request.user.is_superuser or request.user.role == 'ADMIN':
context['users_mention'] = list(
User.objects.all().values('username'))
elif request.user != event.created_by:
context['users_mention'] = [
{'username': event.created_by.username}]
else:
context['users_mention'] = list(
event.assigned_to.all().values('username'))
return render(request, 'event_detail.html', context)
@login_required
@sales_access_required
def event_update(request, event_id):
event_obj = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event_obj.created_by == request.user or request.user in event_obj.assigned_to.all()):
raise PermissionDenied
if request.method == 'GET':
context = {}
context["event_obj"] = event_obj
context["form"] = EventForm(
instance=event_obj, request_user=request.user)
selected_recurring_days = Event.objects.filter(
name=event_obj.name).values_list('date_of_meeting', flat=True)
selected_recurring_days = [day.strftime(
'%A') for day in selected_recurring_days]
context['selected_recurring_days'] = selected_recurring_days
return render(request, 'event_create.html', context)
if request.method == 'POST':
form = EventForm(request.POST, instance=event_obj,
request_user=request.user)
if form.is_valid():
start_date = form.cleaned_data.get('start_date')
end_date = form.cleaned_data.get('end_date')
# recurring_days
# recurring_days = request.POST.getlist('days')
if form.cleaned_data.get('event_type') == 'Non-Recurring':
event = form.save(commit=False)
event.date_of_meeting = start_date
# event.created_by = request.user
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
if form.cleaned_data.get('event_type') == 'Recurring':
event = form.save(commit=False)
event.save()
form.save_m2m()
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = event.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
event.assigned_to.add(user_id)
send_email.delay(
event.id, domain=request.get_host(), protocol=request.scheme)
# event.contacts.add(*request.POST.getlist('contacts'))
# event.assigned_to.add(*request.POST.getlist('assigned_to'))
return JsonResponse({'error': False, 'success_url': reverse('events:events_list')})
else:
return JsonResponse({'error': True, 'errors': form.errors, })
@login_required
@sales_access_required
def event_delete(request, event_id):
event = get_object_or_404(Event, pk=event_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or event.created_by == request.user):
raise PermissionDenied
event.delete()
return redirect('events:events_list')
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
form_class = EventCommentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.event = get_object_or_404(
Event, id=request.POST.get('event_id'))
if (
request.user == self.event.created_by or request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to comment for this account."}
return JsonResponse(data)
def form_valid(self, form):
comment = form.save(commit=False)
comment.commented_by = self.request.user
comment.event = self.event
comment.save()
comment_id = comment.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'events', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": comment.id, "comment": comment.comment,
"commented_on": comment.commented_on,
"commented_by": comment.commented_by.email
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.comment_obj = get_object_or_404(
Comment, id=request.POST.get("commentid"))
if request.user == self.comment_obj.commented_by:
form = EventCommentForm(request.POST, instance=self.comment_obj)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {'error': "You don't have permission to edit this comment."}
return JsonResponse(data)
def form_valid(self, form):
self.comment_obj.comment = form.cleaned_data.get("comment")
self.comment_obj.save(update_fields=["comment"])
comment_id = self.comment_obj.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'events', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": self.comment_obj.id,
"comment": self.comment_obj.comment,
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Comment, id=request.POST.get("comment_id"))
if request.user == self.object.commented_by:
self.object.delete()
data = {"cid": request.POST.get("comment_id")}
return JsonResponse(data)
data = {'error': "You don't have permission to delete this comment."}
return JsonResponse(data)
class AddAttachmentView(LoginRequiredMixin, CreateView):
model = Attachments
form_class = EventAttachmentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.event = get_object_or_404(
Event, id=request.POST.get('event_id'))
if (
request.user == self.event.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to add attachment \
for this account."}
return JsonResponse(data)
def form_valid(self, form):
attachment = form.save(commit=False)
attachment.created_by = self.request.user
attachment.file_name = attachment.attachment.name
attachment.event = self.event
attachment.save()
return JsonResponse({
"attachment_id": attachment.id,
"attachment": attachment.file_name,
"attachment_url": attachment.attachment.url,
"download_url": reverse('common:download_attachment',
kwargs={'pk': attachment.id}),
"attachment_display": attachment.get_file_type_display(),
"created_on": attachment.created_on,
"created_by": attachment.created_by.email,
"file_type": attachment.file_type()
})
def form_invalid(self, form):
return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Attachments, id=request.POST.get("attachment_id"))
if (
request.user == self.object.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
self.object.delete()
data = {"acd": request.POST.get("attachment_id")}
return JsonResponse(data)
data = {
'error': "You don't have permission to delete this attachment."}
        return JsonResponse(data)
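# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# The comment/attachment views above take form-encoded POSTs and answer with JSON.
# A client call could look like this; the host and URL path are assumptions, while
# the field names ('event_id', 'comment') and the response keys come from the view code.
import requests

def post_event_comment(session_cookies, event_id, text):
    response = requests.post(
        "http://localhost:8000/events/comment/add/",  # hypothetical route
        data={"event_id": event_id, "comment": text},
        cookies=session_cookies,
    )
    # On success AddCommentView returns comment_id, comment, commented_on and commented_by
    return response.json()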
src/jodlgang/tensorwow/layers.py | fausecteam/faustctf-2018-jodlgang | 4 | 6630898 | <gh_stars>1-10
import numpy as np
from tensorwow.im2col import im2col_indices
class FullyConnectedLayer(object):
def __init__(self, num_input_units, num_output_units, activation_func, weights_initializer, bias_initializer):
"""
:param num_input_units: Number of input dimensions D
:param num_output_units: Number of output dimensions O
:param activation_func: Activation function
:param weights_initializer: Weights initializer
:param bias_initializer: Bias initializer
"""
self._num_input_units = num_input_units
self._num_output_units = num_output_units
self._activation_func = activation_func
# Disable default initialization
# self._weights = weights_initializer.initialize((num_input_units, num_output_units))
# self._bias = bias_initializer.initialize((num_output_units))
self._x = None
self._z = None
self._a = None
self._dw = None
self._db = None
@property
def num_input_units(self):
return self._num_input_units
@property
def num_output_units(self):
return self._num_output_units
@property
def weights(self):
"""
:return: D x M matrix
"""
return self._weights
@weights.setter
def weights(self, weights):
if weights.shape != (self._num_input_units, self._num_output_units):
raise ValueError("Invalid dimensions")
self._weights = weights
@property
def bias(self):
"""
:return: vector of length M
"""
return self._bias
@bias.setter
def bias(self, bias):
if bias.shape != (self._num_output_units,):
raise ValueError("Invalid dimensions")
self._bias = bias
def forward(self, x):
"""
:param x: N x D matrix
:return: N x M matrix
"""
assert len(x.shape) == 2, "Inputs must be a two-dimensional tensor"
assert x.shape[1] == self._num_input_units, "Inputs does not match input size"
z = np.dot(x, self._weights) + self._bias
a = self._activation_func.compute(z)
# Cache values for backward step
self._x = x
self._z = z
self._a = a
return a
class ConvLayer(object):
def __init__(self, kernel_size, num_input_channels, num_filters, activation_func, weights_initializer, bias_initializer, stride=1, padding=1):
self._kernel_size = kernel_size
self._num_input_channels = num_input_channels
self._num_filters = num_filters
self._padding = padding
self._stride = stride
self._activation_func = activation_func
# Disable default initialization
# self._weights = weights_initializer.initialize((kernel_size, kernel_size, num_input_channels, num_filters))
# self._bias = bias_initializer.initialize((num_filters, 1))
@property
def weights(self):
"""
:return: Weight matrix of shape (kernel_size, kernel_size, num_input_channels, num_filters)
"""
return self._weights
@property
def bias(self):
"""
:return: Bias vector of length num_filters
"""
return self._bias
@weights.setter
def weights(self, weights):
if weights.shape != (self._kernel_size, self._kernel_size, self._num_input_channels, self._num_filters):
raise ValueError("Invalid dimensions")
self._weights = weights
@bias.setter
def bias(self, bias):
if bias.shape != (self._num_filters,):
raise ValueError("Invalid dimensions")
self._bias = bias
def forward(self, x):
"""
Computes the correlation of each input sample with the layer's kernel matrix
:param x: input images of shape [num_samples, height, width, input_channels]
:return: feature maps of shape [num_samples, height, width, num_filters]
"""
        assert len(x.shape) == 4, "Inputs must be a four-dimensional tensor"
assert x.shape[3] == self._num_input_channels, "Inputs does not match required input channels"
num_samples, height, width, channels = x.shape
assert (height - self._kernel_size + 2 * self._padding) % self._stride == 0, "Invalid dimensions"
assert (width - self._kernel_size + 2 * self._padding) % self._stride == 0, "Invalid dimensions"
output_height = (height - self._kernel_size + 2 * self._padding) // self._stride + 1
output_width = (width - self._kernel_size + 2 * self._padding) // self._stride + 1
x_col = im2col_indices(x, self._kernel_size, self._kernel_size, padding=self._padding, stride=self._stride)
# Move filter kernels to the front before reshaping to [num_filters, ...]
# To make the filter matrix appear for each channel contiguously, move the channels dimension to the front as well
weights_col = self._weights.transpose(3, 2, 0, 1).reshape(self._num_filters, -1)
z = np.dot(weights_col, x_col) + self._bias[:, None]
a = self._activation_func.compute(z)
# Found this order through experimenting
a = a.reshape(self._num_filters, num_samples, output_height, output_width).transpose(1, 2, 3, 0)
return a
class MaxPoolLayer(object):
def __init__(self, window_size, padding, stride):
self._window_size = window_size
self._padding = padding
self._stride = stride
def forward(self, x):
num_samples, height, width, num_channels = x.shape
assert (height - self._window_size) % self._stride == 0, "Invalid dimensions"
assert (width - self._window_size) % self._stride == 0, "Invalid dimensions"
output_height = (height - self._window_size) // self._stride + 1
output_width = (width - self._window_size) // self._stride + 1
x_prep = x.transpose(0, 3, 1, 2).reshape(num_samples * num_channels, height, width, 1)
x_col = im2col_indices(x_prep, self._window_size, self._window_size, padding=self._padding, stride=self._stride)
max_indices = np.argmax(x_col, axis=0)
z = x_col[max_indices, range(len(max_indices))]
z = z.reshape(num_samples, num_channels, output_height, output_width).transpose(0, 2, 3, 1)
        return z
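# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# The layers above expect an activation object exposing a compute() method and get
# their weights through the property setters (the initializer arguments are unused
# because default initialization is disabled). The ReLU class below is a stand-in.
import numpy as np

class ReLU(object):
    def compute(self, z):
        return np.maximum(z, 0.0)

fc = FullyConnectedLayer(num_input_units=4, num_output_units=3,
                         activation_func=ReLU(),
                         weights_initializer=None, bias_initializer=None)
fc.weights = np.random.randn(4, 3)        # D x M matrix, checked by the setter
fc.bias = np.zeros(3)                     # vector of length M
out = fc.forward(np.random.randn(8, 4))   # returns an 8 x 3 activation matrix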
output_visualisation_window.py | Lewak/PracaMagisterska | 0 | 6630899 | <filename>output_visualisation_window.py
from generic_window import GenericWindow
from dearpygui import core, simple
from tensor_flow_interface import TensorFlowInterface
class OutputVisualisationWindow(GenericWindow):
heatMapTable = [[]]
windowName = 'Wizualizacja wyjscia'
learningGraph = 'Historia uczenia'
plotName = 'Heatmap'
historyPlotName = 'Wykres historii uczenia'
seriesName = 'Odpowiedz neuronow'
xSize = 372
ySize = 376
xPos = 16
yPos = 396
def __init__(self):
with simple.window(self.windowName, width=self.xSize, height=self.ySize, x_pos=self.xPos, y_pos=self.yPos):
core.add_separator()
core.add_plot(self.plotName)
super().__init__()
def create_output_graph(self, model:TensorFlowInterface):
size = 300
dataOut = []
outputList = []
for j in range(size):
for i in range(size):
x = -8.0 + i * 16.0 / size
y = -8.0 + j * 16.0 / size
dataOut.append([x,-y])
outputList.append([i,j])
temp = model.predict_value(dataOut)
temp2 = []
for i in range(len(dataOut)):
outputList[i].append(temp[i][0])
temp2.append(temp[i][0])
core.add_heat_series(self.plotName,name = self.seriesName, values=temp2, rows=size, columns=size, scale_min=0.0, scale_max=1.0, format='')
def display_history_graph(self, historyDict, numberOfEpochs):
with simple.window(self.learningGraph, width=300, height=300):
core.add_separator()
core.add_plot(self.historyPlotName)
xAxis = range(0, numberOfEpochs)
core.add_line_series(self.historyPlotName, "Dokładnosc", xAxis, historyDict['accuracy'])
core.add_line_series(self.historyPlotName, "Strata", xAxis, historyDict['loss'])
| <filename>output_visualisation_window.py
from generic_window import GenericWindow
from dearpygui import core, simple
from tensor_flow_interface import TensorFlowInterface
class OutputVisualisationWindow(GenericWindow):
heatMapTable = [[]]
windowName = 'Wizualizacja wyjscia'
learningGraph = 'Historia uczenia'
plotName = 'Heatmap'
historyPlotName = 'Wykres historii uczenia'
seriesName = 'Odpowiedz neuronow'
xSize = 372
ySize = 376
xPos = 16
yPos = 396
def __init__(self):
with simple.window(self.windowName, width=self.xSize, height=self.ySize, x_pos=self.xPos, y_pos=self.yPos):
core.add_separator()
core.add_plot(self.plotName)
super().__init__()
def create_output_graph(self, model:TensorFlowInterface):
size = 300
dataOut = []
outputList = []
for j in range(size):
for i in range(size):
x = -8.0 + i * 16.0 / size
y = -8.0 + j * 16.0 / size
dataOut.append([x,-y])
outputList.append([i,j])
temp = model.predict_value(dataOut)
temp2 = []
for i in range(len(dataOut)):
outputList[i].append(temp[i][0])
temp2.append(temp[i][0])
core.add_heat_series(self.plotName,name = self.seriesName, values=temp2, rows=size, columns=size, scale_min=0.0, scale_max=1.0, format='')
def display_history_graph(self, historyDict, numberOfEpochs):
with simple.window(self.learningGraph, width=300, height=300):
core.add_separator()
core.add_plot(self.historyPlotName)
xAxis = range(0, numberOfEpochs)
core.add_line_series(self.historyPlotName, "Dokładnosc", xAxis, historyDict['accuracy'])
core.add_line_series(self.historyPlotName, "Strata", xAxis, historyDict['loss'])
| none | 1 | 2.808195 | 3 |
|
skidl/alias.py | vkleen/skidl | 700 | 6630900 | <filename>skidl/alias.py
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
"""
Handles aliases for Circuit, Part, Pin, Net, Bus, Interface objects.
"""
from __future__ import ( # isort:skip
absolute_import,
division,
print_function,
unicode_literals,
)
import re
from builtins import str, super
from future import standard_library
standard_library.install_aliases()
class Alias(set):
"""
Multiple aliases can be added to another object to give it other names.
Args:
aliases: A single string or a list of strings.
"""
def __init__(self, *aliases):
super().__init__()
self.__iadd__(*aliases)
def __iadd__(self, *aliases):
"""Add new aliases."""
for alias in aliases:
if isinstance(alias, (tuple, list, set)):
for a in list(alias):
self.add(a)
else:
self.add(alias)
return self
def __str__(self):
"""Return the aliases as a delimited string."""
return "/".join(list(self))
def __eq__(self, other):
"""
Return true if both lists of aliases have at least one alias in common.
Args:
other: The Alias object which self will be compared to.
"""
return bool(self.intersection(Alias(other)))
def clean(self):
"""Remove any empty aliases."""
self.discard(None)
self.discard("")
| <filename>skidl/alias.py
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
"""
Handles aliases for Circuit, Part, Pin, Net, Bus, Interface objects.
"""
from __future__ import ( # isort:skip
absolute_import,
division,
print_function,
unicode_literals,
)
import re
from builtins import str, super
from future import standard_library
standard_library.install_aliases()
class Alias(set):
"""
Multiple aliases can be added to another object to give it other names.
Args:
aliases: A single string or a list of strings.
"""
def __init__(self, *aliases):
super().__init__()
self.__iadd__(*aliases)
def __iadd__(self, *aliases):
"""Add new aliases."""
for alias in aliases:
if isinstance(alias, (tuple, list, set)):
for a in list(alias):
self.add(a)
else:
self.add(alias)
return self
def __str__(self):
"""Return the aliases as a delimited string."""
return "/".join(list(self))
def __eq__(self, other):
"""
Return true if both lists of aliases have at least one alias in common.
Args:
other: The Alias object which self will be compared to.
"""
return bool(self.intersection(Alias(other)))
def clean(self):
"""Remove any empty aliases."""
self.discard(None)
self.discard("")
| en | 0.675965 | # -*- coding: utf-8 -*- # The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>. Handles aliases for Circuit, Part, Pin, Net, Bus, Interface objects. # isort:skip Multiple aliases can be added to another object to give it other names. Args: aliases: A single string or a list of strings. Add new aliases. Return the aliases as a delimited string. Return true if both lists of aliases have at least one alias in common. Args: other: The Alias object which self will be compared to. Remove any empty aliases. | 2.740757 | 3 |
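# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# Alias is a set subclass whose equality test succeeds when the two alias sets
# share at least one name, which is how objects with several names can be matched.
vcc = Alias('VCC', 'VDD', '+5V')
vcc += 'POWER'                      # __iadd__ accepts strings or lists of strings
print(vcc == Alias('VDD'))          # True: one alias in common is enough
print(vcc == Alias('GND'))          # False: no overlap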
service/service.py | sesam-community/currenttime | 0 | 6630901 | from flask import Flask, request, jsonify, Response
from sesamutils import VariablesConfig, sesam_logger
import json
import requests
import os
import sys
app = Flask(__name__)
logger = sesam_logger("Steve the logger", app=app)
## Logic for running program in dev
try:
with open("helpers.json", "r") as stream:
logger.info("Using env vars defined in helpers.json")
env_vars = json.load(stream)
os.environ['current_url'] = env_vars['current_url']
os.environ['current_user'] = env_vars['current_user']
os.environ['current_password'] = env_vars['current_password']
except OSError as e:
logger.info("Using env vars defined in SESAM")
##
required_env_vars = ['current_user', 'current_password', 'current_url']
optional_env_vars = ['test1', 'test2']
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
## Helper functions
def stream_json(clean):
first = True
yield '['
for i, row in enumerate(clean):
if not first:
yield ','
else:
first = False
yield json.dumps(row)
yield ']'
@app.route('/')
def index():
output = {
'service': 'CurrentTime up and running...',
'remote_addr': request.remote_addr
}
return jsonify(output)
@app.route('/get/<path>', methods=['GET'])
def get_data(path):
config = VariablesConfig(required_env_vars)
if not config.validate():
sys.exit(1)
exceed_limit = True
result_offset = 0
if request.args.get('since') != None:
logger.info('Requesting resource with since value.')
result_offset = int(request.args.get('since'))
def emit_rows(exceed_limit, result_offset, config):
while exceed_limit is not None:
try:
logger.info("Requesting data...")
request_url = f"{config.current_url}/{path}?%24count=true&%24skip={result_offset}"
data = requests.get(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
raise
else:
data_count = json.loads(data.content.decode('utf-8-sig'))["@odata.count"]
updated_value = result_offset+1
first = True
yield '['
for entity in json.loads(data.content.decode('utf-8-sig'))["value"]:
entity['_updated'] = updated_value
if not first:
yield ','
else:
first = False
yield json.dumps(entity)
updated_value += 1
yield ']'
if exceed_limit != None:
if exceed_limit != data_count:
exceed_limit = data_count
result_offset+=exceed_limit
logger.info(f"Result offset is now {result_offset}")
logger.info(f"extending result")
if exceed_limit == data_count:
logger.info(f"Paging is complete.")
exceed_limit = None
except Exception as e:
logger.warning(f"Service not working correctly. Failing with error : {e}")
logger.info("Returning objects...")
try:
return Response(emit_rows(exceed_limit, result_offset, config), status=200, mimetype='application/json')
except Exception as e:
logger.error("Error from Currenttime: %s", e)
return Response(status=500)
@app.route('/chained/<path>/', defaults={'resource_path': None}, methods=['GET','POST'])
@app.route('/chained/<path>/<resource_path>', defaults={'sub_resource_path': None}, methods=['GET','POST'])
@app.route('/chained/<path>/<resource_path>/<sub_resource_path>', methods=['GET','POST'])
def chain_data(path, resource_path, sub_resource_path):
config = VariablesConfig(required_env_vars)
if not config.validate():
sys.exit(1)
request_data = request.get_data()
json_data = json.loads(str(request_data.decode("utf-8")))
def emit_rows(config, json_data):
first = True
for element in json_data[0].get("payload"):
resource = [*element.values()][0]
if resource_path == None:
request_url = f"{config.current_url}/{path}({resource})"
data = requests.get(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"))
if sub_resource_path != None:
sub_resource = [*element.values()][1]
request_url = f"{config.current_url}/{path}({resource})/{resource_path}({sub_resource})/{sub_resource_path}"
data = requests.get(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"))
else:
request_url = f"{config.current_url}/{path}({resource})/{resource_path}"
data = requests.get(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
raise
else:
if not first:
yield ','
else:
first = False
yield json.dumps(data.json()["value"])
logger.info("Returning objects...")
try:
return Response(emit_rows(config, json_data), status=200, mimetype='application/json')
except Exception as e:
logger.error("Error from Currenttime: %s", e)
return Response(status=500)
@app.route('/post/<path>/', defaults={'resource_path': None}, methods=['GET','POST'])
@app.route('/post/<path>/<resource_path>', methods=['GET','POST'])
def post_data(path, resource_path):
config = VariablesConfig(required_env_vars)
if not config.validate():
sys.exit(1)
request_data = request.get_data()
json_data = json.loads(str(request_data.decode("utf-8")))
for element in json_data:
try:
resource_id = element["id"]
del element["id"]
except:
resource_id = None
if resource_path == None and resource_id == None:
request_url = f"{config.current_url}/{path}"
logger.info(f"Trying to POST payload: {element}")
data = requests.post(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"), data=json.dumps(element))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
if data.ok:
logger.info(f"POST Completed")
if resource_path == None and resource_id != None:
if element['deleted'] == True:
logger.info(f"Trying to DELETE payload: {element}")
request_url = f"{config.current_url}/{path}({resource_id})"
data = requests.delete(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
if data.ok:
logger.info(f"DELETE Completed")
else:
logger.info(f"Trying to PUT payload: {element}")
request_url = f"{config.current_url}/{path}({resource_id})"
data = requests.put(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"), data=json.dumps(element))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
if data.ok:
logger.info(f"UPDATE Completed")
if resource_path != None and resource_id != None:
logger.info(f"Trying to PUT payload: {element}")
request_url = f"{config.current_url}/{path}({resource_id})/{resource_path}"
data = requests.put(request_url, headers=headers, auth=(f"{config.current_user}", f"{config.current_password}"), data=json.dumps(element))
if not data.ok:
logger.error(f"Unexpected response status code: {data.content}")
return f"Unexpected error : {data.content}", 500
if data.ok:
logger.info(f"UPDATE Completed")
#else:
# logger.info("Nothing to do... Look at the README or in the code to modify the if clauses.")
return jsonify({'Steve reporting': "work complete..."})
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
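# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# The /get/<path> endpoint streams CurrentTime entities as a JSON array and uses
# the optional 'since' query parameter as the paging offset. The host and port
# assume the Flask app is running locally with its defaults, and 'Employees' is
# only an example resource name.
import requests

resp = requests.get("http://localhost:5000/get/Employees", params={"since": 0})
for entity in resp.json():
    print(entity["_updated"])    # the service stamps every entity with _updated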
odm/dialects/postgresql/__init__.py | quantmind/pulsar-odm | 16 | 6630902 | <filename>odm/dialects/postgresql/__init__.py
from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
from sqlalchemy.dialects import registry
from .pool import GreenletPool
class PGDGreen(PGDialect_psycopg2):
'''PostgreSql dialect using psycopg2 and greenlet to obtain an
implicit asynchronous database connection.
'''
poolclass = GreenletPool
is_green = True
@classmethod
def dbapi(cls):
from odm.dialects.postgresql import green
return green
def create_connect_args(self, url):
args, opts = super().create_connect_args(url)
opts.pop('pool_size', None)
opts.pop('pool_timeout', None)
return [[], opts]
registry.register("postgresql.green", "odm.dialects.postgresql", "PGDGreen")
| <filename>odm/dialects/postgresql/__init__.py
from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
from sqlalchemy.dialects import registry
from .pool import GreenletPool
class PGDGreen(PGDialect_psycopg2):
'''PostgreSql dialect using psycopg2 and greenlet to obtain an
implicit asynchronous database connection.
'''
poolclass = GreenletPool
is_green = True
@classmethod
def dbapi(cls):
from odm.dialects.postgresql import green
return green
def create_connect_args(self, url):
args, opts = super().create_connect_args(url)
opts.pop('pool_size', None)
opts.pop('pool_timeout', None)
return [[], opts]
registry.register("postgresql.green", "odm.dialects.postgresql", "PGDGreen")
| en | 0.674078 | PostgreSql dialect using psycopg2 and greenlet to obtain an implicit asynchronous database connection. | 2.323011 | 2 |
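# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# Registering the dialect under "postgresql.green" means SQLAlchemy URLs can select
# it with the "postgresql+green" scheme. Credentials, host and database name below
# are placeholders.
from sqlalchemy import create_engine

engine = create_engine('postgresql+green://user:password@localhost/mydb')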
tests/test_holding_groups.py | mscarey/AuthoritySpoke | 18 | 6630903 | <reponame>mscarey/AuthoritySpoke
import pytest
from authorityspoke.holdings import HoldingGroup
class TestMakeHoldingGroup:
def test_all_members_must_be_holdings(self, make_rule):
with pytest.raises(TypeError):
HoldingGroup([make_rule["h1"]])
class TestHoldingGroupImplies:
def test_explain_holdinggroup_implication(self, make_holding):
left = HoldingGroup([make_holding["h1"], make_holding["h2_ALL"]])
right = HoldingGroup([make_holding["h2"]])
explanation = left.explain_implication(right)
assert "implies" in str(explanation).lower()
def test_implication_of_holding(self, make_holding):
left = HoldingGroup([make_holding["h1"], make_holding["h2_ALL"]])
right = make_holding["h2"]
assert left.implies(right)
def test_implication_of_rule(self, make_holding, make_rule):
left = HoldingGroup([make_holding["h1"], make_holding["h2_ALL"]])
right = make_rule["h2"]
assert left.implies(right)
def test_implication_of_none(self, make_holding):
left = HoldingGroup([make_holding["h1"], make_holding["h2_ALL"]])
right = None
        assert left.implies(right)
uquake/grid/base.py | jeanphilippemercier/uquake | 0 | 6630904 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: <filename>
# Purpose: <purpose>
# Author: <author>
# Email: <email>
#
# Copyright (C) <copyright>
# --------------------------------------------------------------------
"""
:copyright:
<copyright>
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
import numpy as np
from uuid import uuid4
from ..core.logging import logger
from pkg_resources import load_entry_point
from ..core.util import ENTRY_POINTS
from pathlib import Path
from scipy.ndimage.interpolation import map_coordinates
from ..core.event import WaveformStreamID
import matplotlib.pyplot as plt
def read_grid(filename, format='PICKLE', **kwargs):
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
read_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'readFormat')
return read_format(filename, **kwargs)
class Grid:
"""
Object containing a regular grid
"""
def __init__(self, data_or_dims, spacing=None, origin=None,
resource_id=None, value=0):
"""
can hold both 2 and 3 dimensional grid
:param data_or_dims: either a numpy array or a tuple/list with the grid
dimensions. If grid dimensions are specified, the grid is initialized
with value
:param spacing: Spacing
        :type spacing: tuple
:param origin: tuple, list or array containing the origin of the grid
:type origin: tuple
        :param resource_id: unique identifier for the grid; if set to None,
        a uuid4 string is used as the identifier
        :type resource_id: str
        :param value: value to fill the grid should dims be specified
        :type value: float
"""
data_or_dims = np.array(data_or_dims)
if data_or_dims.ndim == 1:
self.data = np.ones(data_or_dims) * value
else:
self.data = data_or_dims
if resource_id is None:
self.resource_id = str(uuid4())
else:
self.resource_id = resource_id
if origin is None:
self.origin = np.zeros(len(self.data.shape))
else:
origin = np.array(origin)
if origin.shape[0] == len(self.data.shape):
self.origin = origin
else:
logger.error(f'origin shape should be {len(self.data.shape)}')
raise ValueError
if spacing is None:
self.spacing = np.ones(len(self.data.shape))
else:
spacing = np.array(spacing)
if spacing.shape[0] == len(self.data.shape):
self.spacing = spacing
else:
logger.error(f'spacing shape should be {len(self.data.shape)}')
raise ValueError
def __hash__(self):
return hash((tuple(self.data.ravel()), tuple(self.spacing),
tuple(self.shape), tuple(self.origin)))
def __eq__(self, other):
        return self.hash == other.hash
@property
def hash(self):
return self.__hash__()
@classmethod
def from_ods(cls, origin, dimensions, spacing, val=0):
"""
create a grid from origin, dimensions and spacing
:param origin: grid origin
:type origin: tuple
:param dimensions: grid dimension
:type dimensions: tuple
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
"""
data = np.ones(tuple(dimensions)) * val
        return cls(data, spacing=spacing, origin=origin)
@classmethod
def from_ocs(cls, origin, corner, spacing, val=0):
"""
create a grid from origin, corner and spacing
:param origin: grid origin (e.g., lower left corner for 2D grid)
:type origin: tuple or list or numpy.array
:param corner: grid upper (e.g., upper right corner for 2D grid)
:type corner: tuple or list or numpy.array
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
:param buf: buffer around the grid in fraction of grid size
"""
origin2 = origin
corner2 = corner
gshape = tuple([int(np.ceil((c - o) / spacing))
for o, c in zip(origin2, corner2)])
data = np.ones(gshape) * val
        grid = cls(data, spacing=spacing, origin=origin)
        grid.fill_homogeneous(val)
        return grid
@classmethod
def from_ocd(cls, origin, corner, dimensions, val=0):
"""
create a grid from origin, corner and dimensions
:param origin: grid origin (e.g., lower left corner for 2D grid)
:param corner: grid upper (e.g., upper right corner for 2D grid)
:param dimensions: grid dimensions
:param val: constant value with which to fill the grid
:return:
"""
data = np.ones(dimensions) * val
spacing = (corner - origin) / (dimensions - 1)
        return cls(data, spacing=spacing, origin=origin)
def __repr__(self):
repr_str = """
spacing: %s
origin : %s
shape : %s
""" % (self.spacing, self.origin, self.shape)
return repr_str
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return np.all((self.shape == other.shape) &
(self.spacing == other.spacing) &
np.all(self.origin == other.origin))
def __mul__(self, other):
if isinstance(other, Grid):
if self.check_compatibility(self, other):
mul_data = self.data * other.data
return Grid(mul_data, spacing=self.spacing,
origin=self.origin)
else:
raise ValueError
else:
raise TypeError
def __abs__(self):
return np.abs(self.data)
def transform_to(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
coords = (values - self.origin) / self.spacing
return coords
def transform_to_grid(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_to(values)
def transform_from(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return values * self.spacing + self.origin
def transform_from_grid(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_from(values)
def check_compatibility(self, other):
"""
check if two grids are compatible, i.e., have the same shape, spacing
and origin
"""
return (np.all(self.shape == other.shape) and
np.all(self.spacing == other.spacing) and
np.all(self.origin == other.origin))
def __get_shape__(self):
"""
return the shape of the object
"""
return self.data.shape
shape = property(__get_shape__)
def copy(self):
"""
copy the object using copy.deepcopy
"""
import copy
cp = copy.deepcopy(self)
return cp
def in_grid(self, point):
"""
Check if a point is inside the grid
:param point: the point to check in absolute coordinate (model)
:type point: tuple, list or numpy array
:returns: True if point is inside the grid
:rtype: bool
"""
corner1 = self.origin
corner2 = self.origin + self.spacing * np.array(self.shape)
return np.all((point >= corner1) & (point <= corner2))
def fill_homogeneous(self, value):
"""
fill the data with a constant value
:param value: the value with which to fill the array
"""
self.data.fill(value)
def generate_points(self, pt_spacing=None):
"""
Generate points within the grid
"""
# if pt_spacing is None:
ev_spacing = self.spacing
dimensions = np.array(self.shape) * self.spacing / ev_spacing
xe = np.arange(0, dimensions[0]) * ev_spacing + self.origin[0]
ye = np.arange(0, dimensions[1]) * ev_spacing + self.origin[1]
ze = np.arange(0, dimensions[2]) * ev_spacing + self.origin[2]
Xe, Ye, Ze = np.meshgrid(xe, ye, ze)
Xe = Xe.reshape(np.prod(Xe.shape))
Ye = Ye.reshape(np.prod(Ye.shape))
Ze = Ze.reshape(np.prod(Ze.shape))
return Xe, Ye, Ze
def generate_random_points_in_grid(self, n_points=1,
grid_space=False):
"""
Generate a random set of points within the grid
:param n_points: number of points to generate (default=1)
:type n_points: int
:param grid_space: whether the output is expressed in
grid coordinates (True) or model coordinates (False)
(default: False)
:type grid_space: bool
:return: an array of triplet
"""
points = np.random.rand(n_points, len(self.data.shape))
for i in range(n_points):
points[i] = points[i] * self.dimensions
if not grid_space:
return self.transform_from_grid(points)
return points
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
logger.error('format %s is not currently supported for Grid '
'objects' % format)
return
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
'uquake.plugin.grid.%s'
% format_ep.name, 'writeFormat')
write_format(self, filename, **kwargs)
def interpolate(self, coord, grid_space=True, mode='nearest',
order=1, **kwargs):
"""
This function interpolate the values at a given point expressed
either in grid or absolute coordinates
:param coord: Coordinate of the point(s) at which to interpolate
either in grid or absolute coordinates
:type coord: list, tuple, numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be float) as opposed to model space
:param mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'},
optional
The `mode` parameter determines how the input array is extended
        beyond its boundaries. Default is 'nearest'. Behavior for each valid
value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.
:param order: int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
:type order: int
:type grid_space: bool
:rtype: numpy.array
"""
coord = np.array(coord)
if not grid_space:
coord = self.transform_to(coord)
if len(coord.shape) < 2:
coord = coord[:, np.newaxis]
try:
return map_coordinates(self.data, coord, mode=mode, order=order,
**kwargs)
except Exception as e:
# logger.warning(e)
# logger.info('transposing the coordinate array')
return map_coordinates(self.data, coord.T, mode=mode, order=order,
**kwargs)
def fill_from_z_gradient(self, vals, zvals):
data = self.data
origin = self.origin
zinds = [int(self.transform_to([origin[0], origin[1], z_])[2]) for z_
in zvals]
# print(zinds, origin)
data[:, :, zinds[0]:] = vals[0]
data[:, :, :zinds[-1]] = vals[-1]
for i in range(len(zinds) - 1):
# print(i)
fill = np.linspace(vals[i + 1], vals[i], zinds[i] - zinds[i + 1])
data[:, :, zinds[i + 1]:zinds[i]] = fill
def get_grid_point_coordinates(self, mesh_grid=True):
"""
"""
x = []
for i, (dimension, spacing) in \
enumerate(zip(self.data.shape, self.spacing)):
v = np.arange(0, dimension) * spacing + self.origin[0]
x.append(v)
if not mesh_grid:
return tuple(x)
if len(x) == 2:
return tuple(np.meshgrid(x[0], x[1]))
if len(x) == 3:
return tuple(np.meshgrid(x[0], x[1], x[2]))
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
        Path(filename).parent.mkdir(parents=True, exist_ok=True)
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'writeFormat')
return write_format(self, filename, **kwargs)
def plot_1D(self, x, y, z_resolution, grid_space=False,
inventory=None, reverse_y=True):
"""
:param x: x location
:param y: y location
:param z_resolution_m: z resolution in grid units
:param grid_space:
:return:
"""
if not grid_space:
x, y, z = self.transform_from([x, y, 0])
zs = np.arange(self.origin[2], self.corner[2], z_resolution)
coords = []
for z in zs:
coords.append(np.array([x, y, z]))
values = self.interpolate(coords, grid_space=grid_space)
plt.plot(values, zs)
if reverse_y:
plt.gca().invert_yaxis()
if (inventory):
z_stas = []
for network in inventory:
for station in network:
loc = station.loc
z_stas.append(loc[2])
plt.plot([np.mean(values)] * len(z_stas), z_stas, 'kv')
plt.plot()
plt.plot()
plt.show()
@property
def ndim(self):
return self.data.ndim
@property
def shape(self):
return list(self.data.shape)
@property
def dims(self):
return self.shape
@property
def dimensions(self):
return self.shape
@property
def corner(self):
return np.array(self.origin) + np.array(self.shape) * \
np.array(self.spacing)
def angles(travel_time_grid):
"""
This function calculate the take off angle and azimuth for every grid point
given a travel time grid calculated using an Eikonal solver
:param travel_time_grid: travel_time grid
:type travel_time_grid: ~uquake.core.grid.Grid.
:rparam: azimuth and takeoff angles grids
.. Note: The convention for the takeoff angle is that 0 degree is down.
"""
gds_tmp = np.gradient(travel_time_grid.data)
gds = [-gd for gd in gds_tmp]
tmp = np.arctan2(gds[0], gds[1]) # azimuth is zero northwards
azimuth = travel_time_grid.copy()
azimuth.type = 'ANGLE'
azimuth.data = tmp
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
tmp = np.arctan2(hor, -gds[2])
# takeoff is zero pointing down
takeoff = travel_time_grid.copy()
takeoff.type = 'ANGLE'
takeoff.data = tmp
return azimuth, takeoff
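# --- Editor's note: illustrative sketch, not part of the original module. ---
# angles() accepts any grid holding travel times and returns two grids with the
# azimuth and takeoff angle at every node (0 degrees pointing down for takeoff).
# Example with a synthetic 3D travel-time grid:
#
#     tt = Grid(np.random.rand(20, 20, 10), spacing=[10, 10, 10], origin=[0, 0, 0])
#     azimuth_grid, takeoff_grid = angles(tt)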
def ray_tracer(travel_time_grid, start, grid_space=False, max_iter=1000,
arrival_id=None, earth_model_id=None,
network: str=None):
"""
This function calculates the ray between a starting point (start) and an
end point, which should be the seed of the travel_time grid, using the
gradient descent method.
:param travel_time_grid: a travel time grid
:type travel_time_grid: TTGrid
:param start: the starting point (usually event location)
:type start: tuple, list or numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param max_iter: maximum number of iteration
:param arrival_id: id of the arrival associated to the ray if
applicable
:type arrival_id: uquake.core.event.ResourceIdentifier
:param earth_model_id: velocity/earth model id.
:type earth_model_id: uquake.core.event.ResourceIdentifier
:param network: network information
:type network: str
:rtype: numpy.array
"""
from uquake.core.event import Ray
if grid_space:
start = np.array(start)
start = travel_time_grid.transform_from(start)
origin = travel_time_grid.origin
spacing = travel_time_grid.spacing
end = np.array(travel_time_grid.seed)
start = np.array(start)
# calculating the gradient in every dimension at every grid points
gds = [Grid(gd, origin=origin, spacing=spacing)
for gd in np.gradient(travel_time_grid.data)]
dist = np.linalg.norm(start - end)
cloc = start # initializing cloc "current location" to start
gamma = spacing / 2 # gamma is set to half the grid spacing. This
# should be
# sufficient. Note that gamma is fixed to reduce
# processing time.
nodes = [start]
iter_number = 0
while np.all(dist > spacing / 2):
if iter_number > max_iter:
break
if np.all(dist < spacing * 4):
gamma = np.min(spacing) / 4
gvect = np.array([gd.interpolate(cloc, grid_space=False,
order=1)[0] for gd in gds])
cloc = cloc - gamma * gvect / (np.linalg.norm(gvect) + 1e-8)
nodes.append(cloc)
dist = np.linalg.norm(cloc - end)
iter_number += 1
nodes.append(end)
tt = travel_time_grid.interpolate(start, grid_space=False, order=1)[0]
az = travel_time_grid.to_azimuth_point(start, grid_space=False,
order=1)
toa = travel_time_grid.to_takeoff_point(start, grid_space=False,
order=1)
ray = Ray(nodes=nodes, site_code=travel_time_grid.seed_label,
arrival_id=arrival_id, phase=travel_time_grid.phase,
azimuth=az, takeoff_angle=toa, travel_time=tt,
earth_model_id=earth_model_id, network=network)
return ray
| # -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: <filename>
# Purpose: <purpose>
# Author: <author>
# Email: <email>
#
# Copyright (C) <copyright>
# --------------------------------------------------------------------
"""
:copyright:
<copyright>
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
import numpy as np
from uuid import uuid4
from ..core.logging import logger
from pkg_resources import load_entry_point
from ..core.util import ENTRY_POINTS
from pathlib import Path
from scipy.ndimage.interpolation import map_coordinates
from ..core.event import WaveformStreamID
import matplotlib.pyplot as plt
def read_grid(filename, format='PICKLE', **kwargs):
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
read_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'readFormat')
return read_format(filename, **kwargs)
class Grid:
"""
Object containing a regular grid
"""
def __init__(self, data_or_dims, spacing=None, origin=None,
resource_id=None, value=0):
"""
can hold both 2 and 3 dimensional grid
:param data_or_dims: either a numpy array or a tuple/list with the grid
dimensions. If grid dimensions are specified, the grid is initialized
with value
        :param spacing: spacing between the grid nodes
        :type spacing: tuple
        :param origin: tuple, list or array containing the origin of the grid
        :type origin: tuple
        :param resource_id: unique identifier for the grid; if set to None,
        uuid4 is used to define a unique identifier.
        :type resource_id: str
        :param value: value to fill the grid should dims be specified
        :type value: float
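        Example (hypothetical values): a 10 x 10 x 10 grid with unit node
        spacing, origin at the zero vector and filled with a value of 5000::

            grid = Grid((10, 10, 10), spacing=(1, 1, 1), origin=(0, 0, 0),
                        value=5000)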
"""
data_or_dims = np.array(data_or_dims)
if data_or_dims.ndim == 1:
self.data = np.ones(data_or_dims) * value
else:
self.data = data_or_dims
if resource_id is None:
self.resource_id = str(uuid4())
else:
self.resource_id = resource_id
if origin is None:
self.origin = np.zeros(len(self.data.shape))
else:
origin = np.array(origin)
if origin.shape[0] == len(self.data.shape):
self.origin = origin
else:
logger.error(f'origin shape should be {len(self.data.shape)}')
raise ValueError
if spacing is None:
self.spacing = np.ones(len(self.data.shape))
else:
spacing = np.array(spacing)
if spacing.shape[0] == len(self.data.shape):
self.spacing = spacing
else:
logger.error(f'spacing shape should be {len(self.data.shape)}')
raise ValueError
def __hash__(self):
return hash((tuple(self.data.ravel()), tuple(self.spacing),
tuple(self.shape), tuple(self.origin)))
    def __eq__(self, other):
        return self.hash == other.hash
@property
def hash(self):
return self.__hash__()
@classmethod
def from_ods(cls, origin, dimensions, spacing, val=0):
"""
create a grid from origin, dimensions and spacing
:param origin: grid origin
:type origin: tuple
:param dimensions: grid dimension
:type dimensions: tuple
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
"""
        data = np.ones(tuple(dimensions)) * val
        return cls(data, spacing=spacing, origin=origin)
@classmethod
def from_ocs(cls, origin, corner, spacing, val=0):
"""
create a grid from origin, corner and spacing
:param origin: grid origin (e.g., lower left corner for 2D grid)
:type origin: tuple or list or numpy.array
:param corner: grid upper (e.g., upper right corner for 2D grid)
:type corner: tuple or list or numpy.array
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
"""
        gshape = tuple([int(np.ceil((c - o) / spacing))
                        for o, c in zip(origin, corner)])
        data = np.ones(gshape) * val
        return cls(data, spacing=spacing, origin=origin)
@classmethod
def from_ocd(cls, origin, corner, dimensions, val=0):
"""
create a grid from origin, corner and dimensions
:param origin: grid origin (e.g., lower left corner for 2D grid)
:param corner: grid upper (e.g., upper right corner for 2D grid)
:param dimensions: grid dimensions
:param val: constant value with which to fill the grid
:return:
"""
        origin = np.array(origin)
        corner = np.array(corner)
        dimensions = np.array(dimensions)
        data = np.ones(tuple(dimensions)) * val
        spacing = (corner - origin) / (dimensions - 1)
        return cls(data, spacing=spacing, origin=origin)
def __repr__(self):
repr_str = """
spacing: %s
origin : %s
shape : %s
""" % (self.spacing, self.origin, self.shape)
return repr_str
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return np.all((self.shape == other.shape) &
(self.spacing == other.spacing) &
np.all(self.origin == other.origin))
def __mul__(self, other):
if isinstance(other, Grid):
            if self.check_compatibility(other):
mul_data = self.data * other.data
return Grid(mul_data, spacing=self.spacing,
origin=self.origin)
else:
raise ValueError
else:
raise TypeError
def __abs__(self):
return np.abs(self.data)
def transform_to(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
coords = (values - self.origin) / self.spacing
return coords
def transform_to_grid(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_to(values)
def transform_from(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return values * self.spacing + self.origin
def transform_from_grid(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_from(values)
def check_compatibility(self, other):
"""
check if two grids are compatible, i.e., have the same shape, spacing
and origin
"""
return (np.all(self.shape == other.shape) and
np.all(self.spacing == other.spacing) and
np.all(self.origin == other.origin))
def __get_shape__(self):
"""
return the shape of the object
"""
return self.data.shape
shape = property(__get_shape__)
def copy(self):
"""
copy the object using copy.deepcopy
"""
import copy
cp = copy.deepcopy(self)
return cp
def in_grid(self, point):
"""
Check if a point is inside the grid
:param point: the point to check in absolute coordinate (model)
:type point: tuple, list or numpy array
:returns: True if point is inside the grid
:rtype: bool
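        Example (assuming the hypothetical 10 x 10 x 10 grid with unit spacing
        and zero origin shown in the constructor docstring)::

            grid.in_grid((5.0, 5.0, 5.0))    # True
            grid.in_grid((50.0, 0.0, 0.0))   # False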
"""
corner1 = self.origin
corner2 = self.origin + self.spacing * np.array(self.shape)
return np.all((point >= corner1) & (point <= corner2))
def fill_homogeneous(self, value):
"""
fill the data with a constant value
:param value: the value with which to fill the array
"""
self.data.fill(value)
def generate_points(self, pt_spacing=None):
"""
Generate points within the grid
"""
# if pt_spacing is None:
ev_spacing = self.spacing
dimensions = np.array(self.shape) * self.spacing / ev_spacing
        xe = np.arange(0, dimensions[0]) * ev_spacing[0] + self.origin[0]
        ye = np.arange(0, dimensions[1]) * ev_spacing[1] + self.origin[1]
        ze = np.arange(0, dimensions[2]) * ev_spacing[2] + self.origin[2]
Xe, Ye, Ze = np.meshgrid(xe, ye, ze)
Xe = Xe.reshape(np.prod(Xe.shape))
Ye = Ye.reshape(np.prod(Ye.shape))
Ze = Ze.reshape(np.prod(Ze.shape))
return Xe, Ye, Ze
def generate_random_points_in_grid(self, n_points=1,
grid_space=False):
"""
Generate a random set of points within the grid
:param n_points: number of points to generate (default=1)
:type n_points: int
:param grid_space: whether the output is expressed in
grid coordinates (True) or model coordinates (False)
(default: False)
:type grid_space: bool
:return: an array of triplet
"""
points = np.random.rand(n_points, len(self.data.shape))
for i in range(n_points):
points[i] = points[i] * self.dimensions
if not grid_space:
return self.transform_from_grid(points)
return points
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
logger.error('format %s is not currently supported for Grid '
'objects' % format)
return
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
'uquake.plugin.grid.%s'
% format_ep.name, 'writeFormat')
write_format(self, filename, **kwargs)
def interpolate(self, coord, grid_space=True, mode='nearest',
order=1, **kwargs):
"""
        This function interpolates the values at a given point expressed
either in grid or absolute coordinates
:param coord: Coordinate of the point(s) at which to interpolate
either in grid or absolute coordinates
:type coord: list, tuple, numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be float) as opposed to model space
:param mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'},
optional
The `mode` parameter determines how the input array is extended
            beyond its boundaries. Default is 'nearest'. Behavior for each valid
value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.
:param order: int, optional
            The order of the spline interpolation, default is 1.
The order has to be in the range 0-5.
:type order: int
:type grid_space: bool
:rtype: numpy.array
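        Example (hypothetical point, using the 10 x 10 x 10 unit-spacing grid
        from the class constructor example)::

            value = grid.interpolate((5.0, 5.0, 5.0), grid_space=False,
                                     order=1)[0]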
"""
coord = np.array(coord)
if not grid_space:
coord = self.transform_to(coord)
if len(coord.shape) < 2:
coord = coord[:, np.newaxis]
try:
return map_coordinates(self.data, coord, mode=mode, order=order,
**kwargs)
except Exception as e:
# logger.warning(e)
# logger.info('transposing the coordinate array')
return map_coordinates(self.data, coord.T, mode=mode, order=order,
**kwargs)
def fill_from_z_gradient(self, vals, zvals):
data = self.data
origin = self.origin
zinds = [int(self.transform_to([origin[0], origin[1], z_])[2]) for z_
in zvals]
# print(zinds, origin)
data[:, :, zinds[0]:] = vals[0]
data[:, :, :zinds[-1]] = vals[-1]
for i in range(len(zinds) - 1):
# print(i)
fill = np.linspace(vals[i + 1], vals[i], zinds[i] - zinds[i + 1])
data[:, :, zinds[i + 1]:zinds[i]] = fill
def get_grid_point_coordinates(self, mesh_grid=True):
"""
"""
x = []
for i, (dimension, spacing) in \
enumerate(zip(self.data.shape, self.spacing)):
            v = np.arange(0, dimension) * spacing + self.origin[i]
x.append(v)
if not mesh_grid:
return tuple(x)
if len(x) == 2:
return tuple(np.meshgrid(x[0], x[1]))
if len(x) == 3:
return tuple(np.meshgrid(x[0], x[1], x[2]))
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
        Path(filename).parent.mkdir(parents=True, exist_ok=True)
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'writeFormat')
return write_format(self, filename, **kwargs)
def plot_1D(self, x, y, z_resolution, grid_space=False,
inventory=None, reverse_y=True):
"""
:param x: x location
:param y: y location
        :param z_resolution: vertical sampling step along the profile (model units)
:param grid_space:
:return:
"""
if not grid_space:
x, y, z = self.transform_from([x, y, 0])
zs = np.arange(self.origin[2], self.corner[2], z_resolution)
coords = []
for z in zs:
coords.append(np.array([x, y, z]))
values = self.interpolate(coords, grid_space=grid_space)
plt.plot(values, zs)
if reverse_y:
plt.gca().invert_yaxis()
if (inventory):
z_stas = []
for network in inventory:
for station in network:
loc = station.loc
z_stas.append(loc[2])
plt.plot([np.mean(values)] * len(z_stas), z_stas, 'kv')
plt.show()
@property
def ndim(self):
return self.data.ndim
@property
def shape(self):
return list(self.data.shape)
@property
def dims(self):
return self.shape
@property
def dimensions(self):
return self.shape
@property
def corner(self):
return np.array(self.origin) + np.array(self.shape) * \
np.array(self.spacing)
def angles(travel_time_grid):
"""
    This function calculates the takeoff angle and azimuth for every grid point
given a travel time grid calculated using an Eikonal solver
:param travel_time_grid: travel_time grid
:type travel_time_grid: ~uquake.core.grid.Grid.
    :returns: azimuth and takeoff angle grids
.. Note: The convention for the takeoff angle is that 0 degree is down.
"""
gds_tmp = np.gradient(travel_time_grid.data)
gds = [-gd for gd in gds_tmp]
tmp = np.arctan2(gds[0], gds[1]) # azimuth is zero northwards
azimuth = travel_time_grid.copy()
azimuth.type = 'ANGLE'
azimuth.data = tmp
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
tmp = np.arctan2(hor, -gds[2])
# takeoff is zero pointing down
takeoff = travel_time_grid.copy()
takeoff.type = 'ANGLE'
takeoff.data = tmp
return azimuth, takeoff
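# Hypothetical usage: build azimuth/takeoff-angle grids from a travel time
# grid and sample them at an event location (names are illustrative):
#
#   azimuth_grid, takeoff_grid = angles(tt_grid)
#   az = azimuth_grid.interpolate(event_location, grid_space=False)[0]
#   toa = takeoff_grid.interpolate(event_location, grid_space=False)[0]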
| en | 0.68335 | # -*- coding: utf-8 -*- # ------------------------------------------------------------------ # Filename: <filename> # Purpose: <purpose> # Author: <author> # Email: <email> # # Copyright (C) <copyright> # -------------------------------------------------------------------- :copyright: <copyright> :license: GNU Lesser General Public License, Version 3 (http://www.gnu.org/copyleft/lesser.html) Object containing a regular grid can hold both 2 and 3 dimensional grid :param data_or_dims: either a numpy array or a tuple/list with the grid dimensions. If grid dimensions are specified, the grid is initialized with value :param spacing: Spacing :type spacing: typle :param origin: tuple, list or array containing the origin of the grid :type origin: tuple :param resource_id: unique identifier for the grid, if set to None, :param value: value to fill the grid should dims be specified :type value: uuid4 is used to define a unique identifier. :type uuid4: str create a grid from origin, dimensions and spacing :param origin: grid origin :type origin: tuple :param dimensions: grid dimension :type dimensions: tuple :param spacing: spacing between the grid nodes :type spacing: float :param val: constant value with which to fill the grid create a grid from origin, corner and spacing :param origin: grid origin (e.g., lower left corner for 2D grid) :type origin: tuple or list or numpy.array :param corner: grid upper (e.g., upper right corner for 2D grid) :type corner: tuple or list or numpy.array :param spacing: spacing between the grid nodes :type spacing: float :param val: constant value with which to fill the grid :param buf: buffer around the grid in fraction of grid size create a grid from origin, corner and dimensions :param origin: grid origin (e.g., lower left corner for 2D grid) :param corner: grid upper (e.g., upper right corner for 2D grid) :param dimensions: grid dimensions :param val: constant value with which to fill the grid :return: spacing: %s origin : %s shape : %s transform model space coordinates into grid space coordinates :param values: tuple of model space coordinates :type values: tuple :rtype: tuple transform model space coordinates into grid space coordinates :param values: tuple of model space coordinates :type values: tuple :rtype: tuple transform grid space coordinates into model space coordinates :param values: tuple of grid space coordinates :type values: tuple :rtype: tuple transform grid space coordinates into model space coordinates :param values: tuple of grid space coordinates :type values: tuple :rtype: tuple check if two grids are compatible, i.e., have the same shape, spacing and origin return the shape of the object copy the object using copy.deepcopy Check if a point is inside the grid :param point: the point to check in absolute coordinate (model) :type point: tuple, list or numpy array :returns: True if point is inside the grid :rtype: bool fill the data with a constant value :param value: the value with which to fill the array Generate points within the grid # if pt_spacing is None: Generate a random set of points within the grid :param n_points: number of points to generate (default=1) :type n_points: int :param grid_space: whether the output is expressed in grid coordinates (True) or model coordinates (False) (default: False) :type grid_space: bool :return: an array of triplet write the grid to disk :param filename: full path to the file to be written :type filename: str :param format: output file format :type format: str This function interpolate 
the values at a given point expressed either in grid or absolute coordinates :param coord: Coordinate of the point(s) at which to interpolate either in grid or absolute coordinates :type coord: list, tuple, numpy.array :param grid_space: true if the coordinates are expressed in grid space (indices can be float) as opposed to model space :param mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the input array is extended beyond its boundaries. Default is 'constant'. Behavior for each valid value is as follows: 'reflect' (`d c b a | a b c d | d c b a`) The input is extended by reflecting about the edge of the last pixel. 'constant' (`k k k k | a b c d | k k k k`) The input is extended by filling all values beyond the edge with the same constant value, defined by the `cval` parameter. 'nearest' (`a a a a | a b c d | d d d d`) The input is extended by replicating the last pixel. 'mirror' (`d c b | a b c d | c b a`) The input is extended by reflecting about the center of the last pixel. 'wrap' (`a b c d | a b c d | a b c d`) The input is extended by wrapping around to the opposite edge. :param order: int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. :type order: int :type grid_space: bool :rtype: numpy.array # logger.warning(e) # logger.info('transposing the coordinate array') # print(zinds, origin) # print(i) write the grid to disk :param filename: full path to the file to be written :type filename: str :param format: output file format :type format: str :param x: x location :param y: y location :param z_resolution_m: z resolution in grid units :param grid_space: :return: This function calculate the take off angle and azimuth for every grid point given a travel time grid calculated using an Eikonal solver :param travel_time_grid: travel_time grid :type travel_time_grid: ~uquake.core.grid.Grid. :rparam: azimuth and takeoff angles grids .. Note: The convention for the takeoff angle is that 0 degree is down. # azimuth is zero northwards # takeoff is zero pointing down This function calculates the ray between a starting point (start) and an end point, which should be the seed of the travel_time grid, using the gradient descent method. :param travel_time_grid: a travel time grid :type travel_time_grid: TTGrid :param start: the starting point (usually event location) :type start: tuple, list or numpy.array :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param max_iter: maximum number of iteration :param arrival_id: id of the arrival associated to the ray if applicable :type arrival_id: uquake.core.event.ResourceIdentifier :param earth_model_id: velocity/earth model id. :type earth_model_id: uquake.core.event.ResourceIdentifier :param network: network information :type network: str :rtype: numpy.array # calculating the gradient in every dimension at every grid points # initializing cloc "current location" to start # gamma is set to half the grid spacing. This # should be # sufficient. Note that gamma is fixed to reduce # processing time. | 2.101055 | 2 |
__init__.py | pjcigan/obsplanning | 1 | 6630905 | <gh_stars>1-10
name="obsplanning"
| name="obsplanning" | none | 1 | 1.138966 | 1 |
|
example.py | whackur/pySeed128 | 4 | 6630906 | <reponame>whackur/pySeed128<filename>example.py
from kisaSeed.kisaSeed import *
if __name__ == "__main__":
text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum sit amet ultrices purus. Integer " \
"cursus sit amet diam sagittis porttitor. Praesent viverra, erat at tincidunt ornare, mauris arcu " \
"dignissim leo, faucibus dapibus est nisl ac neque. Etiam pulvinar sit amet nunc ac vulputate. Vestibulum " \
"accumsan interdum ante ac consectetur. Aliquam ut mattis arcu. Aliquam a arcu vel mauris hendrerit " \
"molestie. Phasellus rhoncus volutpat odio, eget mattis nisi maximus et. Suspendisse potenti. Aliquam " \
"convallis suscipit risus, eget finibus velit fermentum eu. Suspendisse hendrerit metus magna, " \
"id mattis leo interdum id. Donec bibendum arcu eget sem faucibus, non aliquam tortor tempor. Maecenas " \
"facilisis mauris a eros aliquet, non euismod mi ullamcorper. Maecenas lobortis sagittis urna sit amet " \
"viverra. Cras dignissim, libero at imperdiet elementum, magna nisi facilisis ante, eget sodales odio nibh " \
"sit amet sem. Proin nec ultrices nisl. Cras ut vestibulum ex. Nam vel ornare turpis. Sed metus lorem, " \
"semper a condimentum a, luctus nec sem."
iv = None
encode_text = None
decode_text = None
tag = None
tweak = None
nonce = None
key = generate_nonce(16)
seed = KisaSeed(key)
crypto_modes = [e for e in Modes]
padding_modes = [e for e in PaddingModes]
for crypto_mode in crypto_modes:
print("------------ start ------------", "\ncrypto_mode:", crypto_mode)
for padding_mode in padding_modes:
            if len(text) < 8 and padding_mode == PaddingModes.PKCS5:
continue
print("padding_mode:", padding_mode)
print("text:", text)
padding_text = seed.padding(padding_mode, str.encode(text))
print("padding_text:", padding_text)
if crypto_mode in [Modes.CBC, Modes.OFB, Modes.CFB, Modes.CFB8]:
iv = generate_nonce(16)
encode_text, tag = seed.encode(crypto_mode, padding_text, iv=iv)
if crypto_mode in [Modes.GCM]:
iv = generate_nonce(12)
additional_data = generate_nonce(16)
encode_text, tag = seed.encode(crypto_mode, padding_text, iv=iv, additional_data=additional_data)
if crypto_mode in [Modes.XTS]:
key = generate_nonce(32)
seed = KisaSeed(key)
tweak = generate_nonce(16)
encode_text, tag = seed.encode(crypto_mode, padding_text, tweak=tweak)
if crypto_mode in [Modes.CTR]:
nonce = generate_nonce(16)
encode_text, tag = seed.encode(crypto_mode, padding_text, nonce=nonce)
if crypto_mode in [Modes.ECB]:
encode_text, tag = seed.encode(crypto_mode, padding_text)
print("encode_text, tag:", encode_text, tag)
if crypto_mode in [Modes.CBC, Modes.OFB, Modes.CFB, Modes.CFB8]:
decode_text = seed.decode(crypto_mode, encode_text, iv=iv)
if crypto_mode in [Modes.GCM]:
decode_text = seed.decode(crypto_mode, encode_text, iv=iv, additional_data=additional_data, tag=tag)
if crypto_mode in [Modes.XTS]:
decode_text = seed.decode(crypto_mode, encode_text, tweak=tweak)
if crypto_mode in [Modes.CTR]:
decode_text = seed.decode(crypto_mode, encode_text, nonce=nonce)
if crypto_mode in [Modes.ECB]:
decode_text = seed.decode(crypto_mode, encode_text)
print("decode_text:", decode_text)
unpadding_text = seed.padding_flush(padding_mode, decode_text)
print("unpadding_text:", unpadding_text.decode('utf-8'))
print()
print("------------ e n d ------------\n")
|
mediawatcher/__main__.py | adrien-f/mediawatcher | 1 | 6630907 | <filename>mediawatcher/__main__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import yaml
from mediawatcher import MediaWatcher
def main():
parser = argparse.ArgumentParser(description='Watch directories and move media files accordingly.')
parser.add_argument('-c', dest='config', help='path to the config file in YAML', required=True)
args = parser.parse_args()
with open(args.config) as f:
        config_yaml = yaml.safe_load(f)
watcher = MediaWatcher({**MediaWatcher.default_config, **config_yaml})
watcher.startup()
watcher.watch()
if __name__ == '__main__':
main()
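# A config file passed with -c might look like the following YAML (keys shown
# here are illustrative; the actual schema is whatever
# MediaWatcher.default_config expects):
#
#   directories:
#     - /downloads
#   destinations:
#     movies: /media/movies
#     shows: /media/shows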
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.484346 | 2 |
amethyst/tools/build_clang_commands/__main__.py | knight-ryu12/StarFoxAdventures | 22 | 6630908 | #!/usr/bin/env python3
"""Asks `make` to output all commands it would run, and generates `compile_commands.json`
from them.
This file's purpose is to tell the VSCode clangd extension how to compile the files,
so that it can do error checking, completion, etc.
Note that the paths must be absolute, so you'll need to run this script yourself
if you want to use this feature.
"""
import subprocess
import json
import os
def main():
commands = []
cwd = os.getcwd()
# get make to output all the commands it would run
proc = subprocess.run(["make", "--always-make", "--dry-run"],
capture_output=True, check=True)
# look at each command
for line in proc.stdout.decode('utf-8').splitlines():
cmd = line.split(' ', maxsplit=1)[0]
if cmd.endswith(('-gcc', '-as')):
# if it's compiling or assembling a file, create an entry
cmd = line.split()
# quick hack to remove options clang doesn't support
line = line.replace('-fno-toplevel-reorder', '')
line = line.replace('-mrelocatable', '')
line = line.replace('-mbroadway', '')
line = line.replace('-mregnames', '')
line = line.replace('--fatal-warnings', '')
# and prevent warnings for GLOBAL_ASM
line = line.replace('-Wall ', '-Wall -Wno-unknown-pragmas ')
commands.append({
'directory': cwd,
'command': line,
'file': cmd[-1],
})
# output the entries
with open('compile_commands.json', 'wt') as file:
file.write(json.dumps(commands, sort_keys=True, indent=4))
if __name__ == "__main__":
main()
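# Each generated entry in compile_commands.json looks roughly like this
# (paths and flags are illustrative):
#
#   {
#       "directory": "/home/user/StarFoxAdventures/amethyst",
#       "command": "powerpc-eabi-gcc -Wall -Wno-unknown-pragmas -c src/main.c -o build/main.o",
#       "file": "src/main.c"
#   }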
| en | 0.880428 | #!/usr/bin/env python3 Asks `make` to output all commands it would run, and generates `compile_commands.json` from them. This file's purpose is to tell the VSCode clangd extension how to compile the files, so that it can do error checking, completion, etc. Note that the paths must be absolute, so you'll need to run this script yourself if you want to use this feature. # get make to output all the commands it would run # look at each command # if it's compiling or assembling a file, create an entry # quick hack to remove options clang doesn't support # and prevent warnings for GLOBAL_ASM # output the entries | 2.62578 | 3 |
gmqtt/mqtt/connection.py | rkrell/gmqtt | 0 | 6630909 | import asyncio
import time
from .protocol import MQTTProtocol
class MQTTConnection(object):
def __init__(self, transport: asyncio.Transport, protocol: MQTTProtocol, clean_session: bool, keepalive: int):
self._transport = transport
self._protocol = protocol
self._protocol.set_connection(self)
self._buff = asyncio.Queue()
self._clean_session = clean_session
self._keepalive = keepalive
self._last_data_in = time.monotonic()
self._last_data_out = time.monotonic()
self._keep_connection_callback = asyncio.get_event_loop().call_later(self._keepalive, self._keep_connection)
@classmethod
async def create_connection(cls, host, port, ssl, clean_session, keepalive, loop=None):
loop = loop or asyncio.get_event_loop()
transport, protocol = await loop.create_connection(MQTTProtocol, host, port, ssl=ssl)
return MQTTConnection(transport, protocol, clean_session, keepalive)
def _keep_connection(self):
if self.is_closing():
return
if time.monotonic() - self._last_data_in >= self._keepalive:
self._send_ping_request()
self._keep_connection_callback = asyncio.get_event_loop().call_later(self._keepalive, self._keep_connection)
def put_package(self, pkg):
self._handler(*pkg)
def send_package(self, package):
        # This is not a blocking operation: the transport places the data in a
        # buffer, and that buffer is flushed asynchronously.
        if not isinstance(package, (bytes, bytearray)):
            package = package.encode()
self._transport.write(package)
async def auth(self, client_id, username, password, will_message=None, **kwargs):
await self._protocol.send_auth_package(client_id, username, password, self._clean_session,
self._keepalive, will_message=will_message, **kwargs)
def publish(self, message):
return self._protocol.send_publish(message)
def send_disconnect(self, reason_code=0, **properties):
self._protocol.send_disconnect(reason_code=reason_code, **properties)
def subscribe(self, topic, qos, no_local, retain_as_published, retain_handling_options, **kwargs):
return self._protocol.send_subscribe_packet(
topic, qos, no_local=no_local, retain_as_published=retain_as_published,
retain_handling_options=retain_handling_options, **kwargs)
def unsubscribe(self, topic, **kwargs):
return self._protocol.send_unsubscribe_packet(topic, **kwargs)
def send_simple_command(self, cmd):
self._protocol.send_simple_command_packet(cmd)
def send_command_with_mid(self, cmd, mid, dup, reason_code=0):
self._protocol.send_command_with_mid(cmd, mid, dup, reason_code=reason_code)
def _send_ping_request(self):
self._protocol.send_ping_request()
def set_handler(self, handler):
self._handler = handler
async def close(self):
self._keep_connection_callback.cancel()
self._transport.close()
def is_closing(self):
return self._transport.is_closing()
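# Rough usage sketch (illustrative only; in practice gmqtt's public Client
# class creates and drives this connection object):
#
#   async def demo():
#       conn = await MQTTConnection.create_connection(
#           'broker.example.com', 1883, ssl=False, clean_session=True,
#           keepalive=60)
#       await conn.auth('client-id', 'user', 'secret')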
| en | 0.909834 | # This is not blocking operation, because transport place the data # to the buffer, and this buffer flushing async | 2.817048 | 3 |
piper/mayapy/rig/__init__.py | MongoWobbler/piper | 0 | 6630910 | # Copyright (c) 2021 <NAME>. All Rights Reserved.
import os
import time
import inspect
import pymel.core as pm
import piper_config as pcfg
import piper.core.util as pcu
import piper.mayapy.util as myu
import piper.mayapy.convert as convert
import piper.mayapy.mayamath as mayamath
import piper.mayapy.pipernode as pipernode
import piper.mayapy.attribute as attribute
import piper.mayapy.pipe.paths as paths
import piper.mayapy.ui.window as uiwindow
from . import bone
from . import xform
from . import space
from . import curve
from . import control
from . import switcher
def getRootControl(rig):
"""
Gets the root control associated with the given rig.
Args:
rig (pm.nodetypes.piperRig): Rig node to get root control of.
Returns:
(pm.nodetypes.DependNode): Root control of rig.
"""
return attribute.getDestinationNode(rig.attr(pcfg.message_root_control))
def getMeshes():
"""
Gets all the meshes inside all the piper skinned nodes in the scene.
Returns:
(set): Piper transforms that hold mesh shapes grouped under piper skinned nodes.
"""
nodes = pipernode.get('piperSkinnedMesh')
return {mesh.getParent() for skin in nodes for mesh in skin.getChildren(ad=True, type='mesh') if mesh.getParent()}
def getSkeletonNodes(rigs=None):
"""
Gets all the piperSkinnedMesh nodes that are a child of a piperRig node that start with the skeleton namespace.
Args:
rigs (list): rigs to find skeleton nodes of. If None given, will search for selected or scene rigs.
Returns:
(dictionary): piperSkinnedMesh nodes in rig(s) that start with skeleton namespace. Rig as value
"""
if not rigs:
rigs = pipernode.get('piperRig')
return {child: rig for rig in rigs for child in rig.getChildren(ad=True, type='piperSkinnedMesh') if
pcfg.skeleton_namespace in child.namespace()}
def getSkeletonMeshes(rigs=None):
"""
    Gets all the transforms that are under the piperSkinnedMesh node that starts with the skeleton namespace.
    Args:
        rigs (list): Rigs to find skeleton meshes of. If None given, will search for scene rigs.
    Returns:
(dictionary): Transforms with mesh shape under piperSkinnedMesh node that starts with skeleton namespace.
"""
nodes = getSkeletonNodes(rigs=rigs)
return {mesh.getParent(): {'skinned_mesh': node, 'rig': rig} for node, rig in nodes.items()
for mesh in node.getChildren(ad=True, type='mesh')}
def setLockOnMeshes(lock):
"""
Locks or unlocks all the transforms under piper skinned nodes that have mesh shapes.
Args:
        lock (int): Display override mode to set on meshes. 0 is normal (unlocked), 1 is template, 2 is reference (locked).
"""
meshes = getMeshes()
for mesh in meshes:
try:
mesh.overrideEnabled.set(1)
mesh.overrideDisplayType.set(lock)
except RuntimeError as error:
pm.warning('Can\'t set lock on mesh! ' + str(error))
def lockMeshes():
"""
Locks all the transforms under piper skinned nodes that have mesh shapes.
"""
setLockOnMeshes(2)
def unlockMeshes():
"""
Unlocks all the transforms under piper skinned nodes that have mesh shapes.
"""
setLockOnMeshes(0)
def zeroOut(controls=None):
"""
Zeroes out the given controls to their bind pose. Retains current space.
Args:
controls (list): Controls to zero out.
"""
if not controls:
controls = pm.selected()
if not controls:
controls = control.getAll()
current_space = None
for ctrl in controls:
has_spaces = space.exists(ctrl)
if has_spaces:
spaces = space.getAll(ctrl)
current_space = space.getCurrent(ctrl)
[ctrl.attr(space_attribute).set(0) for space_attribute in spaces]
mayamath.zeroOut(ctrl)
if has_spaces and current_space:
space.switch(ctrl, current_space)
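# Example: zero out the current selection, or every control in the scene when
# nothing is selected (the argument below is a hypothetical list of control
# transforms):
#
#   zeroOut()
#   zeroOut([some_control])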
class Rig(object):
"""
Example:
from piper.mayapy.rig import Rig
with Rig() as rig:
root_ctrl = rig.root()[1][0]
pelvis_ctrl = rig.FK('pelvis', name='Pelvis', parent=root_ctrl)[1][0]
butt_ctrl = rig.extra('pelvis', 'butt', scale=1.05, spaces=[pelvis_ctrl, root_ctrl])
_, mouth_ctrls, _ = rig.FK('mouth', 'lips', parent=pelvis_ctrl, name='Mouth')
[rig.FK(joint, parent=pelvis_ctrl, axis='z', name='Eyes') for joint in ['eye_l', 'eye_r']]
"""
def __init__(self, path='', rig=None, find=False, group=False, color=True, copy_controls=True):
"""
Houses all rig scripts.
Args:
path (string): Path to skeletal mesh to prepare to start rigging.
rig (pm.nodetypes.piperRig): Rig transform that holds all skinned meshes referenced.
find (boolean): Will attempt to find piperRig node in scene if no rig or path is given.
group (boolean): If True, will automatically parent nodes into the groups and/or into rig node.
color (boolean): If True, will automatically color controls according to settings in piper_config.py
copy_controls (boolean): If True, will attempt to copy control shapes from existing rig on finish.
"""
self.start_time = time.time()
self.rig = rig
self.path = path
self.auto_group = group
self.auto_color = color
self.copy_controls = copy_controls
self.group_stack = {}
self.controls = {}
self.keep_colors = []
self.ik_controls = []
self.inner_controls = []
self.bendy_controls = []
self.inner_bendy_controls = []
self.root_control = None
self.body_base_control = None
self.namespace = pcfg.skeleton_namespace + ':'
if find and not rig:
rigs = pm.ls(type='piperRig')
if not rigs:
pm.error('No rigs found!')
elif len(rigs) > 1:
pm.warning('Found ' + str(len(rigs)) + ' rigs! Using ' + rigs[0].name())
self.rig = rigs[0]
else:
self.rig = rigs[0]
else:
self.prepare(path)
def __enter__(self):
"""
Context manager enter method.
Returns:
(piper.mayapy.rig.Rig): Class that holds all methods for rigging.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Context manager exit method.
"""
self.finish()
def prepare(self, path=''):
"""
Prepares the scene for a rig.
Returns:
(pm.nodetypes.piperRig): Rig transform that holds all skinned meshes referenced.
"""
if not path:
path = pm.sceneName()
# getRelativeArt checks if scene is saved
self.path = path
skeleton_path = paths.getRelativeArt(path=path)
rig_name, _ = os.path.splitext(os.path.basename(skeleton_path))
rig_name = rig_name.split(pcfg.skinned_mesh_prefix)[-1]
# if scene is modified, ask user if they would like to save, not save, or cancel operation
if not uiwindow.save():
pm.error('Scene not saved.')
# open skeletal mesh to check for bone health
if path != pm.sceneName():
pm.openFile(path, force=True, esn=False, prompt=False)
# perform a bone health check before referencing to emphasize any possible errors
bone.health()
# create new file, reference the skeleton into the new file, create rig group
pm.newFile(force=True)
self.rig = pipernode.createRig(name=rig_name)
one_minus = pipernode.oneMinus(self.rig.highPolyVisibility)
pm.createReference(skeleton_path, namespace=pcfg.skeleton_namespace)
pm.createReference(skeleton_path, namespace=pcfg.bind_namespace)
skinned_nodes = pipernode.get('piperSkinnedMesh')
[node.visibility.set(False) for node in skinned_nodes if node.name().startswith(pcfg.bind_namespace)]
pm.parent(skinned_nodes, self.rig)
[one_minus.output >> mesh.visibility for mesh in getSkeletonMeshes()]
lockMeshes()
return self.rig
def validateTransform(self, transform, i='01'):
"""
        Validates the given transform by casting it to a PyNode with namespace if it's not already one.
Args:
            transform (string or PyNode): Transform to validate to make sure it's a PyNode with namespace.
i (string): Digit format to incremental nodes to find with given i as the starting digit.
Returns:
(PyNode or list): Given transform as a PyNode.
"""
if not transform:
return transform
if isinstance(transform, pm.PyNode):
return transform
if not transform.startswith(self.namespace):
transform = self.namespace + transform
if transform.find('{}') >= 0:
return [pm.PyNode(node) for node in myu.getIncrementalNodes(transform, i)]
return pm.PyNode(transform)
def validateTransforms(self, transforms, i='01'):
"""
Convenience method for validating multiple transforms at once.
Args:
transforms (list): Transforms to validate to make sure they are PyNodes.
i (string): Digit format to incremental nodes to find with given i as the starting digit.
Returns:
(list): Transforms validated.
"""
nodes = []
for transform in transforms:
transform = self.validateTransform(transform, i=i)
nodes.extend(transform) if isinstance(transform, list) else nodes.append(transform)
return nodes
def addControls(self, controls, inner=None, name=''):
"""
Adds controls to the self.controls stack to be added into the controls set
Args:
controls (list): Control(s) to be added to controls set.
inner (list): Inner controls to be added to inner controls list.
name (string): Name of control set.
"""
self.controls[name] = self.controls.get(name) + controls if self.controls.get(name) else controls
if inner:
self.inner_controls.extend(inner)
if name == 'bendy':
self.inner_bendy_controls.extend(inner)
if name == 'bendy':
self.bendy_controls.extend(controls)
if name == 'IK':
self.ik_controls.append(controls[-1])
def addToGroupStack(self, parent, children):
"""
Adds the given children as the value to the given parent key to the group_stack dictionary.
Args:
parent (pm.nodetypes.Transform): Node to add as key that things will be parented to.
children (list): Nodes to parent to given parent.
"""
current = self.group_stack.get(parent)
self.group_stack[parent] = current + children if current else children
def findGroup(self, reference_transform, transforms):
"""
Finds the group the given transforms should be parented under based on given reference transform.
Args:
reference_transform (pm.nodetypes.Transform): Used to search parent hierarchy or group stack for parent.
transforms (list): Nodes to parent.
"""
found = False
group_parent = None
transform_parent = myu.getRootParent(reference_transform)
# try to find the reference transform's parent in the group stack to figure out where it should be parented to
for parent, children in self.group_stack.items():
if transform_parent in children:
group_parent = parent
break
# if found, add transform to the found parent
if group_parent:
self.addToGroupStack(group_parent, transforms)
found = True
# else get the first parent that is either a piperRig or is a group
else:
parent = myu.getFirstTypeOrEndsWithParent(reference_transform, 'piperRig', pcfg.group_suffix)
if parent:
self.addToGroupStack(parent, transforms)
found = True
if found and self.auto_group:
self.runGroupStack()
def runGroupStack(self):
"""
Parents all the given children to their corresponding parent key in the group stack dictionary.
"""
for parent, children in self.group_stack.items():
children = [myu.getRootParent(child) for child in children]
pm.parent(children, parent)
self.group_stack = {}
def runControlStack(self):
"""
Adds all the controls in self.controls to the control set node.
"""
pm.select(cl=True)
control_members = []
movable_members = []
control_set = control.getSet(pcfg.control_set)
inners_set = control.getSet(pcfg.inner_controls_set)
movable_set = control.getSet(pcfg.movable_controls_set)
iks_set = control.getSet(pcfg.ik_controls_set)
for name, controls in self.controls.items():
if not name:
control_members.extend(controls)
continue
module_set = control.getSet(name)
module_set.addMembers(controls)
control_members.append(module_set)
if self.body_base_control:
movable_members.append(self.body_base_control)
movable_members.append(iks_set)
control_members.append(inners_set)
control_members.append(movable_set)
iks_set.addMembers(self.ik_controls)
inners_set.addMembers(self.inner_controls + self.inner_bendy_controls)
movable_set.addMembers(movable_members)
control_set.addMembers(control_members)
self.controls = {}
self.inner_controls = []
self.ik_controls = []
def finish(self):
"""
Groups everything, creates the control set group, colorizes, copies control shapes, and displays time.
"""
if self.auto_color:
self.colorize()
self.runGroupStack()
self.runControlStack()
if self.copy_controls and self.path:
pm.select(cl=True)
rig_path = paths.getRigPath(self.path)
if rig_path:
control.replaceShapes(rig_path)
end_time = time.time()
total_time = round(end_time - self.start_time, 2)
pm.displayInfo(self.rig.name() + '\'s rig is finished. Time = ' + str(total_time) + ' seconds.')
def _color(self, controls, left_suffix, right_suffix, left_color, right_color, middle_color):
"""
        Sets the colors of the given controls that end with the given suffixes to the given left, right, and middle colors.
Args:
controls (list): Controls to set colors of.
left_suffix (string or Tuple): Suffix that ctrl must end with for color to be set to left color.
right_suffix (string or Tuple): Suffix that ctrl must end with for color to be set to right color.
left_color (string): Name of color for controls ending with left suffix.
right_color (string): Name of color for controls ending with right suffix.
middle_color (string): Name of color for controls NOT ending with either right OR left suffix.
"""
for ctrl in controls:
ctrl_name = ctrl.name()
if ctrl in self.keep_colors:
continue
elif ctrl_name.endswith(left_suffix):
curve.color(ctrl, left_color)
elif ctrl_name.endswith(right_suffix):
curve.color(ctrl, right_color)
else:
curve.color(ctrl, middle_color)
def colorize(self):
"""
        Colors all the controls according to the settings in piper_config.py
"""
controls = pcu.flatten(list(self.controls.values()))
left_control = pcfg.left_suffix + pcfg.control_suffix
left_banker = pcfg.left_suffix + pcfg.banker_suffix + pcfg.control_suffix
left_reverse = pcfg.left_suffix + pcfg.reverse_suffix + pcfg.control_suffix
right_control = pcfg.right_suffix + pcfg.control_suffix
right_banker = pcfg.right_suffix + pcfg.banker_suffix + pcfg.control_suffix
right_reverse = pcfg.right_suffix + pcfg.reverse_suffix + pcfg.control_suffix
left_suffixes = (left_control, left_banker, left_reverse)
right_suffixes = (right_control, right_banker, right_reverse)
self._color(controls, left_suffixes, right_suffixes,
pcfg.left_color, pcfg.right_color, pcfg.middle_color)
left_suffix = pcfg.left_suffix + pcfg.inner_suffix + pcfg.control_suffix
right_suffix = pcfg.right_suffix + pcfg.inner_suffix + pcfg.control_suffix
self._color(self.inner_controls, left_suffix, right_suffix,
pcfg.left_inner_color, pcfg.right_inner_color, pcfg.middle_inner_color)
left_suffix = pcfg.left_suffix + pcfg.bendy_suffix + pcfg.control_suffix
right_suffix = pcfg.right_suffix + pcfg.bendy_suffix + pcfg.control_suffix
self._color(self.bendy_controls, left_suffix, right_suffix,
pcfg.left_bendy_color, pcfg.right_bendy_color, pcfg.middle_bendy_color)
left_suffix = pcfg.left_suffix + pcfg.inner_suffix + pcfg.bendy_suffix + pcfg.control_suffix
right_suffix = pcfg.right_suffix + pcfg.inner_suffix + pcfg.bendy_suffix + pcfg.control_suffix
self._color(self.inner_bendy_controls, left_suffix, right_suffix,
pcfg.left_inner_bendy_color, pcfg.right_inner_bendy_color, pcfg.middle_inner_bendy_color)
def organize(self, transforms, prefix=None, name=None):
"""
Organizes the given transforms into a group if name given and into the rig node.
Args:
transforms (Iterable): Nodes to group and/or move into rig node.
prefix (string): Prefix for group name. Usually calling function name.
name (string): Name to give group.
Returns:
(pm.nodetypes.Transform): Group node made.
"""
# preliminary checks, don't make group if no name given and there is no rig node
if (name is None or not transforms) or (not name and not self.rig):
return
group = None
parent_to_rig = transforms
if name:
            prefix = prefix[0].capitalize() + prefix[1:]
group_name = prefix + '_' + name.capitalize().replace(' ', '_') + pcfg.group_suffix
if pm.objExists(group_name):
group = pm.PyNode(group_name)
else:
group = pm.group(name=group_name, empty=True)
attribute.lockAndHideCompound(group)
if group:
self.addToGroupStack(group, transforms)
parent_to_rig = [group]
if self.rig:
self.addToGroupStack(self.rig, parent_to_rig)
# drive visibility of groups through rig node
if group:
attribute_name = group.name() + pcfg.visibility_suffix
if not self.rig.hasAttr(attribute_name):
self.rig.addAttr(attribute_name, at='bool', dv=1, k=True)
self.rig.attr(attribute_name) >> group.visibility
group.setAttr('visibility', k=False, cb=False) # set hidden, still keyable even though k is False
if self.auto_group:
self.runGroupStack()
return group
def dynamicPivot(self, transform, target=None, shape=curve.square, axis=None, color='red', scale=1, size=None):
"""
Creates a dynamic pivot at the given transform driving the given target.
Args:
transform (pm.nodetypes.Transform): Transform to create dynamic pivot at.
target (pm.nodetypes.Transform): Transform to drive with dynamic pivot.
shape (method): Used to create curve or visual representation of FK control.
axis (string): Orientation for control made.
color (string): Color for control.
scale (float): Multiplied times size.
size (list): X, Y, Z sizes of control.
Returns:
(pm.nodetypes.Transform): Control created.
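        Example (hypothetical control names):
            pivot_ctrl = rig.dynamicPivot(foot_ctrl, target=foot_ctrl)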
"""
if not target:
target = transform
pivot_ctrl, _ = control.create(transform,
shape=shape,
name=target.name() + pcfg.dynamic_pivot_suffix,
axis=axis,
color=color,
scale=scale,
parent=target,
matrix_offset=False,
size=size)
pivot_ctrl.translate >> target.rotatePivot
attribute.nonKeyableCompound(pivot_ctrl, ['r', 's'])
pivot_ctrl.addAttr(pcfg.dynamic_pivot_rest, dt='string', k=False, h=True, s=True)
pivot_ctrl.attr(pcfg.dynamic_pivot_rest).set(transform.name())
function_name = inspect.currentframe().f_code.co_name
self.organize([pivot_ctrl], prefix=function_name, name='')
self.addControls([pivot_ctrl], name=function_name)
return pivot_ctrl
@staticmethod
def _tagControllerParent(ctrl, parent, i, controls):
"""
Derives whether to tag the given ctrl with the parent, the parent's inner control, or the last in controls.
Args:
ctrl (pm.nodetypes.Transform or string): Transform that will receive parent to pick walk up to.
parent (pm.nodetypes.Transform): Parent that could drive ctrl's chain.
i (int): Iterator.
controls (list): Controls being added to chain.
"""
pick_walk_parent = controls[-1] if controls else None
if parent and i == 0:
inner_ctrl = parent.name().replace(pcfg.control_suffix, pcfg.inner_suffix + pcfg.control_suffix)
pick_walk_parent = pm.PyNode(inner_ctrl) if pm.objExists(inner_ctrl) else parent
if pick_walk_parent:
control.tagAsControllerParent(ctrl, pick_walk_parent)
@staticmethod
def _getAxis(i, transforms, last_axis, duplicates=None):
"""
Attempts to figure out the axis for the given iteration of the given transforms and/or duplicates.
Args:
i (int): Iteration count.
transforms (list): Transforms to use to get orient axis.
last_axis (string): Axis to fall back on when a new one cannot be calculated.
duplicates (list): Duplicates of transforms.
Returns:
(tuple): Axis calculated from the orientation of the current and next iteration, returned twice so the caller can also store it as the last axis used.
"""
axis = last_axis
if not duplicates:
duplicates = transforms
if duplicates[i] != duplicates[-1]:
axis_vector = mayamath.getOrientAxis(duplicates[i], duplicates[i + 1])
axis = convert.axisToString(axis_vector)
# attempt to deduce axis if transform only has one child and axis is not given
elif len(transforms) == 1 and transforms[0].getChildren() and len(transforms[0].getChildren()) == 1:
axis_vector = mayamath.getOrientAxis(transforms[0], transforms[0].getChildren()[0])
axis = convert.axisToString(axis_vector)
return axis, axis
def root(self, transform=pcfg.root_joint_name, name=pcfg.root_joint_name):
"""
Creates a root control with a squash and stretch attribute.
Args:
transform (pm.nodetypes.Transform or string): Joint to create root control on.
name (string): Name to give group
Returns:
(tuple): Duplicate joints, controls, and inner controls created, as returned by FK.
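Example:
    Minimal sketch, same as the class example; grabs the first control of the returned FK tuple:
    root_ctrl = rig.root()[1][0]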
"""
# create the root control as a regular FK
transform = self.validateTransform(transform)
controls = self.FK(transform, name=name)
self.root_control = controls[1][0]
# create a group above root control that will be scaled and squash and stretch attribute
name_prefix = name.lower() + '_scale'
root_scale = pm.group(self.root_control, name=name_prefix + pcfg.group_suffix)
attribute.addSeparator(self.root_control)
self.root_control.addAttr(pcfg.squash_stretch_attribute, k=True, dv=1, min=0.001)
self.root_control.addAttr(pcfg.squash_stretch_weight_attribute, k=True, dv=1, hsx=True, hsn=True, smn=0, smx=1)
attribute.nonKeyable(self.root_control.attr(pcfg.squash_stretch_weight_attribute))
# create blender
blender = pm.createNode('piperBlendAxis', name=name_prefix + '_BA')
self.root_control.attr(pcfg.squash_stretch_weight_attribute) >> blender.weight
blender.axis1.set(1, 1, 1)
blender.axis2.set(1, 1, 1)
blender.output >> root_scale.scale
# hook up squash and stretch
reciprocal = xform.squashStretch(self.root_control.attr(pcfg.squash_stretch_attribute), blender, 'a2')
transform.addAttr(pcfg.root_scale_up, k=True, dv=1)
transform.addAttr(pcfg.root_scale_sides, k=True, dv=1)
self.root_control.attr(pcfg.squash_stretch_attribute) >> transform.attr(pcfg.root_scale_up)
reciprocal.output >> transform.attr(pcfg.root_scale_sides)
# connect root and rig with message for easy look up
self.root_control.addAttr(pcfg.message_root_control, at='message')
self.rig.attr(pcfg.message_root_control) >> self.root_control.attr(pcfg.message_root_control)
return controls
def FK(self, start, end='', parent=None, axis=None, shape='', sizes=None, connect=True, global_ctrl='', name=''):
"""
Creates FK controls for the transform chain deduced by the start and end transforms.
Args:
start (pm.nodetypes.Transform or string): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Transform or string): End of the chain to be driven by FK controls.
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
axis (string): Only used if no end joint given for shape's axis to match rotations.
shape (method): Used to create curve or visual representation of FK control.
sizes (list): Sizes to use for each control.
connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all FK components.
Returns:
(tuple): Duplicate joint chain, controls, and inner controls, each listed in order from start to end.
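Example:
    Minimal sketch; the joint names are placeholders and root_ctrl is assumed to come from rig.root()[1][0]:
    fk_joints, fk_ctrls, inner_ctrls = rig.FK('spine_01', 'spine_03', parent=root_ctrl, name='Spine')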
"""
if not shape:
shape = curve.circle
if global_ctrl == '':
global_ctrl = self.root_control
controls = []
decomposes = []
multiplies = []
in_controls = []
calc_axis = 'y'
last_axis = axis
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
duplicates = xform.duplicateChain(transforms, prefix=pcfg.fk_prefix, color='green', scale=0.5)
for i, (transform, duplicate) in enumerate(zip(transforms, duplicates)):
dup_name = duplicate.name()
calc_axis, last_axis = [axis, axis] if axis else self._getAxis(i, transforms, last_axis, duplicates)
size = sizes[i] if sizes else control.calculateSize(transform)
ctrl_parent = parent if i == 0 else controls[i - 1]
ctrl = control.create(duplicate, pipernode.createFK, dup_name, calc_axis,
scale=1.2, control_shape=shape, size=size)
self._tagControllerParent(ctrl, parent, i, in_controls)
attribute.bindConnect(transform, ctrl, ctrl_parent) # connects attributes that offset controls
controls.append(ctrl)
xform.offsetConstraint(ctrl, duplicate, message=True)
in_ctrl = control.create(duplicate, name=dup_name + pcfg.inner_suffix, axis=calc_axis, shape=curve.plus,
size=size, parent=ctrl, color='burnt orange', inner=.125, matrix_offset=True)
decompose = xform.parentMatrixConstraint(in_ctrl, duplicate)
decomposes.append(decompose)
in_controls.append(in_ctrl)
transform_parent = None if transform.name() == pcfg.root_joint_name else transform.getParent()
bind_transform = convert.toBind(transform, return_node=True)
bind_transform.attr(pcfg.length_attribute) >> ctrl.initialLength
spaces = [transform_parent, ctrl_parent]
# cast to list so the emptiness check below works in Python 3
spaces = list(filter(lambda node: not isinstance(node, (pm.nodetypes.PiperSkinnedMesh, type(None))), spaces))
if spaces:
space.create(in_ctrl, spaces)
if connect:
xform.parentMatrixConstraint(duplicate, transform)
# used for scale calculation in FK control
ctrl.worldMatrix >> ctrl.scaleDriverMatrix
duplicate_parent = duplicate.getParent()
if duplicate_parent:
duplicate_parent.parentMatrix >> ctrl.scaleParentMatrix
duplicate_parent.translate >> ctrl.scaleTranslate
calc_axis = calc_axis.lstrip('n')
main_term = decomposes[-2].attr('outputScale' + calc_axis.upper())
inputs = [ctrl_parent.attr('s' + calc_axis), ctrl.outputScale]
if global_ctrl:
decompose = attribute.getDecomposeMatrix(global_ctrl.worldMatrix[0])
inputs.append(decompose.outputScaleY)
# connect all the stuff needed for volumetric scaling
multiply = pipernode.multiply(duplicate_parent, main_term, ctrl.volumetric, inputs)
multiplies.append(multiply)
# edge cases for scaling
if parent and len(transforms) > 1 and parent != global_ctrl:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[0])
parent.attr('s' + calc_axis) >> multiply_input
if len(transforms) > 2 and controls[1] != controls[-1]:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[1])
parent.attr('s' + calc_axis) >> multiply_input
if len(transforms) > 2:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[1])
controls[0].attr('s' + calc_axis) >> multiply_input
if start.name(stripNamespace=True) == pcfg.body_base_joint_name:
self.body_base_control = controls[0]
function_name = inspect.currentframe().f_code.co_name
self.organize(controls + [duplicates[0]], prefix=function_name, name=name)
self.addControls(controls, inner=in_controls, name=function_name)
return duplicates, controls, in_controls
def IK(self, start, end, parent=None, shape=curve.ring, sizes=None, connect=True, global_ctrl='', name=''):
"""
Creates IK controls and IK RP solver and for the given start and end joints.
Args:
start (pm.nodetypes.Joint or string): Start of the joint chain.
end (pm.nodetypes.Joint or string): End of the joint chain.
parent (pm.nodetypes.Transform): Parent of start control.
shape (method): Creates the shape control that will drive joints.
sizes (list): Sizes to use for each control.
connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all IK components.
Returns:
(tuple): Duplicate joint chain, controls in order from start to end, and the scale buffer joint.
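Example:
    Minimal sketch; joint names are placeholders, clavicle_ctrl is assumed to be an existing control, and
    the chain between start and end must have at least one middle joint:
    ik_joints, ik_ctrls, scale_buffer = rig.IK('upperarm_l', 'hand_l', parent=clavicle_ctrl, name='Arm')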
"""
axis = None
mid_ctrl = None
start_ctrl = None
scale_buffer = None
controls = []
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
duplicates = xform.duplicateChain(transforms, prefix=pcfg.ik_prefix, color='purple', scale=0.5)
mid = pcu.getMedian(transforms)
mid_duplicate = pcu.getMedian(duplicates)
if global_ctrl == '':
global_ctrl = self.root_control
if mid == start or mid == end:
pm.error('Not enough joints given! {} is the mid joint?'.format(mid.name()))
for i, (transform, duplicate) in enumerate(zip(transforms, duplicates)):
dup_name = duplicate.name(stripNamespace=True)
size = sizes[i] if sizes else control.calculateSize(transform)
ctrl_parent = parent if i == 0 else None
if transform != transforms[-1]:
next_transform = transforms[i + 1]
axis_vector = mayamath.getOrientAxis(transform, next_transform)
axis = convert.axisToString(axis_vector)
# start
if transform == transforms[0]:
ctrl = control.create(duplicate, name=dup_name, axis=axis, shape=shape, size=size)
attribute.bindConnect(transform, ctrl, ctrl_parent)
start_ctrl = ctrl
# scale buffer transform
scale_buffer = pm.joint(n=dup_name + pcfg.scale_buffer_suffix)
scale_buffer.segmentScaleCompensate.set(False)
pm.matchTransform(scale_buffer, duplicate)
pm.parent(duplicate, scale_buffer)
# mid
elif transform == mid:
ctrl = control.create(duplicate, curve.orb, dup_name, axis, scale=0.1, matrix_offset=False, size=size)
translation, rotate, _, _ = xform.calculatePoleVector(start, mid, end)
pm.xform(ctrl, t=translation, ro=rotate)
mid_ctrl = ctrl
# end
elif transform == transforms[-1]:
ctrl = control.create(duplicate, pipernode.createIK, dup_name, axis, control_shape=shape, size=size)
attribute.bindConnect(transform, ctrl)
else:
# other unknown joint(s), left for possible future 3+ IK joint chains
ctrl = control.create(duplicate, name=dup_name, axis=axis, shape=shape, size=size)
if connect:
xform.parentMatrixConstraint(duplicate, transform)
self._tagControllerParent(ctrl, parent, i, controls)
controls.append(ctrl)
piper_ik = controls[-1]
nodes_to_organize = [controls[0], scale_buffer, piper_ik]
mid_bind = convert.toBind(mid, return_node=True)
bind_transform = convert.toBind(transforms[-1], return_node=True)
mid_bind.attr(pcfg.length_attribute) >> piper_ik.startInitialLength
bind_transform.attr(pcfg.length_attribute) >> piper_ik.endInitialLength
if axis.startswith('n'):
piper_ik.direction.set(-1)
axis = axis.lstrip('n')
# connect controls to joints, and make ik handle
decompose = xform.parentMatrixConstraint(start_ctrl, scale_buffer, t=True, r=False, s=True)
xform.parentMatrixConstraint(piper_ik, duplicates[-1], t=False)
ik_handle_name = duplicates[-1].name(stripNamespace=True) + '_handle'
ik_handle, _ = pm.ikHandle(sj=duplicates[0], ee=duplicates[-1], sol='ikRPsolver', n=ik_handle_name, pw=1, w=1)
ik_handle.visibility.set(False)
pm.parent(ik_handle, piper_ik)
mayamath.zeroOut(ik_handle)
ik_handle.translate >> piper_ik.handleTranslate
ik_handle.parentMatrix >> piper_ik.handleParentMatrix
# xform.poleVectorMatrixConstraint(ik_handle, mid_ctrl)
attribute.addSeparator(mid_ctrl)
mid_ctrl.addAttr('poleVectorWeight', k=True, dv=1, min=0, max=1)
constraint = pm.poleVectorConstraint(mid_ctrl, ik_handle)
mid_ctrl.poleVectorWeight >> constraint.attr(mid_ctrl.name() + 'W0')
# connect the rest
start_ctrl.attr('s' + axis) >> piper_ik.startControlScale
start_ctrl.worldMatrix >> piper_ik.startMatrix
mid_ctrl.worldMatrix >> piper_ik.poleVectorMatrix
piper_ik.startOutput >> mid_duplicate.attr('t' + axis)
piper_ik.endOutput >> duplicates[-1].attr('t' + axis)
piper_ik.twist >> ik_handle.twist
# scale ctrl connect
decompose_scale = decompose.attr('outputScale' + axis.upper())
pipernode.multiply(scale_buffer, decompose_scale, inputs=[piper_ik.startOutputScale])
pipernode.multiply(mid_duplicate, mid_ctrl.attr('s' + axis), inputs=[piper_ik.endOutputScale])
mid_ctrl.attr('s' + axis) >> piper_ik.poleControlScale
# parent pole vector to end control and create its space
pm.parent(mid_ctrl, piper_ik)
xform.toOffsetMatrix(mid_ctrl)
space.create(mid_ctrl, [start_ctrl])
attribute.lockAndHideCompound(mid_ctrl, ['r'])
# preferred angle connection
mid_bind = convert.toBind(mid, pm.warning)
if mid_bind:
mid_bind.preferredAngle >> piper_ik.preferredAngleInput
piper_ik.preferredAngleOutput >> mid_duplicate.preferredAngle
# must parent before creating spaces
if global_ctrl:
pm.parent(piper_ik, global_ctrl)
nodes_to_organize = [controls[0], scale_buffer]
# create spaces for piper ik
spaces = list(filter(None, [parent, global_ctrl]))
space.create(piper_ik, spaces)
# global scale comes from parent's world matrix scale
if parent:
parent_decompose = pm.createNode('decomposeMatrix', n=parent.name(stripNamespace=True) + '_DM')
parent.worldMatrix >> parent_decompose.inputMatrix
parent_decompose.attr('outputScale' + axis.upper()) >> piper_ik.globalScale
if start.name(stripNamespace=True) == pcfg.body_base_joint_name:
self.body_base_control = controls[0]
function_name = inspect.currentframe().f_code.co_name
self.organize(nodes_to_organize, prefix=function_name, name=name)
self.addControls(controls, name=function_name)
return duplicates, controls, scale_buffer
def FKIK(self, start, end, parent=None, fk_shape='', ik_shape='', proxy=True, global_ctrl='', name=''):
"""
Creates a FK and IK controls that drive the chain from start to end.
Args:
start (pm.nodetypes.Joint or string): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Joint or string): End of the chain to be driven by FK controls.
If none given, will only drive start
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
fk_shape (method): Used to create curve or visual representation for the FK controls.
ik_shape (method): Used to create curve or visual representation for the IK controls.
proxy (boolean): If True, adds a proxy FK_IK attribute to all controls.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all FKIK components.
Returns:
(tuple): FK duplicate transforms, IK duplicate transforms, and all controls created (switcher control first).
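Example:
    Minimal sketch; joint names are placeholders and pelvis_ctrl is assumed to be an existing control.
    The first item of the returned controls list is the FK/IK switcher:
    fk_joints, ik_joints, leg_ctrls = rig.FKIK('thigh_l', 'foot_l', parent=pelvis_ctrl, name='Leg')
    switcher_ctrl = leg_ctrls[0]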
"""
if not fk_shape:
fk_shape = curve.circle
if not ik_shape:
ik_shape = curve.ring
if global_ctrl == '':
global_ctrl = self.root_control
# create joint chains that are the same as the given start and end chain for FK and IK, then create controls
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
sizes = [control.calculateSize(transform) for transform in transforms]
fk_transforms, fk_ctrls, in_ctrls = self.FK(start, end, parent, '', fk_shape, sizes, False, global_ctrl, None)
ik_transforms, ik_ctrls, buffer = self.IK(start, end, parent, ik_shape, sizes, False, global_ctrl, None)
controls = fk_ctrls + in_ctrls + ik_ctrls
# create the switcher control and add the transforms, fk, and iks to its attribute to store it
switcher_control = switcher.create(end, end.name(stripNamespace=True))
switcher_attribute = switcher_control.attr(pcfg.fk_ik_attribute)
switcher.addData(switcher_control.attr(pcfg.switcher_transforms), transforms, names=True)
switcher.addData(switcher_control.attr(pcfg.switcher_fk), fk_ctrls + in_ctrls, names=True)
switcher.addData(switcher_control.attr(pcfg.switcher_ik), ik_ctrls, names=True)
controls.insert(0, switcher_control)
# one minus the output of the fk ik attribute in order to drive visibility of ik/fk controls
one_minus = pipernode.oneMinus(source=switcher_attribute)
[one_minus.output >> fk.lodVisibility for fk in fk_ctrls + in_ctrls]
[switcher_attribute >> ik.lodVisibility for ik in ik_ctrls]
# use spaces to drive original chain with fk and ik transforms and hook up switcher attributes
for og_transform, fk_transform, ik_transform in zip(transforms, fk_transforms, ik_transforms):
world_space, fk_space, ik_space = space.create(og_transform, [fk_transform, ik_transform], direct=True)
og_transform.attr(fk_space).set(1)
switcher_attribute >> og_transform.attr(ik_space)
results = fk_transforms, ik_transforms, controls
function_name = inspect.currentframe().f_code.co_name
nodes_to_organize = [fk_transforms[0], buffer] + fk_ctrls + [ik_ctrls[0], switcher_control]
self.organize(nodes_to_organize, prefix=function_name, name=name)
self.addControls([switcher_control], name=function_name)
if not proxy:
return results
# make proxy fk ik attribute on all the controls
for ctrl in controls[1:]: # start on index 1 since switcher is on index 0
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
# make IK control drive switcher visibility
ik_ctrls[-1].addAttr(pcfg.switcher_visibility, at='bool', dv=0, k=True)
switcher_visibility = ik_ctrls[-1].attr(pcfg.switcher_visibility)
switcher_visibility >> switcher_control.lodVisibility
attribute.nonKeyable(switcher_visibility)
return results
def extra(self, transform, name, parent=None, shape=curve.circle, axis='y', color='salmon', scale=1.0, spaces=None):
"""
Creates an extra control that does not drive the given transform directly; it is meant to be used with spaces and to act as a parent.
Args:
transform (pm.nodetypes.Transform or string): Transform to create control on.
name (string): Name to append to given transform name.
parent (pm.nodetypes.Transform): Transform to parent the control created onto.
shape (method): Creates the control curve.
axis (string): Orientation for control.
color (string): Color of curve.
scale (float): Scale to multiply by joint radius.
spaces (iterator or None): A bunch of pm.nodetypes.Transform(s) that will drive the given transform.
Returns:
(tuple): Control created and the spaces created on it.
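Example:
    Minimal sketch mirroring the class example; pelvis_ctrl and root_ctrl are assumed to already exist:
    butt_ctrl, butt_spaces = rig.extra('pelvis', 'butt', scale=1.05, spaces=[pelvis_ctrl, root_ctrl])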
"""
# allows for global scaling to work, otherwise parent under something that gets globally scaled
# or fix so that global scale gets multiplied onto created control if no parent given
if not parent:
parent = self.root_control
transform = self.validateTransform(transform)
name = transform.name(stripNamespace=True) + '_' + name
ctrl = control.create(transform, shape, name, axis, color, scale, parent=parent)
spaces = space.create(ctrl, spaces)
space.switch(ctrl, spaces[-1], r=False, o=False, s=False)
self.addControls([ctrl])
if not parent:
self.organize([ctrl], prefix=inspect.currentframe().f_code.co_name, name='')
# don't auto colorize if color is given
if color:
self.keep_colors.append(ctrl)
return ctrl, spaces
def twist(self, joint, driver, target, axis=None, blended=True, weight=0.5, global_ctrl='', name=''):
"""
Creates the twist control that mimics twist of given target based on given weight.
Args:
joint (pm.nodetypes.Transform or string): Joint to create FK control with twist attributes on.
driver (pm.nodetypes.Transform or string): The "parent" for the given joint.
target (pm.nodetypes.Transform or string): Used to mimic twist.
axis (string or None): Axis to mimic twist of.
blended (boolean): If True, will blend translate of joint between given driver and target.
weight (float): Amount of twist joint will mimic from given target.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all twist components.
Returns:
(list): Duplicate joint(s) as first index, control(s) as second index, and inner control(s) as third index.
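Example:
    Minimal sketch for a forearm twist; all joint names are placeholders:
    rig.twist('lowerarm_twist_01_l', 'lowerarm_l', 'hand_l', weight=0.5, name='Twist')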
"""
# get distance variables before making FK controls.
joint, driver, target = self.validateTransforms([joint, driver, target])
distance_percentage = 1
if driver != target:
total_distance = mayamath.getDistance(driver, target)
joint_distance = mayamath.getDistance(driver, joint)
distance_percentage = joint_distance / total_distance
# derive axis from driver and target
if not axis:
axis = mayamath.getOrientAxis(driver, joint)
axis = convert.axisToString(axis, absolute=True)
# create FK control
parent = None if blended else driver
duplicates, controls, in_ctrl = self.FK(joint, parent=parent, axis=axis, global_ctrl=global_ctrl, name=name)
ctrl = controls[0]
attribute.addSeparator(ctrl)
if blended:
# name and create blend matrix
driver_name = driver.name(stripNamespace=True)
target_name = target.name(stripNamespace=True)
blend_name = driver_name + '_To_' + target_name + pcfg.twist_blend_suffix
blend_matrix = pm.createNode('blendMatrix', n=blend_name)
# connect blend matrix and set default values
driver.worldMatrix >> blend_matrix.inputMatrix
target.worldMatrix >> blend_matrix.target[0].targetMatrix
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
blend_matrix.target[0].useRotate.set(False)
blend_matrix.target[0].useScale.set(False)
blend_matrix.target[0].useShear.set(False)
# create attribute on control to drive the distance weight
ctrl.addAttr(pcfg.twist_blend_weight_attribute, k=True, dv=1, hsx=True, hsn=True, smn=-1, smx=1)
ctrl.attr(pcfg.twist_blend_weight_attribute) >> blend_matrix.target[0].weight
ctrl.attr(pcfg.twist_blend_weight_attribute).set(distance_percentage)
# create twist node and add twist attribute on control
twist_node = pipernode.createSwingTwist(target, ctrl, axis=axis, twist=weight)
ctrl.addAttr(pcfg.twist_weight_attribute, k=True, dv=weight, hsx=True, hsn=True, smn=-1, smx=1)
ctrl.attr(pcfg.twist_weight_attribute) >> twist_node.twist
return duplicates, controls, in_ctrl
def bendy(self, joints, ctrl_parent=None, shape=curve.sun, i='01', name=''):
"""
Creates controls for each given joint that will be used as part of a nurbs surface to drive given joints.
Args:
joints (list): Joints to create controls for. Must have at least three joints!
ctrl_parent (pm.nodetypes.DependNode): Node to tag as control parent for pick-walking.
shape (method): Used to create curve or visual representation of bendy control.
i (string): Starting digit format used to find incremental middle joints when a name contains '{}' (e.g. '01').
name (str or None): Name to give group that will house all bendy components.
Returns:
(list): Controls of bendy chain.
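Example:
    Minimal sketch; joint names are placeholders and at least three joints are required:
    bendy_ctrls = rig.bendy(['upperarm_l', 'upperarm_bendy_l', 'lowerarm_l'], name='Arm')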
"""
joint_length = len(joints)
if joint_length < 3:
pm.error('Not enough joints!')
locators = []
controls = []
inner_controls = []
control_existed = []
controls_to_organize = []
joints = self.validateTransforms(joints, i=i)
start_joint = joints[0]
end_joint = joints[-1]
end_bind_joint = convert.toBind(end_joint)
prefix = joints[1].name(stripNamespace=True)
start_position = pm.xform(start_joint, q=True, ws=True, t=True)
end_position = pm.xform(end_joint, q=True, ws=True, t=True)
start_position = convert.toVector(start_position)
end_position = convert.toVector(end_position)
surface_position = (start_position + end_position) / 2
surface_rotation = pm.xform(start_joint, q=True, ws=True, ro=True)
surface_length = mayamath.getDistance(start_position, end_position)
surface_direction = mayamath.getDirection(start_position, end_position)
axis = mayamath.getOrientAxis(start_joint, end_joint)
axis_label = convert.axisToString(axis)
absolute_axis_label = axis_label.lstrip('n')
up_axis_label = convert.axisToTriAxis(axis)[-1]
up_axis = convert.axisToVector(up_axis_label)
up_axis = convert.toVector(up_axis)
# if axis is negative, rotate nurbs surface 180 degrees so that UVs end up going down the correct direction
if axis_label.startswith('n'):
axis_index = convert.axisToIndex(up_axis_label)
surface_rotation[axis_index] = surface_rotation[axis_index] + 180
up_position = start_position + (surface_direction * (surface_length / 2)) + (up_axis * (surface_length / 2))
up_locator = pm.spaceLocator(n=prefix + '_up_locator')
up_locator.t.set(up_position)
up_locator.visibility.set(False)
xform.offsetConstraint(start_joint, up_locator, offset=True)
surface_name = prefix + '_surface'
surface, nurbs_plane = pm.nurbsPlane(ax=up_axis, lr=surface_length, v=joint_length - 1, ch=True, n=surface_name)
pm.rebuildSurface(surface, rpo=True, ch=True, end=1, kr=0, kc=False, su=1, du=1, sv=joint_length - 1, dir=0)
surface_shape = surface.getShape()
surface.t.set(surface_position)
surface.r.set(surface_rotation)
original_shape = xform.getOrigShape(surface)
uv_pin = pm.createNode('uvPin', name=prefix + '_uvPin')
original_shape.local >> uv_pin.originalGeometry
surface.worldSpace >> uv_pin.deformedGeometry
for i, joint in enumerate(joints):
joint_name = joint.name(stripNamespace=True)
dividend = convert.toBind(joint).attr(pcfg.length_attribute)
divisor = end_bind_joint.attr(pcfg.length_attribute)
decimal_distance = pipernode.divide(dividend, divisor).output
ctrl_name = joint_name + pcfg.bendy_suffix
ctrl_exists = pm.objExists(ctrl_name + pcfg.control_suffix)
control_existed.append(ctrl_exists)
if ctrl_exists:
ctrl = pm.PyNode(ctrl_name + pcfg.control_suffix)
controls.append(ctrl)
continue
else:
ctrl = control.create(joint, shape, ctrl_name, axis_label, joint=True, scale=0.9, inner=.9)
pm.setAttr(ctrl.radius, cb=False)
controls.append(ctrl)
controls_to_organize.append(ctrl)
if joint == joints[0]:
xform.offsetConstraint(joint, ctrl)
elif joint == joints[-1]:
blend_matrix = pm.createNode('blendMatrix', n=joint_name + pcfg.blend_matrix_suffix)
start_joint.worldMatrix >> blend_matrix.inputMatrix
end_joint.worldMatrix >> blend_matrix.target[0].targetMatrix
blend_matrix.target[0].useRotate.set(0)
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
else:
si = str(i)
locator = pm.spaceLocator(n=joint_name + pcfg.bendy_locator_suffix)
uv_pin.attr('outputMatrix[{}]'.format(si)) >> locator.offsetParentMatrix
uv_pin.attr('coordinate[{}].coordinateU'.format(si)).set(0.5)
decimal_distance >> uv_pin.attr('coordinate[{}].coordinateV'.format(si))
pm.select(cl=True)
xform.parentMatrixConstraint(locator, joint, offset=True)
blend_matrix = pm.createNode('blendMatrix', n=joint_name + pcfg.blend_matrix_suffix)
start_joint.worldMatrix >> blend_matrix.inputMatrix
end_joint.worldMatrix >> blend_matrix.target[0].targetMatrix
decimal_distance >> blend_matrix.target[0].weight
blend_matrix.target[0].useRotate.set(0)
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
# inner control
inner_name = joint_name + pcfg.inner_suffix + pcfg.bendy_suffix
size = control.calculateSize(joint)
in_ctrl = control.create(locator, curve.plus, inner_name, axis_label,
scale=.85, parent=ctrl, size=size, inner=.02)
inner_controls.append(in_ctrl)
# global and control scale hooked up to locator that drives joint
joint_parent = joint.getParent()
decompose_matrix = attribute.getDecomposeMatrix(joint_parent.worldMatrix)
parent_scale = decompose_matrix.attr('outputScale' + absolute_axis_label.upper())
ctrl_scale = ctrl.attr('s' + absolute_axis_label)
in_ctrl_scale = in_ctrl.attr('s' + absolute_axis_label)
piper_mult = pipernode.multiply(locator, inputs=[parent_scale, ctrl_scale, in_ctrl_scale])
# multiplying inner control's translate by scale to compensate for any parent scaling
scale_mult = pm.createNode('multiplyDivide', n=ctrl_name + pcfg.bendy_locator_suffix + 'scaleMultiply')
piper_mult.output >> scale_mult.input1
in_ctrl.t >> scale_mult.input2
scale_mult.output >> locator.t
in_ctrl.r >> locator.r
locator.visibility.set(False)
locators.append(locator)
if i != 0:
ctrl_parent = controls[i - 1]
pm.reorder(ctrl, r=(i - 1) * -1)
if not control_existed[i - 1]:
xform.aimConstraint(ctrl, up_locator, controls[i - 1])
if ctrl_parent:
control.tagAsControllerParent(ctrl, ctrl_parent)
pm.select(controls, surface)
skin = pm.skinCluster(tsb=True, bm=0, sm=0, nw=1, wd=0, mi=4, omi=True)
# paint the skin weights so that they are set to 1
uvs = {pm.PyNode(joint): {'position': convert.toVector(joint), 'cvs': []} for joint in controls}
u_amount = surface_shape.numCVsInU()
v_amount = surface_shape.numCVsInV()
for u in range(u_amount):
for v in range(v_amount):
closest_joint = None
closest_distance = 0
cv_position = surface_shape.getCV(u, v, space='world')
for i, joint in enumerate(uvs):
distance = mayamath.getDistance(cv_position, uvs[joint]['position'])
if i == 0 or distance < closest_distance:
closest_distance = distance
closest_joint = joint
uvs[closest_joint]['cvs'].append(surface.name() + '.cv[{}][{}]'.format(str(u), str(v)))
for joint in uvs:
pm.select(uvs[joint]['cvs'])
pm.skinPercent(skin, nrm=True, tv=(joint, 1))
surface.visibility.set(False)
pm.select(cl=True)
function_name = inspect.currentframe().f_code.co_name
self.organize(controls_to_organize + locators + [surface, up_locator], prefix=function_name, name=name)
self.addControls(controls_to_organize, inner=inner_controls, name=function_name)
return controls
def banker(self, joint, ik_control, pivot_track=None, side='', use_track_shape=True):
"""
Creates a reverse foot control that changes pivot based on curve shape and control rotation input.
Useful for banking.
Args:
joint (pm.nodetypes.Joint): Joint that will be driven by the reverse module and IK handle.
ik_control (pm.nodetypes.Transform): Control that drives the IK Handle.
pivot_track (pm.nodetypes.Transform): NurbsCurve shape as child that will act as the track for the pivot.
side (string or None): Side to generate cross section.
use_track_shape (boolean): If True, will use the pivot track shape as the control shape.
Returns:
(pm.nodetypes.Transform): Control that moves the reverse foot pivot.
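Example:
    Minimal sketch mirroring the humanLeg method below; assumes an FKIK leg was already built:
    fk_joints, ik_joints, ctrls = rig.FKIK('thigh_l', 'foot_l', name='Leg')
    bank_ctrl = rig.banker(ik_joints[-1], ctrls[-1])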
"""
joint_name = joint.name()
axis = mayamath.getOrientAxis(joint.getParent(), joint)
axes = convert.axisToTriAxis(axis)
if not side:
side = pcu.getSide(joint_name)
# get the IK handle and validate there is only one
ik_handle = list(set(ik_control.connections(skipConversionNodes=True, type='ikHandle')))
if len(ik_handle) != 1:
pm.error('Needed only ONE ik_handle. {} found.'.format(str(len(ik_handle))))
ik_handle = ik_handle[0]
# create a pivot track (cross section curve) if no pivot track (curve) is given
if not pivot_track:
# if IK joint given, get the name of the regular joint by stripping the ik prefix
if joint_name.startswith(pcfg.ik_prefix):
stripped_name = pcu.removePrefixes(joint_name, pcfg.ik_prefix)
namespace_name = pcfg.skeleton_namespace + ':' + stripped_name
search_joint = pm.PyNode(stripped_name) if pm.objExists(stripped_name) else pm.PyNode(namespace_name)
else:
search_joint = joint
# tries to get the meshes influenced by the skin cluster connected to the joint
skins = search_joint.future(type='skinCluster')
meshes = {mesh for skin in skins for mesh in pm.skinCluster(skin, q=True, g=True)} if skins else None
# create the pivot track curve
pm.select(cl=True)
pivot_track = curve.originCrossSection(meshes, side=side, name=joint_name + '_pivotTrack')
# validate that only one is made
if len(pivot_track) != 1:
text = 'Needed only ONE curve! {} curves made for the side: {}'.format(str(len(pivot_track)), str(side))
pm.error(text)
pivot_track = pivot_track[0]
# create the pivot and the normalized pivot, move the norm pivot to joint and then to floor
pivot = pm.group(em=True, name=joint_name + '_Pivot')
normalized_pivot = pm.group(em=True, name=joint_name + '_normalPivot')
pm.matchTransform(normalized_pivot, joint, pos=True, rot=False, scale=False)
normalized_pivot.ty.set(0)
xform.toOffsetMatrix(normalized_pivot)
# figure out control size, create control, lock and hide axis, translate, and scale
if ik_control.hasAttr(pcfg.proxy_fk_ik):
switcher_control = switcher.get(ik_control)
transforms = switcher.getData(switcher_control.attr(pcfg.switcher_transforms), cast=True)
size = control.calculateSize(transforms[-1])
else:
size = None
if use_track_shape:
ctrl = pm.duplicate(pivot_track, n=joint_name + pcfg.banker_suffix + pcfg.control_suffix)[0]
curve.color(ctrl, 'burnt orange')
else:
ctrl = control.create(joint, shape=curve.plus, name=joint_name + pcfg.banker_suffix, axis=axes[0],
color='burnt orange', matrix_offset=True, size=size, inner=.125, outer=1.25)
attribute.lockAndHide(ctrl.attr('r' + axes[0]))
attribute.lockAndHideCompound(ctrl, ['t', 's'])
# node to add small number
small_add = pm.createNode('plusMinusAverage', n=joint_name + '_plusSmallNumber')
small_add.input1D[0].set(0.001)
normalize_node = pm.createNode('vectorProduct', n=joint_name + '_pivotNormal')
normalize_node.operation.set(0)
normalize_node.normalizeOutput.set(True)
# adding a small amount to avoid division by zero
ctrl.attr('r' + axes[1]) >> small_add.input1D[1]
small_add.output1D >> normalize_node.attr('input1' + axes[2].upper())
# need to multiply the rotation by -1
negative_mult = pm.createNode('multDoubleLinear', n=joint_name + '_negative')
ctrl.attr('r' + axes[2]) >> negative_mult.input1
negative_mult.input2.set(-1)
normalize_input_attribute = negative_mult.output
normalize_input_attribute >> normalize_node.attr('input1' + axes[1].upper())
normalize_node.output >> normalized_pivot.translate
# creating the normalized (circle) version of the cross section
positions = []
duplicate_curve = pm.duplicate(pivot_track)[0]
pm.move(0, 0, 0, duplicate_curve, rpr=True)
cvs = duplicate_curve.numCVs()
for cv in range(0, cvs):
position = duplicate_curve.cv[cv].getPosition(space='world')
position.normalize()
positions.append(position)
# delete the duplicate and finally make the normalize track. Make sure to close the curve and center pivots
pm.delete(duplicate_curve)
normalized_track = pm.curve(d=1, p=positions, k=range(len(positions)), ws=True, n=joint_name + '_normalTrack')
normalized_track = pm.closeCurve(normalized_track, replaceOriginal=True)[0]
pm.xform(normalized_track, centerPivots=True)
# move normalized track to joint, then to floor, and freeze transforms
pm.matchTransform(normalized_track, joint, pos=True, rot=False, scale=False)
normalized_track.ty.set(0)
myu.freezeTransformations(normalized_track)
decomposed_matrix = pm.createNode('decomposeMatrix', n=normalize_node + '_decompose')
normalized_pivot.worldMatrix >> decomposed_matrix.inputMatrix
nearest_point = pm.createNode('nearestPointOnCurve', n=joint_name + '_nearestPoint')
decomposed_matrix.outputTranslate >> nearest_point.inPosition
normalized_track.getShape().worldSpace >> nearest_point.inputCurve
curve_info = pm.createNode('pointOnCurveInfo', n=joint_name + '_curveInfo')
nearest_point.parameter >> curve_info.parameter
pivot_track.getShape().worldSpace >> curve_info.inputCurve
reverse_group = pm.group(em=True, n=joint_name + '_reverse_grp')
xform.parentMatrixConstraint(ik_control, reverse_group, offset=True)
pm.parent([pivot, ctrl], reverse_group)
# curve_info position is where the pivot goes! Connect something to it if you want to visualize it
ctrl.r >> pivot.r
curve_info.result.position >> pivot.rotatePivot
# connect ik handle by letting the pivot drive it
pm.parent(ik_handle, pivot)
# make the pivot drive the joint's rotations
joint.r.disconnect()
xform.parentMatrixConstraint(pivot, joint, t=False, r=True, s=False, offset=True)
# clean up by hiding curves
pivot_track.visibility.set(False)
normalized_track.visibility.set(False)
ik_control.addAttr(pcfg.banker_attribute, dt='string', k=False, h=True, s=True)
ik_control.attr(pcfg.banker_attribute).set(ctrl.name())
# hook up pivot control with fk_ik attribute if ik has an fk-ik proxy
if ik_control.hasAttr(pcfg.proxy_fk_ik):
switcher_control = switcher.get(ik_control)
switcher_attribute = switcher_control.attr(pcfg.fk_ik_attribute)
switcher_attribute >> ctrl.lodVisibility
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
control.tagAsControllerParent(ctrl, ik_control)
nodes_to_organize = [reverse_group, normalized_pivot, normalized_track, pivot_track]
self.findGroup(joint, nodes_to_organize)
self.addControls([ctrl], name=inspect.currentframe().f_code.co_name)
return ctrl
def reverse(self, driver, target, driven_negate=None, transform=None, switcher_ctrl=None, shape=None, axis=None):
"""
Creates a control that offsets the given target through rotation (usually foot roll reverse rig).
Args:
driver (pm.nodetypes.Transform): The transform that drives the whole chain. Usually the IK handle.
target (pm.nodetypes.Transform): Transform that will have control rotations added to. Usually end joint.
driven_negate (pm.nodetypes.Transform): Transform that will have control rotations subtracted from.
Usually any controls further down the chain/hierarchy of the given target.
transform (pm.nodetypes.Transform): Transform for making the control. Useful for figuring out control size.
If None given, will try to use given driven_negate, if no driven_negate, will try to use given target.
switcher_ctrl (pm.nodetypes.Transform): Transform that handles switching between FK and IK chains.
shape (method): Creates the shape control that will drive reverse rig system.
axis (string): Direction control will be facing when created.
Returns:
(pm.nodetypes.Transform): Control created.
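Example:
    Minimal sketch mirroring the humanLeg method below; ik_joints and ctrls come from a previous FKIK call
    and all names are placeholders:
    ik_handle = ctrls[-1].connections(skipConversionNodes=True, type='ikHandle')[0]
    heel_ctrl = rig.reverse(ik_handle, ik_joints[-1], switcher_ctrl=ctrls[0])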
"""
if not transform:
transform = driven_negate if driven_negate else target
if not shape:
shape = curve.square
# attempt to deduce axis if transform only has one child and axis is not given
transform = self.validateTransform(transform)
if not axis and transform.getChildren() and len(transform.getChildren()) == 1:
axis_vector = mayamath.getOrientAxis(transform, transform.getChildren()[0])
axis = convert.axisToString(axis_vector)
axis = convert.axisToTriAxis(axis)[1]
# create control
name = transform.name(stripNamespace=True) + pcfg.reverse_suffix
driver_parent = driver.getParent()
ctrl = control.create(transform, shape, name, axis, 'burnt orange', 0.5, True, parent=driver_parent)
self.addControls([ctrl], name=inspect.currentframe().f_code.co_name)
name = ctrl.name(stripNamespace=True)
pm.parent(driver, ctrl)
attribute.lockAndHideCompound(ctrl, ['t', 's'])
target_source = target.rotate.connections(scn=True, plugs=True, destination=False)
# add control's rotation to whatever is connected to target's rotate.
if target_source:
target_source = target_source[0]
plus = pm.createNode('plusMinusAverage', n='_'.join([target.name(), 'plus', name]))
target_source >> plus.input3D[0]
ctrl.rotate >> plus.input3D[1]
plus.output3D >> target.rotate
else:
ctrl.rotate >> target.rotate
# if no driven negate given or driven negate is not being offset by the offsetParentMatrix, we are finished here
if not driven_negate or not driven_negate.offsetParentMatrix.connections(scn=True, plugs=True, d=False):
return ctrl
# decompose and compose matrices to get rotation value subtracted with control's rotation
source_matrix = driven_negate.offsetParentMatrix.connections(scn=True, plugs=True, destination=False)[0]
source_name = source_matrix.node().name()
decomp_matrix = pm.createNode('decomposeMatrix', n=source_name + '_DM')
compose_matrix = pm.createNode('composeMatrix', n=source_name + '_CM')
source_matrix >> decomp_matrix.inputMatrix
decomp_matrix.outputTranslate >> compose_matrix.inputTranslate
decomp_matrix.outputScale >> compose_matrix.inputScale
minus = pm.createNode('plusMinusAverage', n='_'.join([source_name, 'minus', name]))
minus.operation.set(2)
decomp_matrix.outputRotate >> minus.input3D[0]
ctrl.rotate >> minus.input3D[1]
minus.output3D >> compose_matrix.inputRotate
compose_matrix.outputMatrix >> driven_negate.offsetParentMatrix
attribute.addReverseMessage(ctrl, driven_negate)
if not switcher_ctrl:
return ctrl
# add reverse control to switcher data and connect ik visibility onto reverse control
switcher.addData(switcher_ctrl.attr(pcfg.switcher_reverses), [name])
switcher_attribute = switcher_ctrl.attr(pcfg.fk_ik_attribute)
switcher_attribute >> ctrl.lodVisibility
# add proxy fk_ik attribute to ctrl
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
# only let driven_negate be affected if IK is set to True
blend = pm.createNode('blendMatrix', n=source_name + '_negateBlend')
source_matrix >> blend.inputMatrix
compose_matrix.outputMatrix >> blend.target[0].targetMatrix
switcher_attribute >> blend.target[0].weight
blend.outputMatrix >> driven_negate.offsetParentMatrix
return ctrl
def humanLeg(self, start, end, ball, side='', parent=None, global_ctrl='', name=''):
"""
Convenience method for rigging a leg: FKIK chain with banker and reverse controls.
Args:
start (pm.nodetypes.Joint): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Joint): End of the chain to be driven by FK controls. If none given, will only drive start
ball (pm.nodetypes.Transform): Transform that will be driven by FK chain and reversed.
side (string): Side to create banker control on.
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all IK components.
Returns:
(list): Nodes created.
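Example:
    Minimal sketch; joint names are placeholders and pelvis_ctrl is assumed to be an existing control:
    fkik_nodes, ball_nodes, extras = rig.humanLeg('thigh_l', 'foot_l', 'ball_l', parent=pelvis_ctrl, name='Leg')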
"""
fk_transforms, ik_transforms, ctrls = self.FKIK(start, end, parent=parent, global_ctrl=global_ctrl, name=name)
banker = self.banker(ik_transforms[-1], ctrls[-1], side=side)
ball_joint, ball_control, ball_inner = self.FK(ball, name=name)
xform.offsetConstraint(end, ball_control[0], offset=True)
ik_handle = ctrls[-1].connections(skipConversionNodes=True, type='ikHandle')[0]
reverse_ctrl = self.reverse(ik_handle, ik_transforms[-1], ball_control[0], ball, ctrls[0])
control.tagAsControllerParent(reverse_ctrl, banker)
return [fk_transforms, ik_transforms, ctrls], [ball_joint, ball_control, ball_inner], [banker, reverse_ctrl]
left_suffix = pcfg.left_suffix + pcfg.inner_suffix + pcfg.bendy_suffix + pcfg.control_suffix
right_suffix = pcfg.right_suffix + pcfg.inner_suffix + pcfg.bendy_suffix + pcfg.control_suffix
self._color(self.inner_bendy_controls, left_suffix, right_suffix,
pcfg.left_inner_bendy_color, pcfg.right_inner_bendy_color, pcfg.middle_inner_bendy_color)
def organize(self, transforms, prefix=None, name=None):
"""
Organizes the given transforms into a group if name given and into the rig node.
Args:
transforms (Iterable): Nodes to group and/or move into rig node.
prefix (string): Prefix for group name. Usually calling function name.
name (string): Name to give group.
Returns:
(pm.nodetypes.Transform): Group node made.
"""
# preliminary checks, don't make group if no name given and there is no rig node
if (name is None or not transforms) or (not name and not self.rig):
return
group = None
parent_to_rig = transforms
if name:
prefix = prefix[:0] + prefix[0].capitalize() + prefix[1:]
group_name = prefix + '_' + name.capitalize().replace(' ', '_') + pcfg.group_suffix
if pm.objExists(group_name):
group = pm.PyNode(group_name)
else:
group = pm.group(name=group_name, empty=True)
attribute.lockAndHideCompound(group)
if group:
self.addToGroupStack(group, transforms)
parent_to_rig = [group]
if self.rig:
self.addToGroupStack(self.rig, parent_to_rig)
# drive visibility of groups through rig node
if group:
attribute_name = group.name() + pcfg.visibility_suffix
if not self.rig.hasAttr(attribute_name):
self.rig.addAttr(attribute_name, at='bool', dv=1, k=True)
self.rig.attr(attribute_name) >> group.visibility
group.setAttr('visibility', k=False, cb=False) # set hidden, still keyable even though k is False
if self.auto_group:
self.runGroupStack()
return group
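# Usage sketch (not part of the original file): organize is normally called internally by the
# rig methods below with the calling function's name as the prefix, e.g. something like
#   group = self.organize([pivot_ctrl], prefix='dynamicPivot', name='Foot')
# which would parent the nodes under a 'DynamicPivot_Foot'-style group inside the rig node.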
def dynamicPivot(self, transform, target=None, shape=curve.square, axis=None, color='red', scale=1, size=None):
"""
Creates a dynamic pivot at the given transform driving the given target.
Args:
transform (pm.nodetypes.Transform): Transform to create dynamic pivot at.
target (pm.nodetypes.Transform): Transform to drive with dynamic pivot.
shape (method): Used to create curve or visual representation of FK control.
axis (string): Orientation for control made.
color (string): Color for control.
scale (float): Multiplied times size.
size (list): X, Y, Z sizes of control.
Returns:
(pm.nodetypes.Transform): Control created.
"""
if not target:
target = transform
pivot_ctrl, _ = control.create(transform,
shape=shape,
name=target.name() + pcfg.dynamic_pivot_suffix,
axis=axis,
color=color,
scale=scale,
parent=target,
matrix_offset=False,
size=size)
pivot_ctrl.translate >> target.rotatePivot
attribute.nonKeyableCompound(pivot_ctrl, ['r', 's'])
pivot_ctrl.addAttr(pcfg.dynamic_pivot_rest, dt='string', k=False, h=True, s=True)
pivot_ctrl.attr(pcfg.dynamic_pivot_rest).set(transform.name())
function_name = inspect.currentframe().f_code.co_name
self.organize([pivot_ctrl], prefix=function_name, name='')
self.addControls([pivot_ctrl], name=function_name)
return pivot_ctrl
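# Usage sketch (not part of the original file); 'foot_l_IK' is a hypothetical control name:
#   ik_ctrl = pm.PyNode('foot_l_IK' + pcfg.control_suffix)
#   pivot_ctrl = rig.dynamicPivot(ik_ctrl, scale=1.5)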
@staticmethod
def _tagControllerParent(ctrl, parent, i, controls):
"""
Derives whether to tag the given ctrl with the parent, the parent's inner control, or the last in controls.
Args:
ctrl (pm.nodetypes.Transform or string): Transform that will receive parent to pick walk up to.
parent (pm.nodetypes.Transform): Parent that could drive ctrl's chain.
i (int): Iterator.
controls (list): Controls being added to chain.
"""
pick_walk_parent = controls[-1] if controls else None
if parent and i == 0:
inner_ctrl = parent.name().replace(pcfg.control_suffix, pcfg.inner_suffix + pcfg.control_suffix)
pick_walk_parent = pm.PyNode(inner_ctrl) if pm.objExists(inner_ctrl) else parent
if pick_walk_parent:
control.tagAsControllerParent(ctrl, pick_walk_parent)
@staticmethod
def _getAxis(i, transforms, last_axis, duplicates=None):
"""
Attempts to figure out the axis for the given iteration of the given transforms and/or duplicates.
Args:
i (int): Iteration count.
transforms (list): Transforms to use to get orient axis.
last_axis (string): Axis to fall back on when no axis can be calculated for this iteration.
duplicates (list): Duplicates of transforms.
Returns:
(string, string): Axis calculated from the current and next iteration, returned twice so the caller can update its last_axis.
"""
axis = last_axis
if not duplicates:
duplicates = transforms
if duplicates[i] != duplicates[-1]:
axis_vector = mayamath.getOrientAxis(duplicates[i], duplicates[i + 1])
axis = convert.axisToString(axis_vector)
# attempt to deduce axis if transform only has one child and axis is not given
elif len(transforms) == 1 and transforms[0].getChildren() and len(transforms[0].getChildren()) == 1:
axis_vector = mayamath.getOrientAxis(transforms[0], transforms[0].getChildren()[0])
axis = convert.axisToString(axis_vector)
return axis, axis
def root(self, transform=pcfg.root_joint_name, name=pcfg.root_joint_name):
"""
Creates a root control with a squash and stretch attribute.
Args:
transform (pm.nodetypes.Transform or string): Joint to create root control on.
name (string): Name to give group
Returns:
(tuple): Duplicate chain, FK controls, and inner controls created for the root joint (same structure as the FK return).
"""
# create the root control as a regular FK
transform = self.validateTransform(transform)
controls = self.FK(transform, name=name)
self.root_control = controls[1][0]
# create a group above root control that will be scaled and squash and stretch attribute
name_prefix = name.lower() + '_scale'
root_scale = pm.group(self.root_control, name=name_prefix + pcfg.group_suffix)
attribute.addSeparator(self.root_control)
self.root_control.addAttr(pcfg.squash_stretch_attribute, k=True, dv=1, min=0.001)
self.root_control.addAttr(pcfg.squash_stretch_weight_attribute, k=True, dv=1, hsx=True, hsn=True, smn=0, smx=1)
attribute.nonKeyable(self.root_control.attr(pcfg.squash_stretch_weight_attribute))
# create blender
blender = pm.createNode('piperBlendAxis', name=name_prefix + '_BA')
self.root_control.attr(pcfg.squash_stretch_weight_attribute) >> blender.weight
blender.axis1.set(1, 1, 1)
blender.axis2.set(1, 1, 1)
blender.output >> root_scale.scale
# hook up squash and stretch
reciprocal = xform.squashStretch(self.root_control.attr(pcfg.squash_stretch_attribute), blender, 'a2')
transform.addAttr(pcfg.root_scale_up, k=True, dv=1)
transform.addAttr(pcfg.root_scale_sides, k=True, dv=1)
self.root_control.attr(pcfg.squash_stretch_attribute) >> transform.attr(pcfg.root_scale_up)
reciprocal.output >> transform.attr(pcfg.root_scale_sides)
# connect root and rig with message for easy look up
self.root_control.addAttr(pcfg.message_root_control, at='message')
self.rig.attr(pcfg.message_root_control) >> self.root_control.attr(pcfg.message_root_control)
return controls
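# Usage sketch based on the class docstring example; unpacks the FK-style return:
#   with Rig() as rig:
#       root_duplicates, root_ctrls, root_inner = rig.root()
#       root_ctrl = root_ctrls[0]  # equivalent to rig.root()[1][0]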
def FK(self, start, end='', parent=None, axis=None, shape='', sizes=None, connect=True, global_ctrl='', name=''):
"""
Creates FK controls for the transform chain deduced by the start and end transforms.
Args:
start (pm.nodetypes.Transform or string): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Transform or string): End of the chain to be driven by FK controls.
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
axis (string): Axis for the control shapes to match rotations; if given, overrides the axis deduced from the chain (mainly useful when no end joint is given).
shape (method): Used to create curve or visual representation of FK control.
sizes (list): Sizes to use for each control.
connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all FK components.
Returns:
(tuple): Duplicate transforms, FK controls, and inner controls, each in order from start to end.
"""
if not shape:
shape = curve.circle
if global_ctrl == '':
global_ctrl = self.root_control
controls = []
decomposes = []
multiplies = []
in_controls = []
calc_axis = 'y'
last_axis = axis
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
duplicates = xform.duplicateChain(transforms, prefix=pcfg.fk_prefix, color='green', scale=0.5)
for i, (transform, duplicate) in enumerate(zip(transforms, duplicates)):
dup_name = duplicate.name()
calc_axis, last_axis = [axis, axis] if axis else self._getAxis(i, transforms, last_axis, duplicates)
size = sizes[i] if sizes else control.calculateSize(transform)
ctrl_parent = parent if i == 0 else controls[i - 1]
ctrl = control.create(duplicate, pipernode.createFK, dup_name, calc_axis,
scale=1.2, control_shape=shape, size=size)
self._tagControllerParent(ctrl, parent, i, in_controls)
attribute.bindConnect(transform, ctrl, ctrl_parent) # connects attributes that offset controls
controls.append(ctrl)
xform.offsetConstraint(ctrl, duplicate, message=True)
in_ctrl = control.create(duplicate, name=dup_name + pcfg.inner_suffix, axis=calc_axis, shape=curve.plus,
size=size, parent=ctrl, color='burnt orange', inner=.125, matrix_offset=True)
decompose = xform.parentMatrixConstraint(in_ctrl, duplicate)
decomposes.append(decompose)
in_controls.append(in_ctrl)
transform_parent = None if transform.name() == pcfg.root_joint_name else transform.getParent()
bind_transform = convert.toBind(transform, return_node=True)
bind_transform.attr(pcfg.length_attribute) >> ctrl.initialLength
spaces = [transform_parent, ctrl_parent]
spaces = list(filter(lambda node: not isinstance(node, (pm.nodetypes.PiperSkinnedMesh, type(None))), spaces))  # list() so the emptiness check below works under Python 3
if spaces:
space.create(in_ctrl, spaces)
if connect:
xform.parentMatrixConstraint(duplicate, transform)
# used for scale calculation in FK control
ctrl.worldMatrix >> ctrl.scaleDriverMatrix
duplicate_parent = duplicate.getParent()
if duplicate_parent:
duplicate_parent.parentMatrix >> ctrl.scaleParentMatrix
duplicate_parent.translate >> ctrl.scaleTranslate
calc_axis = calc_axis.lstrip('n')
main_term = decomposes[-2].attr('outputScale' + calc_axis.upper())
inputs = [ctrl_parent.attr('s' + calc_axis), ctrl.outputScale]
if global_ctrl:
decompose = attribute.getDecomposeMatrix(global_ctrl.worldMatrix[0])
inputs.append(decompose.outputScaleY)
# connect all the stuff needed for volumetric scaling
multiply = pipernode.multiply(duplicate_parent, main_term, ctrl.volumetric, inputs)
multiplies.append(multiply)
# edge cases for scaling
if parent and len(transforms) > 1 and parent != global_ctrl:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[0])
parent.attr('s' + calc_axis) >> multiply_input
if len(transforms) > 2 and controls[1] != controls[-1]:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[1])
parent.attr('s' + calc_axis) >> multiply_input
if len(transforms) > 2:
multiply_input = attribute.getNextAvailableMultiplyInput(multiplies[1])
controls[0].attr('s' + calc_axis) >> multiply_input
if start.name(stripNamespace=True) == pcfg.body_base_joint_name:
self.body_base_control = controls[0]
function_name = inspect.currentframe().f_code.co_name
self.organize(controls + [duplicates[0]], prefix=function_name, name=name)
self.addControls(controls, inner=in_controls, name=function_name)
return duplicates, controls, in_controls
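# Usage sketch based on the class docstring example; 'pelvis' and 'spine_03' are hypothetical
# joint names expected to exist in the referenced skeleton:
#   duplicates, ctrls, inner_ctrls = rig.FK('pelvis', 'spine_03', parent=root_ctrl, name='Spine')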
def IK(self, start, end, parent=None, shape=curve.ring, sizes=None, connect=True, global_ctrl='', name=''):
"""
Creates IK controls and an IK RP solver for the given start and end joints.
Args:
start (pm.nodetypes.Joint or string): Start of the joint chain.
end (pm.nodetypes.Joint or string): End of the joint chain.
parent (pm.nodetypes.Transform): Parent of start control.
shape (method): Creates the shape control that will drive joints.
sizes (list): Sizes to use for each control.
connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all IK components.
Returns:
(tuple): Duplicate transforms, IK controls in order from start to end, and the scale buffer joint.
"""
axis = None
mid_ctrl = None
start_ctrl = None
scale_buffer = None
controls = []
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
duplicates = xform.duplicateChain(transforms, prefix=pcfg.ik_prefix, color='purple', scale=0.5)
mid = pcu.getMedian(transforms)
mid_duplicate = pcu.getMedian(duplicates)
if global_ctrl == '':
global_ctrl = self.root_control
if mid == start or mid == end:
pm.error('Not enough joints given! Mid joint resolved to {}.'.format(mid.name()))
for i, (transform, duplicate) in enumerate(zip(transforms, duplicates)):
dup_name = duplicate.name(stripNamespace=True)
size = sizes[i] if sizes else control.calculateSize(transform)
ctrl_parent = parent if i == 0 else None
if transform != transforms[-1]:
next_transform = transforms[i + 1]
axis_vector = mayamath.getOrientAxis(transform, next_transform)
axis = convert.axisToString(axis_vector)
# start
if transform == transforms[0]:
ctrl = control.create(duplicate, name=dup_name, axis=axis, shape=shape, size=size)
attribute.bindConnect(transform, ctrl, ctrl_parent)
start_ctrl = ctrl
# scale buffer transform
scale_buffer = pm.joint(n=dup_name + pcfg.scale_buffer_suffix)
scale_buffer.segmentScaleCompensate.set(False)
pm.matchTransform(scale_buffer, duplicate)
pm.parent(duplicate, scale_buffer)
# mid
elif transform == mid:
ctrl = control.create(duplicate, curve.orb, dup_name, axis, scale=0.1, matrix_offset=False, size=size)
translation, rotate, _, _ = xform.calculatePoleVector(start, mid, end)
pm.xform(ctrl, t=translation, ro=rotate)
mid_ctrl = ctrl
# end
elif transform == transforms[-1]:
ctrl = control.create(duplicate, pipernode.createIK, dup_name, axis, control_shape=shape, size=size)
attribute.bindConnect(transform, ctrl)
else:
# other unknown joint(s), left for possible future 3+ IK joint chains
ctrl = control.create(duplicate, name=dup_name, axis=axis, shape=shape, size=size)
if connect:
xform.parentMatrixConstraint(duplicate, transform)
self._tagControllerParent(ctrl, parent, i, controls)
controls.append(ctrl)
piper_ik = controls[-1]
nodes_to_organize = [controls[0], scale_buffer, piper_ik]
mid_bind = convert.toBind(mid, return_node=True)
bind_transform = convert.toBind(transforms[-1], return_node=True)
mid_bind.attr(pcfg.length_attribute) >> piper_ik.startInitialLength
bind_transform.attr(pcfg.length_attribute) >> piper_ik.endInitialLength
if axis.startswith('n'):
piper_ik.direction.set(-1)
axis = axis.lstrip('n')
# connect controls to joints, and make ik handle
decompose = xform.parentMatrixConstraint(start_ctrl, scale_buffer, t=True, r=False, s=True)
xform.parentMatrixConstraint(piper_ik, duplicates[-1], t=False)
ik_handle_name = duplicates[-1].name(stripNamespace=True) + '_handle'
ik_handle, _ = pm.ikHandle(sj=duplicates[0], ee=duplicates[-1], sol='ikRPsolver', n=ik_handle_name, pw=1, w=1)
ik_handle.visibility.set(False)
pm.parent(ik_handle, piper_ik)
mayamath.zeroOut(ik_handle)
ik_handle.translate >> piper_ik.handleTranslate
ik_handle.parentMatrix >> piper_ik.handleParentMatrix
# xform.poleVectorMatrixConstraint(ik_handle, mid_ctrl)
attribute.addSeparator(mid_ctrl)
mid_ctrl.addAttr('poleVectorWeight', k=True, dv=1, min=0, max=1)
constraint = pm.poleVectorConstraint(mid_ctrl, ik_handle)
mid_ctrl.poleVectorWeight >> constraint.attr(mid_ctrl.name() + 'W0')
# connect the rest
start_ctrl.attr('s' + axis) >> piper_ik.startControlScale
start_ctrl.worldMatrix >> piper_ik.startMatrix
mid_ctrl.worldMatrix >> piper_ik.poleVectorMatrix
piper_ik.startOutput >> mid_duplicate.attr('t' + axis)
piper_ik.endOutput >> duplicates[-1].attr('t' + axis)
piper_ik.twist >> ik_handle.twist
# scale ctrl connect
decompose_scale = decompose.attr('outputScale' + axis.upper())
pipernode.multiply(scale_buffer, decompose_scale, inputs=[piper_ik.startOutputScale])
pipernode.multiply(mid_duplicate, mid_ctrl.attr('s' + axis), inputs=[piper_ik.endOutputScale])
mid_ctrl.attr('s' + axis) >> piper_ik.poleControlScale
# parent pole vector to end control and create
pm.parent(mid_ctrl, piper_ik)
xform.toOffsetMatrix(mid_ctrl)
space.create(mid_ctrl, [start_ctrl])
attribute.lockAndHideCompound(mid_ctrl, ['r'])
# preferred angle connection
mid_bind = convert.toBind(mid, pm.warning)
if mid_bind:
mid_bind.preferredAngle >> piper_ik.preferredAngleInput
piper_ik.preferredAngleOutput >> mid_duplicate.preferredAngle
# must parent before creating spaces
if global_ctrl:
pm.parent(piper_ik, global_ctrl)
nodes_to_organize = [controls[0], scale_buffer]
# create spaces for piper ik
spaces = list(filter(None, [parent, global_ctrl]))
space.create(piper_ik, spaces)
# global scale comes from parent's world matrix scale
if parent:
parent_decompose = pm.createNode('decomposeMatrix', n=parent.name(stripNamespace=True) + '_DM')
parent.worldMatrix >> parent_decompose.inputMatrix
parent_decompose.attr('outputScale' + axis.upper()) >> piper_ik.globalScale
if start.name(stripNamespace=True) == pcfg.body_base_joint_name:
self.body_base_control = controls[0]
function_name = inspect.currentframe().f_code.co_name
self.organize(nodes_to_organize, prefix=function_name, name=name)
self.addControls(controls, name=function_name)
return duplicates, controls, scale_buffer
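# Usage sketch (not part of the original file); 'thigh_l' and 'foot_l' are hypothetical joint
# names, and the chain must contain a mid joint for the pole vector to be created:
#   duplicates, ctrls, scale_buffer = rig.IK('thigh_l', 'foot_l', parent=root_ctrl, name='Leg')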
def FKIK(self, start, end, parent=None, fk_shape='', ik_shape='', proxy=True, global_ctrl='', name=''):
"""
Creates FK and IK controls that drive the chain from start to end.
Args:
start (pm.nodetypes.Joint or string): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Joint or string): End of the chain to be driven by FK controls.
If none given, will only drive start
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
fk_shape (method): Used to create curve or visual representation for the FK controls.
ik_shape (method): Used to create curve or visual representation for the IK controls.
proxy (boolean): If True, adds a proxy FK_IK attribute to all controls.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all FKIK components.
Returns:
(tuple): FK duplicate transforms, IK duplicate transforms, and all controls created (switcher first, then FK, inner, and IK controls).
"""
if not fk_shape:
fk_shape = curve.circle
if not ik_shape:
ik_shape = curve.ring
if global_ctrl == '':
global_ctrl = self.root_control
# create joint chains that are the same as the given start-to-end chain for FK and IK, then create controls
start, end = self.validateTransforms([start, end])
transforms = xform.getChain(start, end)
sizes = [control.calculateSize(transform) for transform in transforms]
fk_transforms, fk_ctrls, in_ctrls = self.FK(start, end, parent, '', fk_shape, sizes, False, global_ctrl, None)
ik_transforms, ik_ctrls, buffer = self.IK(start, end, parent, ik_shape, sizes, False, global_ctrl, None)
controls = fk_ctrls + in_ctrls + ik_ctrls
# create the switcher control and add the transforms, fk, and iks to its attribute to store it
switcher_control = switcher.create(end, end.name(stripNamespace=True))
switcher_attribute = switcher_control.attr(pcfg.fk_ik_attribute)
switcher.addData(switcher_control.attr(pcfg.switcher_transforms), transforms, names=True)
switcher.addData(switcher_control.attr(pcfg.switcher_fk), fk_ctrls + in_ctrls, names=True)
switcher.addData(switcher_control.attr(pcfg.switcher_ik), ik_ctrls, names=True)
controls.insert(0, switcher_control)
# one minus the output of the fk ik attribute in order to drive visibility of ik/fk controls
one_minus = pipernode.oneMinus(source=switcher_attribute)
[one_minus.output >> fk.lodVisibility for fk in fk_ctrls + in_ctrls]
[switcher_attribute >> ik.lodVisibility for ik in ik_ctrls]
# use spaces to drive original chain with fk and ik transforms and hook up switcher attributes
for og_transform, fk_transform, ik_transform in zip(transforms, fk_transforms, ik_transforms):
world_space, fk_space, ik_space = space.create(og_transform, [fk_transform, ik_transform], direct=True)
og_transform.attr(fk_space).set(1)
switcher_attribute >> og_transform.attr(ik_space)
results = fk_transforms, ik_transforms, controls
function_name = inspect.currentframe().f_code.co_name
nodes_to_organize = [fk_transforms[0], buffer] + fk_ctrls + [ik_ctrls[0], switcher_control]
self.organize(nodes_to_organize, prefix=function_name, name=name)
self.addControls([switcher_control], name=function_name)
if not proxy:
return results
# make proxy fk ik attribute on all the controls
for ctrl in controls[1:]: # start on index 1 since switcher is on index 0
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
# make IK control drive switcher visibility
ik_ctrls[-1].addAttr(pcfg.switcher_visibility, at='bool', dv=0, k=True)
switcher_visibility = ik_ctrls[-1].attr(pcfg.switcher_visibility)
switcher_visibility >> switcher_control.lodVisibility
attribute.nonKeyable(switcher_visibility)
return results
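# Usage sketch (not part of the original file); joint names are hypothetical. The first element
# of the returned controls is the FK/IK switcher:
#   fk_joints, ik_joints, ctrls = rig.FKIK('upperarm_l', 'hand_l', parent=clavicle_ctrl, name='Arm')
#   switcher = ctrls[0]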
def extra(self, transform, name, parent=None, shape=curve.circle, axis='y', color='salmon', scale=1.0, spaces=None):
"""
Creates an extra control that does not drive the transform directly; instead, it should be used with spaces and act as a parent.
Args:
transform (pm.nodetypes.Transform or string): Transform to create control on.
name (string): Name to append to given transform name.
parent (pm.nodetypes.Transform): Transform to parent the control created onto.
shape (method): Creates the control curve.
axis (string): Orientation for control.
color (string): Color of curve.
scale (float): Scale to multiply by joint radius.
spaces (iterator or None): A bunch of pm.nodetypes.Transform(s) that will drive the given transform.
Returns:
(tuple): Control created and the spaces created on it.
"""
# allows for global scaling to work, otherwise parent under something that gets globally scaled
# or fix so that global scale gets multiplied onto created control if no parent given
if not parent:
parent = self.root_control
transform = self.validateTransform(transform)
name = transform.name(stripNamespace=True) + '_' + name
ctrl = control.create(transform, shape, name, axis, color, scale, parent=parent)
spaces = space.create(ctrl, spaces)
space.switch(ctrl, spaces[-1], r=False, o=False, s=False)
self.addControls([ctrl])
if not parent:
self.organize([ctrl], prefix=inspect.currentframe().f_code.co_name, name='')
# don't auto colorize if color is given
if color:
self.keep_colors.append(ctrl)
return ctrl, spaces
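# Usage sketch based on the class docstring example; the butt control follows the pelvis but
# is driven through spaces rather than constraining the joint directly:
#   butt_ctrl, butt_spaces = rig.extra('pelvis', 'butt', scale=1.05, spaces=[pelvis_ctrl, root_ctrl])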
def twist(self, joint, driver, target, axis=None, blended=True, weight=0.5, global_ctrl='', name=''):
"""
Creates the twist control that mimics twist of given target based on given weight.
Args:
joint (pm.nodetypes.Transform or string): Joint to create FK control with twist attributes on.
driver (pm.nodetypes.Transform or string): The "parent" for the given joint.
target (pm.nodetypes.Transform or string): Used to mimic twist.
axis (string or None): Axis to mimic twist of.
blended (boolean): If True, will blend translate of joint between given driver and target.
weight (float): Amount of twist joint will mimic from given target.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all twist components.
Returns:
(list): Duplicate joint(s) as first index, control(s) as second index, and inner control(s) as third index.
"""
# get distance variables before making FK controls.
joint, driver, target = self.validateTransforms([joint, driver, target])
distance_percentage = 1
if driver != target:
total_distance = mayamath.getDistance(driver, target)
joint_distance = mayamath.getDistance(driver, joint)
distance_percentage = joint_distance / total_distance
# derive axis from driver and target
if not axis:
axis = mayamath.getOrientAxis(driver, joint)
axis = convert.axisToString(axis, absolute=True)
# create FK control
parent = None if blended else driver
duplicates, controls, in_ctrl = self.FK(joint, parent=parent, axis=axis, global_ctrl=global_ctrl, name=name)
ctrl = controls[0]
attribute.addSeparator(ctrl)
if blended:
# name and create blend matrix
driver_name = driver.name(stripNamespace=True)
target_name = target.name(stripNamespace=True)
blend_name = driver_name + '_To_' + target_name + pcfg.twist_blend_suffix
blend_matrix = pm.createNode('blendMatrix', n=blend_name)
# connect blend matrix and set default values
driver.worldMatrix >> blend_matrix.inputMatrix
target.worldMatrix >> blend_matrix.target[0].targetMatrix
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
blend_matrix.target[0].useRotate.set(False)
blend_matrix.target[0].useScale.set(False)
blend_matrix.target[0].useShear.set(False)
# create attribute on control to drive the distance weight
ctrl.addAttr(pcfg.twist_blend_weight_attribute, k=True, dv=1, hsx=True, hsn=True, smn=-1, smx=1)
ctrl.attr(pcfg.twist_blend_weight_attribute) >> blend_matrix.target[0].weight
ctrl.attr(pcfg.twist_blend_weight_attribute).set(distance_percentage)
# create twist node and add twist attribute on control
twist_node = pipernode.createSwingTwist(target, ctrl, axis=axis, twist=weight)
ctrl.addAttr(pcfg.twist_weight_attribute, k=True, dv=weight, hsx=True, hsn=True, smn=-1, smx=1)
ctrl.attr(pcfg.twist_weight_attribute) >> twist_node.twist
return duplicates, controls, in_ctrl
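# Usage sketch (not part of the original file); joint names are hypothetical. A forearm twist
# joint mimics half of the hand's twist between the lowerarm (driver) and hand (target):
#   rig.twist('lowerarm_twist_01_l', 'lowerarm_l', 'hand_l', weight=0.5, name='Arm Twist')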
def bendy(self, joints, ctrl_parent=None, shape=curve.sun, i='01', name=''):
"""
Creates controls for each given joint; the controls are skinned to a NURBS surface whose pinned locators drive the given joints.
Args:
joints (list): Joints to create controls for. Must have at least three joints!
ctrl_parent (pm.nodetypes.DependNode): Node to tag as control parent for pick-walking.
shape (method): Used to create curve or visual representation of bendy control.
i (string): Format and first digit to search for middle element in given joints.
name (str or None): Name to give group that will house all bendy components.
Returns:
(list): Controls of bendy chain.
"""
joint_length = len(joints)
if joint_length < 3:
pm.error('Not enough joints!')
locators = []
controls = []
inner_controls = []
control_existed = []
controls_to_organize = []
joints = self.validateTransforms(joints, i=i)
start_joint = joints[0]
end_joint = joints[-1]
end_bind_joint = convert.toBind(end_joint)
prefix = joints[1].name(stripNamespace=True)
start_position = pm.xform(start_joint, q=True, ws=True, t=True)
end_position = pm.xform(end_joint, q=True, ws=True, t=True)
start_position = convert.toVector(start_position)
end_position = convert.toVector(end_position)
surface_position = (start_position + end_position) / 2
surface_rotation = pm.xform(start_joint, q=True, ws=True, ro=True)
surface_length = mayamath.getDistance(start_position, end_position)
surface_direction = mayamath.getDirection(start_position, end_position)
axis = mayamath.getOrientAxis(start_joint, end_joint)
axis_label = convert.axisToString(axis)
absolute_axis_label = axis_label.lstrip('n')
up_axis_label = convert.axisToTriAxis(axis)[-1]
up_axis = convert.axisToVector(up_axis_label)
up_axis = convert.toVector(up_axis)
# if axis is negative, rotate nurbs surface 180 degrees so that UVs end up going down the correct direction
if convert.axisToString(axis).startswith('n'):
axis_index = convert.axisToIndex(up_axis_label)
surface_rotation[axis_index] = surface_rotation[axis_index] + 180
up_position = start_position + (surface_direction * (surface_length / 2)) + (up_axis * (surface_length / 2))
up_locator = pm.spaceLocator(n=prefix + '_up_locator')
up_locator.t.set(up_position)
up_locator.visibility.set(False)
xform.offsetConstraint(start_joint, up_locator, offset=True)
surface_name = prefix + '_surface'
surface, nurbs_plane = pm.nurbsPlane(ax=up_axis, lr=surface_length, v=joint_length - 1, ch=True, n=surface_name)
pm.rebuildSurface(surface, rpo=True, ch=True, end=1, kr=0, kc=False, su=1, du=1, sv=joint_length - 1, dir=0)
surface_shape = surface.getShape()
surface.t.set(surface_position)
surface.r.set(surface_rotation)
original_shape = xform.getOrigShape(surface)
uv_pin = pm.createNode('uvPin', name=prefix + '_uvPin')
original_shape.local >> uv_pin.originalGeometry
surface.worldSpace >> uv_pin.deformedGeometry
for i, joint in enumerate(joints):
joint_name = joint.name(stripNamespace=True)
dividend = convert.toBind(joint).attr(pcfg.length_attribute)
divisor = end_bind_joint.attr(pcfg.length_attribute)
decimal_distance = pipernode.divide(dividend, divisor).output
ctrl_name = joint_name + pcfg.bendy_suffix
ctrl_exists = pm.objExists(ctrl_name + pcfg.control_suffix)
control_existed.append(ctrl_exists)
if ctrl_exists:
ctrl = pm.PyNode(ctrl_name + pcfg.control_suffix)
controls.append(ctrl)
continue
else:
ctrl = control.create(joint, shape, ctrl_name, axis_label, joint=True, scale=0.9, inner=.9)
pm.setAttr(ctrl.radius, cb=False)
controls.append(ctrl)
controls_to_organize.append(ctrl)
if joint == joints[0]:
xform.offsetConstraint(joint, ctrl)
elif joint == joints[-1]:
blend_matrix = pm.createNode('blendMatrix', n=joint_name + pcfg.blend_matrix_suffix)
start_joint.worldMatrix >> blend_matrix.inputMatrix
end_joint.worldMatrix >> blend_matrix.target[0].targetMatrix
blend_matrix.target[0].useRotate.set(0)
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
else:
si = str(i)
locator = pm.spaceLocator(n=joint_name + pcfg.bendy_locator_suffix)
uv_pin.attr('outputMatrix[{}]'.format(si)) >> locator.offsetParentMatrix
uv_pin.attr('coordinate[{}].coordinateU'.format(si)).set(0.5)
decimal_distance >> uv_pin.attr('coordinate[{}].coordinateV'.format(si))
pm.select(cl=True)
xform.parentMatrixConstraint(locator, joint, offset=True)
blend_matrix = pm.createNode('blendMatrix', n=joint_name + pcfg.blend_matrix_suffix)
start_joint.worldMatrix >> blend_matrix.inputMatrix
end_joint.worldMatrix >> blend_matrix.target[0].targetMatrix
decimal_distance >> blend_matrix.target[0].weight
blend_matrix.target[0].useRotate.set(0)
blend_matrix.outputMatrix >> ctrl.offsetParentMatrix
# inner control
inner_name = joint_name + pcfg.inner_suffix + pcfg.bendy_suffix
size = control.calculateSize(joint)
in_ctrl = control.create(locator, curve.plus, inner_name, axis_label,
scale=.85, parent=ctrl, size=size, inner=.02)
inner_controls.append(in_ctrl)
# global and control scale hooked up to locator that drives joint
joint_parent = joint.getParent()
decompose_matrix = attribute.getDecomposeMatrix(joint_parent.worldMatrix)
parent_scale = decompose_matrix.attr('outputScale' + absolute_axis_label.upper())
ctrl_scale = ctrl.attr('s' + absolute_axis_label)
in_ctrl_scale = in_ctrl.attr('s' + absolute_axis_label)
piper_mult = pipernode.multiply(locator, inputs=[parent_scale, ctrl_scale, in_ctrl_scale])
# multiplying inner control's translate by scale to compensate for any parent scaling
scale_mult = pm.createNode('multiplyDivide', n=ctrl_name + pcfg.bendy_locator_suffix + 'scaleMultiply')
piper_mult.output >> scale_mult.input1
in_ctrl.t >> scale_mult.input2
scale_mult.output >> locator.t
in_ctrl.r >> locator.r
locator.visibility.set(False)
locators.append(locator)
if i != 0:
ctrl_parent = controls[i - 1]
pm.reorder(ctrl, r=(i - 1) * -1)
if not control_existed[i - 1]:
xform.aimConstraint(ctrl, up_locator, controls[i - 1])
if ctrl_parent:
control.tagAsControllerParent(ctrl, ctrl_parent)
pm.select(controls, surface)
skin = pm.skinCluster(tsb=True, bm=0, sm=0, nw=1, wd=0, mi=4, omi=True)
# paint the skin weights so that they are set to 1
uvs = {pm.PyNode(joint): {'position': convert.toVector(joint), 'cvs': []} for joint in controls}
u_amount = surface_shape.numCVsInU()
v_amount = surface_shape.numCVsInV()
for u in range(u_amount):
for v in range(v_amount):
closest_joint = None
closest_distance = 0
cv_position = surface_shape.getCV(u, v, space='world')
for i, joint in enumerate(uvs):
distance = mayamath.getDistance(cv_position, uvs[joint]['position'])
if i == 0 or distance < closest_distance:
closest_distance = distance
closest_joint = joint
uvs[closest_joint]['cvs'].append(surface.name() + '.cv[{}][{}]'.format(str(u), str(v)))
for joint in uvs:
pm.select(uvs[joint]['cvs'])
pm.skinPercent(skin, nrm=True, tv=(joint, 1))
surface.visibility.set(False)
pm.select(cl=True)
function_name = inspect.currentframe().f_code.co_name
self.organize(controls_to_organize + locators + [surface, up_locator], prefix=function_name, name=name)
self.addControls(controls_to_organize, inner=inner_controls, name=function_name)
return controls
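# Usage sketch (not part of the original file); bendy needs at least three joints, and the
# hypothetical names below assume numbered in-between joints exist in the skeleton:
#   bendy_ctrls = rig.bendy(['upperarm_l', 'upperarm_bendy_01_l', 'lowerarm_l'], name='Arm Bendy')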
def banker(self, joint, ik_control, pivot_track=None, side='', use_track_shape=True):
"""
Creates a reverse foot control that changes pivot based on curve shape and control rotation input.
Useful for banking.
Args:
joint (pm.nodetypes.Joint): Joint that will be driven by the reverse module and IK handle.
ik_control (pm.nodetypes.Transform): Control that drives the IK Handle.
pivot_track (pm.nodetypes.Transform): NurbsCurve shape as child that will act as the track for the pivot.
side (string or None): Side to generate cross section
use_track_shape (boolean): If True, will use the pivot track shape as the control shape
Returns:
(pm.nodetypes.Transform): Control that moves the reverse foot pivot.
"""
joint_name = joint.name()
axis = mayamath.getOrientAxis(joint.getParent(), joint)
axes = convert.axisToTriAxis(axis)
if not side:
side = pcu.getSide(joint_name)
# get the IK handle and validate there is only one
ik_handle = list(set(ik_control.connections(skipConversionNodes=True, type='ikHandle')))
if len(ik_handle) != 1:
pm.error('Needed only ONE ik_handle. {} found.'.format(str(len(ik_handle))))
ik_handle = ik_handle[0]
# create a pivot track (cross section curve) if no pivot track (curve) is given
if not pivot_track:
# if IK joint given, get the name of the regular joint by stripping the ik prefix
if joint_name.startswith(pcfg.ik_prefix):
stripped_name = pcu.removePrefixes(joint_name, pcfg.ik_prefix)
namespace_name = pcfg.skeleton_namespace + ':' + stripped_name
search_joint = pm.PyNode(stripped_name) if pm.objExists(stripped_name) else pm.PyNode(namespace_name)
else:
search_joint = joint
# tries to get the meshes influenced by the skin cluster connected to the joint
skins = search_joint.future(type='skinCluster')
meshes = {mesh for skin in skins for mesh in pm.skinCluster(skin, q=True, g=True)} if skins else None
# create the pivot track curve
pm.select(cl=True)
pivot_track = curve.originCrossSection(meshes, side=side, name=joint_name + '_pivotTrack')
# validate that only one is made
if len(pivot_track) != 1:
text = 'Needed only ONE curve! {} curves made for the side: {}'.format(str(len(pivot_track)), str(side))
pm.error(text)
pivot_track = pivot_track[0]
# create the pivot and the normalized pivot, move the norm pivot to joint and then to floor
pivot = pm.group(em=True, name=joint_name + '_Pivot')
normalized_pivot = pm.group(em=True, name=joint_name + '_normalPivot')
pm.matchTransform(normalized_pivot, joint, pos=True, rot=False, scale=False)
normalized_pivot.ty.set(0)
xform.toOffsetMatrix(normalized_pivot)
# figure out control size, create control, lock and hide axis, translate, and scale
if ik_control.hasAttr(pcfg.proxy_fk_ik):
switcher_control = switcher.get(ik_control)
transforms = switcher.getData(switcher_control.attr(pcfg.switcher_transforms), cast=True)
size = control.calculateSize(transforms[-1])
else:
size = None
if use_track_shape:
ctrl = pm.duplicate(pivot_track, n=joint_name + pcfg.banker_suffix + pcfg.control_suffix)[0]
curve.color(ctrl, 'burnt orange')
else:
ctrl = control.create(joint, shape=curve.plus, name=joint_name + pcfg.banker_suffix, axis=axes[0],
color='burnt orange', matrix_offset=True, size=size, inner=.125, outer=1.25)
attribute.lockAndHide(ctrl.attr('r' + axes[0]))
attribute.lockAndHideCompound(ctrl, ['t', 's'])
# node to add small number
small_add = pm.createNode('plusMinusAverage', n=joint_name + '_plusSmallNumber')
small_add.input1D[0].set(0.001)
normalize_node = pm.createNode('vectorProduct', n=joint_name + '_pivotNormal')
normalize_node.operation.set(0)
normalize_node.normalizeOutput.set(True)
# adding a small amount to avoid division by zero
ctrl.attr('r' + axes[1]) >> small_add.input1D[1]
small_add.output1D >> normalize_node.attr('input1' + axes[2].upper())
# need to multiply the rotation by -1
negative_mult = pm.createNode('multDoubleLinear', n=joint_name + '_negative')
ctrl.attr('r' + axes[2]) >> negative_mult.input1
negative_mult.input2.set(-1)
normalize_input_attribute = negative_mult.output
normalize_input_attribute >> normalize_node.attr('input1' + axes[1].upper())
normalize_node.output >> normalized_pivot.translate
# creating the normalized (circle) version of the cross section
positions = []
duplicate_curve = pm.duplicate(pivot_track)[0]
pm.move(0, 0, 0, duplicate_curve, rpr=True)
cvs = duplicate_curve.numCVs()
for cv in range(0, cvs):
position = duplicate_curve.cv[cv].getPosition(space='world')
position.normalize()
positions.append(position)
# delete the duplicate and finally make the normalize track. Make sure to close the curve and center pivots
pm.delete(duplicate_curve)
normalized_track = pm.curve(d=1, p=positions, k=range(len(positions)), ws=True, n=joint_name + '_normalTrack')
normalized_track = pm.closeCurve(normalized_track, replaceOriginal=True)[0]
pm.xform(normalized_track, centerPivots=True)
# move normalized track to joint, then to floor, and freeze transforms
pm.matchTransform(normalized_track, joint, pos=True, rot=False, scale=False)
normalized_track.ty.set(0)
myu.freezeTransformations(normalized_track)
decomposed_matrix = pm.createNode('decomposeMatrix', n=normalize_node + '_decompose')
normalized_pivot.worldMatrix >> decomposed_matrix.inputMatrix
nearest_point = pm.createNode('nearestPointOnCurve', n=joint_name + '_nearestPoint')
decomposed_matrix.outputTranslate >> nearest_point.inPosition
normalized_track.getShape().worldSpace >> nearest_point.inputCurve
curve_info = pm.createNode('pointOnCurveInfo', n=joint_name + '_curveInfo')
nearest_point.parameter >> curve_info.parameter
pivot_track.getShape().worldSpace >> curve_info.inputCurve
reverse_group = pm.group(em=True, n=joint_name + '_reverse_grp')
xform.parentMatrixConstraint(ik_control, reverse_group, offset=True)
pm.parent([pivot, ctrl], reverse_group)
# curve_info position is where the pivot goes! Connect something to it if you want to visualize it
ctrl.r >> pivot.r
curve_info.result.position >> pivot.rotatePivot
# connect ik handle by letting the pivot drive it
pm.parent(ik_handle, pivot)
# make the pivot drive the joint's rotations
joint.r.disconnect()
xform.parentMatrixConstraint(pivot, joint, t=False, r=True, s=False, offset=True)
# clean up by hiding curves
pivot_track.visibility.set(False)
normalized_track.visibility.set(False)
ik_control.addAttr(pcfg.banker_attribute, dt='string', k=False, h=True, s=True)
ik_control.attr(pcfg.banker_attribute).set(ctrl.name())
# hook up pivot control with fk_ik attribute if ik has an fk-ik proxy
if ik_control.hasAttr(pcfg.proxy_fk_ik):
switcher_control = switcher.get(ik_control)
switcher_attribute = switcher_control.attr(pcfg.fk_ik_attribute)
switcher_attribute >> ctrl.lodVisibility
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
control.tagAsControllerParent(ctrl, ik_control)
nodes_to_organize = [reverse_group, normalized_pivot, normalized_track, pivot_track]
self.findGroup(joint, nodes_to_organize)
self.addControls([ctrl], name=inspect.currentframe().f_code.co_name)
return ctrl
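# Usage sketch (not part of the original file), mirroring how humanLeg wires it below; assumes
# an IK setup already exists where ik_joints[-1] is the ankle and ik_ctrl is the piper IK control:
#   banker_ctrl = rig.banker(ik_joints[-1], ik_ctrl, side='left')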
def reverse(self, driver, target, driven_negate=None, transform=None, switcher_ctrl=None, shape=None, axis=None):
"""
Creates a control that offsets the given target through rotation (usually foot roll reverse rig).
Args:
driver (pm.nodetypes.Transform): The transform that drives the whole chain. Usually the IK handle.
target (pm.nodetypes.Transform): Transform that will have control rotations added to. Usually end joint.
driven_negate (pm.nodetypes.Transform): Transform that will have control rotations subtracted from.
Usually any controls further down the chain/hierarchy of the given target.
transform (pm.nodetypes.Transform): Transform for making the control. Useful for figuring out control size.
If None given, will try to use given driven_negate, if no driven_negate, will try to use given target.
switcher_ctrl (pm.nodetypes.Transform): Transform that handles switching between FK and IK chains.
shape (method): Creates the shape control that will drive reverse rig system.
axis (string): Direction control will be facing when created.
Returns:
(pm.nodetypes.Transform): Control created.
"""
if not transform:
transform = driven_negate if driven_negate else target
if not shape:
shape = curve.square
# attempt to deduce axis if transform only has one child and axis is not given
transform = self.validateTransform(transform)
if not axis and transform.getChildren() and len(transform.getChildren()) == 1:
axis_vector = mayamath.getOrientAxis(transform, transform.getChildren()[0])
axis = convert.axisToString(axis_vector)
axis = convert.axisToTriAxis(axis)[1]
# create control
name = transform.name(stripNamespace=True) + pcfg.reverse_suffix
driver_parent = driver.getParent()
ctrl = control.create(transform, shape, name, axis, 'burnt orange', 0.5, True, parent=driver_parent)
self.addControls([ctrl], name=inspect.currentframe().f_code.co_name)
name = ctrl.name(stripNamespace=True)
pm.parent(driver, ctrl)
attribute.lockAndHideCompound(ctrl, ['t', 's'])
target_source = target.rotate.connections(scn=True, plugs=True, destination=False)
# add control's rotation to whatever is connected to target's rotate.
if target_source:
target_source = target_source[0]
plus = pm.createNode('plusMinusAverage', n='_'.join([target.name(), 'plus', name]))
target_source >> plus.input3D[0]
ctrl.rotate >> plus.input3D[1]
plus.output3D >> target.rotate
else:
ctrl.rotate >> target.rotate
# if no driven negate given or driven negate is not being offset by the offsetParentMatrix, we are finished here
if not driven_negate or not driven_negate.offsetParentMatrix.connections(scn=True, plugs=True, d=False):
return ctrl
# decompose and compose matrices to get rotation value subtracted with control's rotation
source_matrix = driven_negate.offsetParentMatrix.connections(scn=True, plugs=True, destination=False)[0]
source_name = source_matrix.node().name()
decomp_matrix = pm.createNode('decomposeMatrix', n=source_name + '_DM')
compose_matrix = pm.createNode('composeMatrix', n=source_name + '_CM')
source_matrix >> decomp_matrix.inputMatrix
decomp_matrix.outputTranslate >> compose_matrix.inputTranslate
decomp_matrix.outputScale >> compose_matrix.inputScale
minus = pm.createNode('plusMinusAverage', n='_'.join([source_name, 'minus', name]))
minus.operation.set(2)
decomp_matrix.outputRotate >> minus.input3D[0]
ctrl.rotate >> minus.input3D[1]
minus.output3D >> compose_matrix.inputRotate
compose_matrix.outputMatrix >> driven_negate.offsetParentMatrix
attribute.addReverseMessage(ctrl, driven_negate)
if not switcher_ctrl:
return ctrl
# add reverse control to switcher data and connect ik visibility onto reverse control
switcher.addData(switcher_ctrl.attr(pcfg.switcher_reverses), [name])
switcher_attribute = switcher_ctrl.attr(pcfg.fk_ik_attribute)
switcher_attribute >> ctrl.lodVisibility
# add proxy fk_ik attribute to ctrl
attribute.addSeparator(ctrl)
ctrl.addAttr(pcfg.proxy_fk_ik, proxy=switcher_attribute, k=True, dv=0, hsx=True, hsn=True, smn=0, smx=1)
# only make driven_negate by affected if IK is set to True
blend = pm.createNode('blendMatrix', n=source_name + '_negateBlend')
source_matrix >> blend.inputMatrix
compose_matrix.outputMatrix >> blend.target[0].targetMatrix
switcher_attribute >> blend.target[0].weight
blend.outputMatrix >> driven_negate.offsetParentMatrix
return ctrl
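# Usage sketch (not part of the original file), mirroring the humanLeg wiring below: the IK
# handle drives the chain, the ball control is negated, and the switcher handles visibility:
#   ik_handle = ik_ctrl.connections(skipConversionNodes=True, type='ikHandle')[0]
#   reverse_ctrl = rig.reverse(ik_handle, ik_joints[-1], ball_ctrls[0], ball_joint, switcher)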
def humanLeg(self, start, end, ball, side='', parent=None, global_ctrl='', name=''):
"""
Convenience method for rigging a leg: an FKIK chain with banker and reverse controls.
Args:
start (pm.nodetypes.Joint): Start of the chain to be driven by FK controls.
end (pm.nodetypes.Joint): End of the chain to be driven by FK controls. If none given, will only drive start
ball (pm.nodetypes.Transform): Transform that will be driven by FK chain and reversed.
side (string): Side to create banker control on.
parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint.
global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control.
name (str or None): Name to give group that will house all IK components.
Returns:
(list): Three lists: the FKIK results, the ball FK results, and the banker and reverse controls.
"""
fk_transforms, ik_transforms, ctrls = self.FKIK(start, end, parent=parent, global_ctrl=global_ctrl, name=name)
banker = self.banker(ik_transforms[-1], ctrls[-1], side=side)
ball_joint, ball_control, ball_inner = self.FK(ball, name=name)
xform.offsetConstraint(end, ball_control[0], offset=True)
ik_handle = ctrls[-1].connections(skipConversionNodes=True, type='ikHandle')[0]
reverse_ctrl = self.reverse(ik_handle, ik_transforms[-1], ball_control[0], ball, ctrls[0])
control.tagAsControllerParent(reverse_ctrl, banker)
return [fk_transforms, ik_transforms, ctrls], [ball_joint, ball_control, ball_inner], [banker, reverse_ctrl]
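# Usage sketch (not part of the original file); joint names are hypothetical and should match
# the referenced skeleton (a leg chain ending at the ankle plus a ball joint):
#   fkik_parts, ball_parts, extras = rig.humanLeg('thigh_l', 'foot_l', 'ball_l', side='left',
#                                                 parent=pelvis_ctrl, name='Leg')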
| en | 0.796822 | # Copyright (c) 2021 <NAME>. All Rights Reserved. Gets the root control associated with the given rig. Args: rig (pm.nodetypes.piperRig): Rig node to get root control of. Returns: (pm.nodetypes.DependNode): Root control of rig. Gets all the meshes inside all the piper skinned nodes in the scene. Returns: (set): Piper transforms that hold mesh shapes grouped under piper skinned nodes. Gets all the piperSkinnedMesh nodes that are a child of a piperRig node that start with the skeleton namespace. Args: rigs (list): rigs to find skeleton nodes of. If None given, will search for selected or scene rigs. Returns: (dictionary): piperSkinnedMesh nodes in rig(s) that start with skeleton namespace. Rig as value Gets all the transforms that are under the piperSkinnedMesh node that starts with the skeleton namespace. Returns: (dictionary): Transforms with mesh shape under piperSkinnedMesh node that starts with skeleton namespace. Locks or unlocks all the transforms under piper skinned nodes that have mesh shapes. Args: lock (int): Mode to set on meshes. 0 is unlocked, 1 is locked. Locks all the transforms under piper skinned nodes that have mesh shapes. Unlocks all the transforms under piper skinned nodes that have mesh shapes. Zeroes out the given controls to their bind pose. Retains current space. Args: controls (list): Controls to zero out. Example: from piper.mayapy.rig import Rig with Rig() as rig: root_ctrl = rig.root()[1][0] pelvis_ctrl = rig.FK('pelvis', name='Pelvis', parent=root_ctrl)[1][0] butt_ctrl = rig.extra('pelvis', 'butt', scale=1.05, spaces=[pelvis_ctrl, root_ctrl]) _, mouth_ctrls, _ = rig.FK('mouth', 'lips', parent=pelvis_ctrl, name='Mouth') [rig.FK(joint, parent=pelvis_ctrl, axis='z', name='Eyes') for joint in ['eye_l', 'eye_r']] Houses all rig scripts. Args: path (string): Path to skeletal mesh to prepare to start rigging. rig (pm.nodetypes.piperRig): Rig transform that holds all skinned meshes referenced. find (boolean): Will attempt to find piperRig node in scene if no rig or path is given. group (boolean): If True, will automatically parent nodes into the groups and/or into rig node. color (boolean): If True, will automatically color controls according to settings in piper_config.py copy_controls (boolean): If True, will attempt to copy control shapes from existing rig on finish. Context manager enter method. Returns: (piper.mayapy.rig.Rig): Class that holds all methods for rigging. Context manager exit method. Prepares the scene for a rig. Returns: (pm.nodetypes.piperRig): Rig transform that holds all skinned meshes referenced. # getRelativeArt checks if scene is saved # if scene is modified, ask user if they would like to save, not save, or cancel operation # open skeletal mesh to check for bone health # perform a bone health check before referencing to emphasize any possible errors # create new file, reference the skeleton into the new file, create rig group Validates the joint by casting to a PyNode with namespace if it's not already a PyNode with namespace. Args: transform (string or PyNode): Transform to validate to make sure its a PyNode with namespace. i (string): Digit format to incremental nodes to find with given i as the starting digit. Returns: (PyNode or list): Given transform as a PyNode. Convenience method for validating multiple transforms at once. Args: transforms (list): Transforms to validate to make sure they are PyNodes. i (string): Digit format to incremental nodes to find with given i as the starting digit. 
Returns: (list): Transforms validated. Adds controls to the self.controls stack to be added into the controls set Args: controls (list): Control(s) to be added to controls set. inner (list): Inner controls to be added to inner controls list. name (string): Name of control set. Adds the given children as the value to the given parent key to the group_stack dictionary. Args: parent (pm.nodetypes.Transform): Node to add as key that things will be parented to. children (list): Nodes to parent to given parent. Finds the group the given transforms should be parented under based on given reference transform. Args: reference_transform (pm.nodetypes.Transform): Used to search parent hierarchy or group stack for parent. transforms (list): Nodes to parent. # try to find the reference transform's parent in the group stack to figure out where it should be parented to # if found, add transform to the found parent # else get the first parent that is either a piperRig or is a group Parents all the given children to their corresponding parent key in the group stack dictionary. Adds all the controls in self.controls to the control set node. Groups everything, creates the control set group, colorizes, copies control shapes, and displays time. Sets the colors of the given controls that end with the given prefixes the given left, right, and middle colors. Args: controls (list): Controls to set colors of. left_suffix (string or Tuple): Suffix that ctrl must end with for color to be set to left color. right_suffix (string or Tuple): Suffix that ctrl must end with for color to be set to right color. left_color (string): Name of color for controls ending with left suffix. right_color (string): Name of color for controls ending with right suffix. middle_color (string): Name of color for controls NOT ending with either right OR left suffix. Colors all the controls according to setting in piper_config.py Organizes the given transforms into a group if name given and into the rig node. Args: transforms (Iterable): Nodes to group and/or move into rig node. prefix (string): Prefix for group name. Usually calling function name. name (string): Name to give group. Returns: (pm.nodetypes.Transform): Group node made. # preliminary checks, don't make group if no name given and there is no rig node # drive visibility of groups through rig node # set hidden, still keyable even though k is False Creates a dynamic pivot at the given transform driving the given target. Args: transform (pm.nodetypes.Transform): Transform to create dynamic pivot at. target (pm.nodetypes.Transform): Transform to drive with dynamic pivot. shape (method): Used to create curve or visual representation of FK control. axis (string): Orientation for control made. color (string): Color for control. scale (float): Multiplied times size. size (list): X, Y, Z sizes of control. Returns: (pm.nodetypes.Transform): Control created. Derives whether to tag the given ctrl with the parent, the parent's inner control, or the last in controls. Args: ctrl (pm.nodetypes.Transform or string): Transform that will receive parent to pick walk up to. parent (pm.nodetypes.Transform): Parent that could drive ctrl's chain. i (int): Iterator. controls (list): Controls being added to chain. Attempts to figure out the axis for the given iteration of the given transforms and/or duplicates. Args: i (int): Iteration count. transforms (list): Transforms to use to get orient axis. 
duplicates (list): Duplicates of transforms Returns: (string): Axis calculated from orientation of current iteration and next iteration. # attempt to deduce axis if transform only has one child and axis is not given Creates a root control with a squash and stretch attribute. Args: transform (pm.nodetypes.Transform or string): Joint to create root control on. name (string): Name to give group Returns: (list): Controls created in order from start to end. # create the root control as a regular FK # create a group above root control that will be scaled and squash and stretch attribute # create blender # hook up squash and stretch # connect root and rig with message for easy look up Creates FK controls for the transform chain deduced by the start and end transforms. Args: start (pm.nodetypes.Transform or string): Start of the chain to be driven by FK controls. end (pm.nodetypes.Transform or string): End of the chain to be driven by FK controls. parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint. axis (string): Only used if no end joint given for shape's axis to match rotations. shape (method): Used to create curve or visual representation of FK control. sizes (list): Sizes to use for each control. connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven. global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control. name (str or None): Name to give group that will house all FK components. Returns: (list): Controls created in order from start to end. # connects attributes that offset controls # used for scale calculation in FK control # connect all the stuff needed for volumetric scaling # edge cases for scaling Creates IK controls and IK RP solver and for the given start and end joints. Args: start (pm.nodetypes.Joint or string): Start of the joint chain. end (pm.nodetypes.Joint or string): End of the joint chain. parent (pm.nodetypes.Transform): Parent of start control. shape (method): Creates the shape control that will drive joints. sizes (list): Sizes to use for each control. connect (bool): If True, connects the duplicate FK chain to the given start/end transforms to be driven. global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control. name (str or None): Name to give group that will house all IK components. Returns: (list): Controls created in order from start to end. # start # scale buffer transform # mid # end # other unknown joint(s), left for possible future 3+ IK joint chains # connect controls to joints, and make ik handle # xform.poleVectorMatrixConstraint(ik_handle, mid_ctrl) # connect the rest # scale ctrl connect # parent pole vector to end control and create # preferred angle connection # must parent before creating spaces # create spaces for piper ik # global scale comes from parent's world matrix scale Creates a FK and IK controls that drive the chain from start to end. Args: start (pm.nodetypes.Joint or string): Start of the chain to be driven by FK controls. end (pm.nodetypes.Joint or string): End of the chain to be driven by FK controls. If none given, will only drive start parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint. fk_shape (method): Used to create curve or visual representation for the FK controls. ik_shape (method): Used to create curve or visual representation for the IK controls. 
proxy (boolean): If True, adds a proxy FK_IK attribute to all controls. global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control. name (str or None): Name to give group that will house all FKIK components. Returns: (list): Two lists, FK and IK controls created in order from start to end respectively. # create joint chains that is the same as the given start and end chain for FK and IK then create controls # create the switcher control and add the transforms, fk, and iks to its attribute to store it # one minus the output of the fk ik attribute in order to drive visibility of ik/fk controls # use spaces to drive original chain with fk and ik transforms and hook up switcher attributes # make proxy fk ik attribute on all the controls # start on index 1 since switcher is on index 0 # make IK control drive switcher visibility Creates extra control that doesn't drive the transform, but rather should be used with spaces and act as parent. Args: transform (pm.nodetypes.Transform or string): Transform to create control on. name (string): Name to append to given transform name. parent (pm.nodetypes.Transform): Transform to parent the control created onto. shape (method): Creates the control curve. axis (string): Orientation for control. color (string): Color of curve. scale (float): Scale to multiply by joint radius. spaces (iterator or None): A bunch of pm.nodetypes.Transform(s) that will drive the given transform. Returns: (pm.nodetypes.Transform): Control created. # allows for global scaling to work, otherwise parent under something that gets globally scaled # or fix so that global scale gets multiplied onto created control if no parent given # don't auto colorize if color is given Creates the twist control that mimics twist of given target based on given weight. Args: joint (pm.nodetypes.Transform or string): Joint to create FK control with twist attributes on. driver (pm.nodetypes.Transform or string): The "parent" for the given joint. target(pm.nodetypes.Transform or string): Used to mimic twist. axis (string or None): Axis to mimic twist of. blended (boolean): If True, will blend translate of joint between given driver and target. weight (float): Amount of twist joint will mimic from given target. global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control. name (str or None): Name to give group that will house all twist components. Returns: (list): Duplicate joint(s) as first index, control(s) as second index, and inner control(s) as third index. # get distance variables before making FK controls. # derive axis from driver and target # create FK control # name and create blend matrix # connect blend matrix and set default values # create attribute on control to drive the distance weight # create twist node and add twist attribute on control Creates controls for each given joint that will be used as part of a nurbs surface to drive given joints. Args: joints (list): Joints to create controls for. Must have at least three joints! ctrl_parent (pm.nodetypes.DependNode): Node to tag as control parent for pick-walking. shape (method): Used to create curve or visual representation of bendy control. i (string): Format and first digit to search for middle element in given joints. name (str or None): Name to give group that will house all bendy components. Returns: (list): Controls of bendy chain. 
# if axis is negative, rotate nurbs surface 180 degrees so that UVs end up going down the correct direction # inner control # global and control scale hooked up to locator that drives joint # multiplying inner control's translate by scale to compensate for any parent scaling # paint the skin weights so that they are set to 1 Creates a reverse foot control that changes pivot based on curve shape and control rotation input. Useful for banking. Args: joint (pm.nodetypes.Joint): Joint that will be driven by the reverse module and IK handle. ik_control (pm.nodetypes.Transform): Control that drives the IK Handle. pivot_track (pm.nodetypes.Transform): NurbsCurve shape as child that will act as the track for the pivot. side (string or None): Side to generate cross section use_track_shape (boolean): If True, will use the pivot track shape as the control shape Returns: (pm.nodetypes.Transform): Control that moves the reverse foot pivot. # get the IK handle and validate there is only one # create a pivot track (cross section curve) if no pivot track (curve) is given # if IK joint given, get the name of the regular joint by stripping the ik prefix # tries to get the meshes influenced by the skin cluster connected to the joint # create the pivot track curve # validate that only one is made # create the pivot and the normalized pivot, move the norm pivot to joint and then to floor # figure out control size, create control, lock and hide axis, translate, and scale # node to add small number # adding a small amount to avoid division by zero # need to multiply the rotation by -1 # creating the normalized (circle) version of the cross section # delete the duplicate and finally make the normalize track. Make sure to close the curve and center pivots # move normalized track to joint, then to floor, and freeze transforms # curve_info position is where the pivot goes! Connect something to it if you want to visualize it # connect ik handle by letting the pivot drive it # make the pivot drive the joint's rotations # clean up by hiding curves # hook up pivot control with fk_ik attribute if ik has an fk-ik proxy Creates a control that offsets the given target through rotation (usually foot roll reverse rig). Args: driver (pm.nodetypes.Transform): The transform that drives the whole chain. Usually the IK handle. target (pm.nodetypes.Transform): Transform that will have control rotations added to. Usually end joint. driven_negate (pm.nodetypes.Transform): Transform that will have control rotations subtracted from. Usually any controls further down the chain/hierarchy of the given target. transform (pm.nodetypes.Transform): Transform for making the control. Useful for figuring out control size. If None given, will try to use given driven_negate, if no driven_negate, will try to use given target. switcher_ctrl (pm.nodetypes.Transform): Transform that handles switching between FK and IK chains. shape (method): Creates the shape control that will drive reverse rig system. axis (string): Direction control will be facing when created. Returns: (pm.nodetypes.Transform): Control created. # attempt to deduce axis if transform only has one child and axis is not given # create control # add control's rotation to whatever is connected to target's rotate. 
# if no driven negate given or driven negate is not being offset by the offsetParentMatrix, we are finished here # decompose and compose matrices to get rotation value subtracted with control's rotation # add reverse control to switcher data and connect ik visibility onto reverse control # add proxy fk_ik attribute to ctrl # only make driven_negate by affected if IK is set to True Convenience method for rigging a leg. FKIK chain, with banker, and reverse controls. Args: start (pm.nodetypes.Joint): Start of the chain to be driven by FK controls. end (pm.nodetypes.Joint): End of the chain to be driven by FK controls. If none given, will only drive start ball (pm.nodetypes.Transform): Transform that will be driven by FK chain and reversed. side (string): Side to create banker control on. parent (pm.nodetypes.Transform): If given, will drive the start control through parent matrix constraint. global_ctrl (pm.nodetypes.Transform): If given, will use this to drive global scale of piperIK control. name (str or None): Name to give group that will house all IK components. Returns: (list): Nodes created. | 2.182184 | 2 |
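The IK notes above mention a pole vector for the RP solver; the snippet below is an illustrative, Maya-free sketch of the standard placement math (project the mid joint onto the start-to-end line, then push outward through the mid joint). It is not taken from the rig module documented above, and the sample joint positions are invented.
import math

def pole_vector_position(start, mid, end, distance=1.0):
    # tiny vector helpers so the sketch stays dependency-free
    sub = lambda a, b: [a[i] - b[i] for i in range(3)]
    add = lambda a, b: [a[i] + b[i] for i in range(3)]
    dot = lambda a, b: sum(a[i] * b[i] for i in range(3))
    scale = lambda a, s: [a[i] * s for i in range(3)]

    start_to_end = sub(end, start)
    start_to_mid = sub(mid, start)
    # project the mid joint onto the start->end line
    t = dot(start_to_mid, start_to_end) / max(dot(start_to_end, start_to_end), 1e-9)
    projection = add(start, scale(start_to_end, t))
    # push outward from that projection, through the mid joint
    direction = sub(mid, projection)
    length = math.sqrt(dot(direction, direction)) or 1.0
    return add(mid, scale(direction, distance / length))

print(pole_vector_position([0, 0, 0], [1, 1, 0], [2, 0, 0], distance=2.0))  # [1.0, 3.0, 0.0]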
src/cool_compiler/error/error.py | matcom-school/cool-compiler-2021 | 0 | 6630911 | <reponame>matcom-school/cool-compiler-2021
from cool_compiler.types.build_in_types import self
class CoolError:
def __init__(self, code : str) -> None:
self._list = []
self.code = code
self.pos = None
self.text = None
def any(self):
if any(self._list):
for msg in self._list:
print(msg)
return True
return False
def __call__(self, lineno, index):
self.pos = (lineno, self.find_column(index))
return self
def find_column(self, index):
last_cr = self.code.rfind('\n', 0, index)
if last_cr < 0:
last_cr = 0
column = (index - last_cr)
return column if column != 0 else 1
def __add(self, etype, text):
self._list.append(f'{self.pos} - {etype}: {text}')
def get_handler(self, lineno, index) -> self:
l = lineno + 0
i = index + 0
return lambda : self.__call__(l,i)
def add_lexical(self, text):
self.__add("LexicographicError", text)
def add_syntactic(self, text):
self.__add("SyntacticError", text)
def add_name_error(self, text):
self.__add("NameError", text)
def add_type_error(self, basee, ttype = ''):
self.__add("TypeError", f'{basee} {ttype}')
def add_attribute_error(self, ttype, attr):
self.__add("AttributeError", f'{attr} in {ttype} type')
def add_semantic_error(self, text):
self.__add("SemanticError", text) | from cool_compiler.types.build_in_types import self
class CoolError:
def __init__(self, code : str) -> None:
self._list = []
self.code = code
self.pos = None
self.text = None
def any(self):
if any(self._list):
for msg in self._list:
print(msg)
return True
return False
def __call__(self, lineno, index):
self.pos = (lineno, self.find_column(index))
return self
def find_column(self, index):
last_cr = self.code.rfind('\n', 0, index)
if last_cr < 0:
last_cr = 0
column = (index - last_cr)
return column if column != 0 else 1
def __add(self, etype, text):
self._list.append(f'{self.pos} - {etype}: {text}')
def get_handler(self, lineno, index) -> self:
l = lineno + 0
i = index + 0
return lambda : self.__call__(l,i)
def add_lexical(self, text):
self.__add("LexicographicError", text)
def add_syntactic(self, text):
self.__add("SyntacticError", text)
def add_name_error(self, text):
self.__add("NameError", text)
def add_type_error(self, basee, ttype = ''):
self.__add("TypeError", f'{basee} {ttype}')
def add_attribute_error(self, ttype, attr):
self.__add("AttributeError", f'{attr} in {ttype} type')
def add_semantic_error(self, text):
self.__add("SemanticError", text) | none | 1 | 2.805947 | 3 |
|
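A minimal usage sketch for the CoolError class above; the import path is inferred from the repo layout shown, and the source text, positions, and messages are invented for illustration.
from cool_compiler.error.error import CoolError  # assumed import path

code = "class Main inherits IO {\n  main(): Object { out_string(x) };\n};"
errors = CoolError(code)

# __call__ stores the (line, column) position and returns the same instance
errors(2, 34).add_name_error('Variable "x" is not defined in scope')

# get_handler captures a position now so the caller can report later
report = errors.get_handler(1, 6)
report().add_syntactic('ERROR at or near "inherits"')

errors.any()  # prints each "(line, column) - Type: message" entry and returns True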
pyplusplus/code_creators/module.py | electronicvisions/pyplusplus | 0 | 6630912 | <reponame>electronicvisions/pyplusplus
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
from . import custom
from . import license
from . import include
from . import compound
from . import namespace
from . import algorithm
from . import module_body
from . import library_reference
from . import declaration_based
from . import include_directories
from pygccxml.utils import utils
class module_t(compound.compound_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns, code_generator_type):
"""Constructor.
"""
compound.compound_t.__init__(self)
self.__global_ns = global_ns
self._code_generator = code_generator_type
@property
def global_ns(self):
"reference to global_ns ( namespace_t ) declaration"
return self.__global_ns
def _get_license( self ):
if isinstance( self.creators[0], license.license_t ):
return self.creators[0]
return None
def _set_license( self, license_text ):
if not isinstance( license_text, license.license_t ):
license_inst = license.license_t( license_text )
if isinstance( self.creators[0], license.license_t ):
self.remove_creator( self.creators[0] )
self.adopt_creator( license_inst, 0 )
license = property( _get_license, _set_license,
doc="""License text.
The license text will always be the first children node.
@type: str or :class:`code_creators.license_t`""")
def _get_system_files_impl( self ):
return []
@utils.cached
def specially_exposed_decls(self):
"""list of exposed declarations, which were not ``included``, but still
were exposed. For example, std containers.
"""
decls = set()
#select all declaration based code creators
ccs = [cc for cc in algorithm.make_flatten_list( self ) if isinstance( cc, declaration_based.declaration_based_t )]
#leave only "ignored"
ccs = [cc for cc in ccs if cc.declaration.ignore == True]
decls = [cc.declaration for cc in ccs]
return set( decls )
def update_documentation( self, doc_extractor ):
if not doc_extractor:
return
visited = set()
for cc in algorithm.make_flatten( self ):
if not isinstance( cc, declaration_based.declaration_based_t ):
continue
if id( cc.declaration ) in visited:
continue
cc.declaration.documentation = doc_extractor( cc.declaration )
visited.add( id( cc.declaration ) )
class bpmodule_t(module_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns):
"""Constructor.
"""
module_t.__init__(self, global_ns, bpmodule_t.CODE_GENERATOR_TYPES.BOOST_PYTHON)
self.__body = None
def _get_include_dirs(self):
include_dirs = algorithm.creator_finder.find_by_class_instance(
what=include_directories.include_directories_t
, where=self.creators
, recursive=False)
if 0 == len( include_dirs ):
include_dirs = include_directories.include_directories_t()
if self.license:
self.adopt_creator( include_dirs, 1 )
else:
self.adopt_creator( include_dirs, 0 )
return include_dirs
elif 1 == len( include_dirs ):
return include_dirs[0]
else:
assert not "only single instance of include_directories_t should exist"
def _get_std_directories(self):
include_dirs = self._get_include_dirs()
return include_dirs.std
std_directories = property( _get_std_directories )
def _get_user_defined_directories(self):
include_dirs = self._get_include_dirs()
return include_dirs.user_defined
user_defined_directories = property( _get_user_defined_directories )
@property
def body(self):
"""Return reference to :class:`code_creators.module_body_t` code creator"""
if None is self.__body:
found = algorithm.creator_finder.find_by_class_instance( what=module_body.module_body_t
, where=self.creators
, recursive=False )
if found:
self.__body = found[0]
return self.__body
def last_include_index(self):
"""
return the children index of the last :class:`code_creators.include_t` object.
An exception is raised when there is no include_t object among
the children creators.
:rtype: int
"""
for i in range( len(self.creators) - 1, -1, -1 ):
if isinstance( self.creators[i], include.include_t ):
return i
else:
return 0
def replace_included_headers( self, headers, leave_system_headers=True ):
to_be_removed = []
for creator in self.creators:
if isinstance( creator, include.include_t ):
to_be_removed.append( creator )
elif isinstance( creator, module_body.module_body_t ):
break
for creator in to_be_removed:
if creator.is_system:
if not leave_system_headers:
self.remove_creator( creator )
elif creator.is_user_defined:
pass
else:
self.remove_creator( creator )
for header in headers:
self.adopt_include( include.include_t( header=header ) )
def adopt_include(self, include_creator):
"""Insert an :class:`code_creators.include_t` object.
The include creator is inserted right after the last include file.
:param include_creator: Include creator object
:type include_creator: :class:`code_creators.include_t`
"""
lii = self.last_include_index()
if lii == 0:
if not self.creators:
lii = -1
elif not isinstance( self.creators[0], include.include_t ):
lii = -1
else:
pass
self.adopt_creator( include_creator, lii + 1 )
def do_include_dirs_optimization(self):
include_dirs = self._get_include_dirs()
includes = [creator for creator in self.creators if isinstance( creator, include.include_t )]
for include_creator in includes:
include_creator.include_dirs_optimization = include_dirs
def _create_impl(self):
self.do_include_dirs_optimization()
index = 0
code = []
for index in range( len( self.creators ) ):
if not isinstance( self.creators[index], include.include_t ):
break
else:
code.append( self.creators[index].create() )
if code:
code.append( 2* os.linesep )
code.append( self.create_internal_code( self.creators[index:], indent_code=False ))
code.append( os.linesep )
return os.linesep.join( code )
def add_include( self, header, user_defined=True, system=False ):
creator = include.include_t( header=header, user_defined=user_defined, system=system )
self.adopt_include( creator )
def add_namespace_usage( self, namespace_name ):
self.adopt_creator( namespace.namespace_using_t( namespace_name )
, self.last_include_index() + 1 )
def add_namespace_alias( self, alias, full_namespace_name ):
self.adopt_creator( namespace.namespace_alias_t(
alias=alias
, full_namespace_name=full_namespace_name )
, self.last_include_index() + 1 )
def adopt_declaration_creator( self, creator ):
self.adopt_creator( creator, self.creators.index( self.body ) )
def add_declaration_code( self, code, position=None ):
self.adopt_declaration_creator( custom.custom_text_t( code ) )
class ctypes_module_t(module_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns):
"""Constructor.
"""
module_t.__init__(self, global_ns, ctypes_module_t.CODE_GENERATOR_TYPES.CTYPES)
self.treat_char_ptr_as_binary_data = False
def _create_impl(self):
return self.create_internal_code( self.creators, indent_code=False )
@utils.cached
def library_var_name(self):
for creator in self.creators:
if isinstance( creator, library_reference.library_reference_t ):
return creator.library_var_name
else:
raise RuntimeError( "Internal Error: library_reference_t creator was not created" )
| # Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
from . import custom
from . import license
from . import include
from . import compound
from . import namespace
from . import algorithm
from . import module_body
from . import library_reference
from . import declaration_based
from . import include_directories
from pygccxml.utils import utils
class module_t(compound.compound_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns, code_generator_type):
"""Constructor.
"""
compound.compound_t.__init__(self)
self.__global_ns = global_ns
self._code_generator = code_generator_type
@property
def global_ns(self):
"reference to global_ns ( namespace_t ) declaration"
return self.__global_ns
def _get_license( self ):
if isinstance( self.creators[0], license.license_t ):
return self.creators[0]
return None
def _set_license( self, license_text ):
if not isinstance( license_text, license.license_t ):
license_inst = license.license_t( license_text )
if isinstance( self.creators[0], license.license_t ):
self.remove_creator( self.creators[0] )
self.adopt_creator( license_inst, 0 )
license = property( _get_license, _set_license,
doc="""License text.
The license text will always be the first children node.
@type: str or :class:`code_creators.license_t`""")
def _get_system_files_impl( self ):
return []
@utils.cached
def specially_exposed_decls(self):
"""list of exposed declarations, which were not ``included``, but still
were exposed. For example, std containers.
"""
decls = set()
#select all declaration based code creators
ccs = [cc for cc in algorithm.make_flatten_list( self ) if isinstance( cc, declaration_based.declaration_based_t )]
#leave only "ignored"
ccs = [cc for cc in ccs if cc.declaration.ignore == True]
decls = [cc.declaration for cc in ccs]
return set( decls )
def update_documentation( self, doc_extractor ):
if not doc_extractor:
return
visited = set()
for cc in algorithm.make_flatten( self ):
if not isinstance( cc, declaration_based.declaration_based_t ):
continue
if id( cc.declaration ) in visited:
continue
cc.declaration.documentation = doc_extractor( cc.declaration )
visited.add( id( cc.declaration ) )
class bpmodule_t(module_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns):
"""Constructor.
"""
module_t.__init__(self, global_ns, bpmodule_t.CODE_GENERATOR_TYPES.BOOST_PYTHON)
self.__body = None
def _get_include_dirs(self):
include_dirs = algorithm.creator_finder.find_by_class_instance(
what=include_directories.include_directories_t
, where=self.creators
, recursive=False)
if 0 == len( include_dirs ):
include_dirs = include_directories.include_directories_t()
if self.license:
self.adopt_creator( include_dirs, 1 )
else:
self.adopt_creator( include_dirs, 0 )
return include_dirs
elif 1 == len( include_dirs ):
return include_dirs[0]
else:
assert not "only single instance of include_directories_t should exist"
def _get_std_directories(self):
include_dirs = self._get_include_dirs()
return include_dirs.std
std_directories = property( _get_std_directories )
def _get_user_defined_directories(self):
include_dirs = self._get_include_dirs()
return include_dirs.user_defined
user_defined_directories = property( _get_user_defined_directories )
@property
def body(self):
"""Return reference to :class:`code_creators.module_body_t` code creator"""
if None is self.__body:
found = algorithm.creator_finder.find_by_class_instance( what=module_body.module_body_t
, where=self.creators
, recursive=False )
if found:
self.__body = found[0]
return self.__body
def last_include_index(self):
"""
return the children index of the last :class:`code_creators.include_t` object.
An exception is raised when there is no include_t object among
the children creators.
:rtype: int
"""
for i in range( len(self.creators) - 1, -1, -1 ):
if isinstance( self.creators[i], include.include_t ):
return i
else:
return 0
def replace_included_headers( self, headers, leave_system_headers=True ):
to_be_removed = []
for creator in self.creators:
if isinstance( creator, include.include_t ):
to_be_removed.append( creator )
elif isinstance( creator, module_body.module_body_t ):
break
for creator in to_be_removed:
if creator.is_system:
if not leave_system_headers:
self.remove_creator( creator )
elif creator.is_user_defined:
pass
else:
self.remove_creator( creator )
for header in headers:
self.adopt_include( include.include_t( header=header ) )
def adopt_include(self, include_creator):
"""Insert an :class:`code_creators.include_t` object.
The include creator is inserted right after the last include file.
:param include_creator: Include creator object
:type include_creator: :class:`code_creators.include_t`
"""
lii = self.last_include_index()
if lii == 0:
if not self.creators:
lii = -1
elif not isinstance( self.creators[0], include.include_t ):
lii = -1
else:
pass
self.adopt_creator( include_creator, lii + 1 )
def do_include_dirs_optimization(self):
include_dirs = self._get_include_dirs()
includes = [creator for creator in self.creators if isinstance( creator, include.include_t )]
for include_creator in includes:
include_creator.include_dirs_optimization = include_dirs
def _create_impl(self):
self.do_include_dirs_optimization()
index = 0
code = []
for index in range( len( self.creators ) ):
if not isinstance( self.creators[index], include.include_t ):
break
else:
code.append( self.creators[index].create() )
if code:
code.append( 2* os.linesep )
code.append( self.create_internal_code( self.creators[index:], indent_code=False ))
code.append( os.linesep )
return os.linesep.join( code )
def add_include( self, header, user_defined=True, system=False ):
creator = include.include_t( header=header, user_defined=user_defined, system=system )
self.adopt_include( creator )
def add_namespace_usage( self, namespace_name ):
self.adopt_creator( namespace.namespace_using_t( namespace_name )
, self.last_include_index() + 1 )
def add_namespace_alias( self, alias, full_namespace_name ):
self.adopt_creator( namespace.namespace_alias_t(
alias=alias
, full_namespace_name=full_namespace_name )
, self.last_include_index() + 1 )
def adopt_declaration_creator( self, creator ):
self.adopt_creator( creator, self.creators.index( self.body ) )
def add_declaration_code( self, code, position=None ):
self.adopt_declaration_creator( custom.custom_text_t( code ) )
class ctypes_module_t(module_t):
"""This class represents the source code for the entire extension module.
The root of the code creator tree is always a module_t object.
"""
def __init__(self, global_ns):
"""Constructor.
"""
module_t.__init__(self, global_ns, ctypes_module_t.CODE_GENERATOR_TYPES.CTYPES)
self.treat_char_ptr_as_binary_data = False
def _create_impl(self):
return self.create_internal_code( self.creators, indent_code=False )
@utils.cached
def library_var_name(self):
for creator in self.creators:
if isinstance( creator, library_reference.library_reference_t ):
return creator.library_var_name
else:
raise RuntimeError( "Internal Error: library_reference_t creator was not created" ) | en | 0.816409 | # Copyright 2004-2008 <NAME>. # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) This class represents the source code for the entire extension module. The root of the code creator tree is always a module_t object. Constructor. License text. The license text will always be the first children node. @type: str or :class:`code_creators.license_t` list of exposed declarations, which were not ``included``, but still were exposed. For example, std containers. #select all declaration based code creators #leave only "ignored" This class represents the source code for the entire extension module. The root of the code creator tree is always a module_t object. Constructor. Return reference to :class:`code_creators.module_body_t` code creator return the children index of the last :class:`code_creators.include_t` object. An exception is raised when there is no include_t object among the children creators. :rtype: int Insert an :class:`code_creators.include_t` object. The include creator is inserted right after the last include file. :param include_creator: Include creator object :type include_creator: :class:`code_creators.include_t` This class represents the source code for the entire extension module. The root of the code creator tree is always a module_t object. Constructor. | 1.833047 | 2 |
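As an aside, bpmodule_t.adopt_include() above boils down to "insert right after the last include creator, or at the front when there is none"; the toy snippet below restates that rule over a plain list, purely for illustration (it is not the pyplusplus API).
def insert_after_last_include(creators, new_include, is_include):
    # find the index of the last include-like element, -1 when absent
    last = -1
    for i, creator in enumerate(creators):
        if is_include(creator):
            last = i
    creators.insert(last + 1, new_include)
    return creators

demo = ["include_a", "include_b", "module_body"]
print(insert_after_last_include(demo, "include_c", lambda c: c.startswith("include")))
# ['include_a', 'include_b', 'include_c', 'module_body']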
boa3_test/test_sc/variable_test/AssignLocalWithArgument.py | hal0x2328/neo3-boa | 25 | 6630913 | <reponame>hal0x2328/neo3-boa<filename>boa3_test/test_sc/variable_test/AssignLocalWithArgument.py
from boa3.builtin import public
@public
def Main(a: int) -> int:
b = a
return b
| from boa3.builtin import public
@public
def Main(a: int) -> int:
b = a
return b | none | 1 | 1.668443 | 2 |
|
niftyreg_python_utilities/apply_transform_dir.py | pritesh-mehta/niftyreg_python_utilities | 0 | 6630914 | <filename>niftyreg_python_utilities/apply_transform_dir.py
"""
@author: pritesh-mehta
"""
import os
from argparse import ArgumentParser
def apply_transform_dir(niftyreg_exe_dir, ref_dir, flo_dir, res_dir, trans_dir,
param_str='-inter 1', extension='nii.gz'):
'''Apply transform to flo_dir
'''
ref = os.listdir(ref_dir)
flo = os.listdir(flo_dir)
ref = [x for x in ref if extension in x]
flo = [x for x in flo if extension in x]
for file in ref:
ref = os.path.join(ref_dir, file)
flo = os.path.join(flo_dir, file)
res = os.path.join(res_dir, file)
trans = os.path.join(trans_dir, "nrr_cpp_" + file)
os.system(r"cd " + str(niftyreg_exe_dir) + " & reg_resample -ref " + str(ref) + " -flo " +
str(flo) + " -res " + str(res) + " -trans " + str(trans) + " " + str(param_str))
return None
def process():
parser = ArgumentParser()
parser.add_argument('--niftyreg_exe_dir', required=True, type=str)
parser.add_argument('--ref_dir', required=True, type=str)
parser.add_argument('--flo_dir', required=True, type=str)
parser.add_argument('--res_dir', required=True, type=str)
parser.add_argument('--trans_dir', required=True, type=str)
parser.add_argument('--param_str', required=False, type=str, default='-inter 1')
parser.add_argument('--extension', required=False, type=str, default='.nii.gz')
args = parser.parse_args()
apply_transform_dir(args.niftyreg_exe_dir, args.ref_dir, args.flo_dir,
args.res_dir, args.trans_dir,
param_str=args.param_str, extension=args.extension)
if __name__ == "__main__":
process() | <filename>niftyreg_python_utilities/apply_transform_dir.py
"""
@author: pritesh-mehta
"""
import os
from argparse import ArgumentParser
def apply_transform_dir(niftyreg_exe_dir, ref_dir, flo_dir, res_dir, trans_dir,
param_str='-inter 1', extension='nii.gz'):
'''Apply transform to flo_dir
'''
ref = os.listdir(ref_dir)
flo = os.listdir(flo_dir)
ref = [x for x in ref if extension in x]
flo = [x for x in flo if extension in x]
for file in ref:
ref = os.path.join(ref_dir, file)
flo = os.path.join(flo_dir, file)
res = os.path.join(res_dir, file)
trans = os.path.join(trans_dir, "nrr_cpp_" + file)
os.system(r"cd " + str(niftyreg_exe_dir) + " & reg_resample -ref " + str(ref) + " -flo " +
str(flo) + " -res " + str(res) + " -trans " + str(trans) + " " + str(param_str))
return None
def process():
parser = ArgumentParser()
parser.add_argument('--niftyreg_exe_dir', required=True, type=str)
parser.add_argument('--ref_dir', required=True, type=str)
parser.add_argument('--flo_dir', required=True, type=str)
parser.add_argument('--res_dir', required=True, type=str)
parser.add_argument('--trans_dir', required=True, type=str)
parser.add_argument('--param_str', required=False, type=str, default='-inter 1')
parser.add_argument('--extension', required=False, type=str, default='.nii.gz')
args = parser.parse_args()
apply_transform_dir(args.niftyreg_exe_dir, args.ref_dir, args.flo_dir,
args.res_dir, args.trans_dir,
param_str=args.param_str, extension=args.extension)
if __name__ == "__main__":
process() | en | 0.584948 | @author: pritesh-mehta Apply transform to flo_dir | 2.476522 | 2 |
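A short calling sketch for apply_transform_dir above, for use from Python instead of the argparse entry point; the directories and the import path are placeholders, not real data.
from niftyreg_python_utilities.apply_transform_dir import apply_transform_dir  # assumed import path

apply_transform_dir(
    niftyreg_exe_dir="/opt/niftyreg/bin",   # hypothetical NiftyReg install location
    ref_dir="data/reference",
    flo_dir="data/floating",
    res_dir="data/resampled",
    trans_dir="data/transforms",            # the function looks for nrr_cpp_<case> files here
    param_str="-inter 1",                   # passed straight through to reg_resample
    extension=".nii.gz",
)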
Lib/idlelib/idle_test/test_colorizer.py | askervin/cpython | 1 | 6630915 | "Test colorizer, coverage 93%."
from idlelib import colorizer
from test.support import requires
import unittest
from unittest import mock
from functools import partial
from tkinter import Tk, Text
from idlelib import config
from idlelib.percolator import Percolator
usercfg = colorizer.idleConf.userCfg
testcfg = {
'main': config.IdleUserConfParser(''),
'highlight': config.IdleUserConfParser(''),
'keys': config.IdleUserConfParser(''),
'extensions': config.IdleUserConfParser(''),
}
source = (
"if True: int ('1') # keyword, builtin, string, comment\n"
"elif False: print(0) # 'string' in comment\n"
"else: float(None) # if in comment\n"
"if iF + If + IF: 'keyword matching must respect case'\n"
"if'': x or'' # valid string-keyword no-space combinations\n"
"async def f(): await g()\n"
"'x', '''x''', \"x\", \"\"\"x\"\"\"\n"
)
def setUpModule():
colorizer.idleConf.userCfg = testcfg
def tearDownModule():
colorizer.idleConf.userCfg = usercfg
class FunctionTest(unittest.TestCase):
def test_any(self):
self.assertEqual(colorizer.any('test', ('a', 'b', 'cd')),
'(?P<test>a|b|cd)')
def test_make_pat(self):
# Tested in more detail by testing prog.
self.assertTrue(colorizer.make_pat())
def test_prog(self):
prog = colorizer.prog
eq = self.assertEqual
line = 'def f():\n print("hello")\n'
m = prog.search(line)
eq(m.groupdict()['KEYWORD'], 'def')
m = prog.search(line, m.end())
eq(m.groupdict()['SYNC'], '\n')
m = prog.search(line, m.end())
eq(m.groupdict()['BUILTIN'], 'print')
m = prog.search(line, m.end())
eq(m.groupdict()['STRING'], '"hello"')
m = prog.search(line, m.end())
eq(m.groupdict()['SYNC'], '\n')
def test_idprog(self):
idprog = colorizer.idprog
m = idprog.match('nospace')
self.assertIsNone(m)
m = idprog.match(' space')
self.assertEqual(m.group(0), ' space')
class ColorConfigTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
root = cls.root = Tk()
root.withdraw()
cls.text = Text(root)
@classmethod
def tearDownClass(cls):
del cls.text
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def test_color_config(self):
text = self.text
eq = self.assertEqual
colorizer.color_config(text)
# Uses IDLE Classic theme as default.
eq(text['background'], '#ffffff')
eq(text['foreground'], '#000000')
eq(text['selectbackground'], 'gray')
eq(text['selectforeground'], '#000000')
eq(text['insertbackground'], 'black')
eq(text['inactiveselectbackground'], 'gray')
class ColorDelegatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
root = cls.root = Tk()
root.withdraw()
text = cls.text = Text(root)
cls.percolator = Percolator(text)
# Delegator stack = [Delagator(text)]
@classmethod
def tearDownClass(cls):
cls.percolator.redir.close()
del cls.percolator, cls.text
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def setUp(self):
self.color = colorizer.ColorDelegator()
self.percolator.insertfilter(self.color)
# Calls color.setdelagate(Delagator(text)).
def tearDown(self):
self.color.close()
self.percolator.removefilter(self.color)
self.text.delete('1.0', 'end')
self.color.resetcache()
del self.color
def test_init(self):
color = self.color
self.assertIsInstance(color, colorizer.ColorDelegator)
# The following are class variables.
self.assertTrue(color.allow_colorizing)
self.assertFalse(color.colorizing)
def test_setdelegate(self):
# Called in setUp.
color = self.color
self.assertIsInstance(color.delegate, colorizer.Delegator)
# It is too late to mock notify_range, so test side effect.
self.assertEqual(self.root.tk.call(
'after', 'info', color.after_id)[1], 'timer')
def test_LoadTagDefs(self):
highlight = partial(config.idleConf.GetHighlight, theme='IDLE Classic')
for tag, colors in self.color.tagdefs.items():
with self.subTest(tag=tag):
self.assertIn('background', colors)
self.assertIn('foreground', colors)
if tag not in ('SYNC', 'TODO'):
self.assertEqual(colors, highlight(element=tag.lower()))
def test_config_colors(self):
text = self.text
highlight = partial(config.idleConf.GetHighlight, theme='IDLE Classic')
for tag in self.color.tagdefs:
for plane in ('background', 'foreground'):
with self.subTest(tag=tag, plane=plane):
if tag in ('SYNC', 'TODO'):
self.assertEqual(text.tag_cget(tag, plane), '')
else:
self.assertEqual(text.tag_cget(tag, plane),
highlight(element=tag.lower())[plane])
# 'sel' is marked as the highest priority.
self.assertEqual(text.tag_names()[-1], 'sel')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_insert(self, mock_notify):
text = self.text
# Initial text.
text.insert('insert', 'foo')
self.assertEqual(text.get('1.0', 'end'), 'foo\n')
mock_notify.assert_called_with('1.0', '1.0+3c')
# Additional text.
text.insert('insert', 'barbaz')
self.assertEqual(text.get('1.0', 'end'), 'foobarbaz\n')
mock_notify.assert_called_with('1.3', '1.3+6c')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_delete(self, mock_notify):
text = self.text
# Initialize text.
text.insert('insert', 'abcdefghi')
self.assertEqual(text.get('1.0', 'end'), 'abcdefghi\n')
# Delete single character.
text.delete('1.7')
self.assertEqual(text.get('1.0', 'end'), 'abcdefgi\n')
mock_notify.assert_called_with('1.7')
# Delete multiple characters.
text.delete('1.3', '1.6')
self.assertEqual(text.get('1.0', 'end'), 'abcgi\n')
mock_notify.assert_called_with('1.3')
def test_notify_range(self):
text = self.text
color = self.color
eq = self.assertEqual
# Colorizing already scheduled.
save_id = color.after_id
eq(self.root.tk.call('after', 'info', save_id)[1], 'timer')
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
# Coloring scheduled and colorizing in progress.
color.colorizing = True
color.notify_range('1.0', 'end')
self.assertFalse(color.stop_colorizing)
eq(color.after_id, save_id)
# No colorizing scheduled and colorizing in progress.
text.after_cancel(save_id)
color.after_id = None
color.notify_range('1.0', '1.0+3c')
self.assertTrue(color.stop_colorizing)
self.assertIsNotNone(color.after_id)
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# New event scheduled.
self.assertNotEqual(color.after_id, save_id)
# No colorizing scheduled and colorizing off.
text.after_cancel(color.after_id)
color.after_id = None
color.allow_colorizing = False
color.notify_range('1.4', '1.4+10c')
# Nothing scheduled when colorizing is off.
self.assertIsNone(color.after_id)
def test_toggle_colorize_event(self):
color = self.color
eq = self.assertEqual
# Starts with colorizing allowed and scheduled.
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# Toggle colorizing off.
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.allow_colorizing)
# Toggle on while colorizing in progress (doesn't add timer).
color.colorizing = True
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertTrue(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
# Toggle off while colorizing in progress.
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertTrue(color.colorizing)
self.assertTrue(color.stop_colorizing)
self.assertFalse(color.allow_colorizing)
# Toggle on while colorizing not in progress.
color.colorizing = False
color.toggle_colorize_event()
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
self.assertFalse(color.colorizing)
self.assertTrue(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
@mock.patch.object(colorizer.ColorDelegator, 'recolorize_main')
def test_recolorize(self, mock_recmain):
text = self.text
color = self.color
eq = self.assertEqual
# Call recolorize manually and not scheduled.
text.after_cancel(color.after_id)
# No delegate.
save_delegate = color.delegate
color.delegate = None
color.recolorize()
mock_recmain.assert_not_called()
color.delegate = save_delegate
# Toggle off colorizing.
color.allow_colorizing = False
color.recolorize()
mock_recmain.assert_not_called()
color.allow_colorizing = True
# Colorizing in progress.
color.colorizing = True
color.recolorize()
mock_recmain.assert_not_called()
color.colorizing = False
# Colorizing is done, but not completed, so rescheduled.
color.recolorize()
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.colorizing)
mock_recmain.assert_called()
eq(mock_recmain.call_count, 1)
# Rescheduled when TODO tag still exists.
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# No changes to text, so no scheduling added.
text.tag_remove('TODO', '1.0', 'end')
color.recolorize()
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.colorizing)
mock_recmain.assert_called()
eq(mock_recmain.call_count, 2)
self.assertIsNone(color.after_id)
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_recolorize_main(self, mock_notify):
text = self.text
color = self.color
eq = self.assertEqual
text.insert('insert', source)
expected = (('1.0', ('KEYWORD',)), ('1.2', ()), ('1.3', ('KEYWORD',)),
('1.7', ()), ('1.9', ('BUILTIN',)), ('1.14', ('STRING',)),
('1.19', ('COMMENT',)),
('2.1', ('KEYWORD',)), ('2.18', ()), ('2.25', ('COMMENT',)),
('3.6', ('BUILTIN',)), ('3.12', ('KEYWORD',)), ('3.21', ('COMMENT',)),
('4.0', ('KEYWORD',)), ('4.3', ()), ('4.6', ()),
('5.2', ('STRING',)), ('5.8', ('KEYWORD',)), ('5.10', ('STRING',)),
('6.0', ('KEYWORD',)), ('6.10', ('DEFINITION',)), ('6.11', ()),
('7.0', ('STRING',)), ('7.4', ()), ('7.5', ('STRING',)),
('7.12', ()), ('7.14', ('STRING',)),
# SYNC at the end of every line.
('1.55', ('SYNC',)), ('2.50', ('SYNC',)), ('3.34', ('SYNC',)),
)
# Nothing marked to do therefore no tags in text.
text.tag_remove('TODO', '1.0', 'end')
color.recolorize_main()
for tag in text.tag_names():
with self.subTest(tag=tag):
eq(text.tag_ranges(tag), ())
# Source marked for processing.
text.tag_add('TODO', '1.0', 'end')
# Check some indexes.
color.recolorize_main()
for index, expected_tags in expected:
with self.subTest(index=index):
eq(text.tag_names(index), expected_tags)
# Check for some tags for ranges.
eq(text.tag_nextrange('TODO', '1.0'), ())
eq(text.tag_nextrange('KEYWORD', '1.0'), ('1.0', '1.2'))
eq(text.tag_nextrange('COMMENT', '2.0'), ('2.22', '2.43'))
eq(text.tag_nextrange('SYNC', '2.0'), ('2.43', '3.0'))
eq(text.tag_nextrange('STRING', '2.0'), ('4.17', '4.53'))
eq(text.tag_nextrange('STRING', '7.0'), ('7.0', '7.3'))
eq(text.tag_nextrange('STRING', '7.3'), ('7.5', '7.12'))
eq(text.tag_nextrange('STRING', '7.12'), ('7.14', '7.17'))
eq(text.tag_nextrange('STRING', '7.17'), ('7.19', '7.26'))
eq(text.tag_nextrange('SYNC', '7.0'), ('7.26', '9.0'))
@mock.patch.object(colorizer.ColorDelegator, 'recolorize')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_removecolors(self, mock_notify, mock_recolorize):
text = self.text
color = self.color
text.insert('insert', source)
color.recolorize_main()
# recolorize_main doesn't add these tags.
text.tag_add("ERROR", "1.0")
text.tag_add("TODO", "1.0")
text.tag_add("hit", "1.0")
for tag in color.tagdefs:
with self.subTest(tag=tag):
self.assertNotEqual(text.tag_ranges(tag), ())
color.removecolors()
for tag in color.tagdefs:
with self.subTest(tag=tag):
self.assertEqual(text.tag_ranges(tag), ())
if __name__ == '__main__':
unittest.main(verbosity=2)
| "Test colorizer, coverage 93%."
from idlelib import colorizer
from test.support import requires
import unittest
from unittest import mock
from functools import partial
from tkinter import Tk, Text
from idlelib import config
from idlelib.percolator import Percolator
usercfg = colorizer.idleConf.userCfg
testcfg = {
'main': config.IdleUserConfParser(''),
'highlight': config.IdleUserConfParser(''),
'keys': config.IdleUserConfParser(''),
'extensions': config.IdleUserConfParser(''),
}
source = (
"if True: int ('1') # keyword, builtin, string, comment\n"
"elif False: print(0) # 'string' in comment\n"
"else: float(None) # if in comment\n"
"if iF + If + IF: 'keyword matching must respect case'\n"
"if'': x or'' # valid string-keyword no-space combinations\n"
"async def f(): await g()\n"
"'x', '''x''', \"x\", \"\"\"x\"\"\"\n"
)
def setUpModule():
colorizer.idleConf.userCfg = testcfg
def tearDownModule():
colorizer.idleConf.userCfg = usercfg
class FunctionTest(unittest.TestCase):
def test_any(self):
self.assertEqual(colorizer.any('test', ('a', 'b', 'cd')),
'(?P<test>a|b|cd)')
def test_make_pat(self):
# Tested in more detail by testing prog.
self.assertTrue(colorizer.make_pat())
def test_prog(self):
prog = colorizer.prog
eq = self.assertEqual
line = 'def f():\n print("hello")\n'
m = prog.search(line)
eq(m.groupdict()['KEYWORD'], 'def')
m = prog.search(line, m.end())
eq(m.groupdict()['SYNC'], '\n')
m = prog.search(line, m.end())
eq(m.groupdict()['BUILTIN'], 'print')
m = prog.search(line, m.end())
eq(m.groupdict()['STRING'], '"hello"')
m = prog.search(line, m.end())
eq(m.groupdict()['SYNC'], '\n')
def test_idprog(self):
idprog = colorizer.idprog
m = idprog.match('nospace')
self.assertIsNone(m)
m = idprog.match(' space')
self.assertEqual(m.group(0), ' space')
class ColorConfigTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
root = cls.root = Tk()
root.withdraw()
cls.text = Text(root)
@classmethod
def tearDownClass(cls):
del cls.text
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def test_color_config(self):
text = self.text
eq = self.assertEqual
colorizer.color_config(text)
# Uses IDLE Classic theme as default.
eq(text['background'], '#ffffff')
eq(text['foreground'], '#000000')
eq(text['selectbackground'], 'gray')
eq(text['selectforeground'], '#000000')
eq(text['insertbackground'], 'black')
eq(text['inactiveselectbackground'], 'gray')
class ColorDelegatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
root = cls.root = Tk()
root.withdraw()
text = cls.text = Text(root)
cls.percolator = Percolator(text)
# Delegator stack = [Delagator(text)]
@classmethod
def tearDownClass(cls):
cls.percolator.redir.close()
del cls.percolator, cls.text
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def setUp(self):
self.color = colorizer.ColorDelegator()
self.percolator.insertfilter(self.color)
# Calls color.setdelagate(Delagator(text)).
def tearDown(self):
self.color.close()
self.percolator.removefilter(self.color)
self.text.delete('1.0', 'end')
self.color.resetcache()
del self.color
def test_init(self):
color = self.color
self.assertIsInstance(color, colorizer.ColorDelegator)
# The following are class variables.
self.assertTrue(color.allow_colorizing)
self.assertFalse(color.colorizing)
def test_setdelegate(self):
# Called in setUp.
color = self.color
self.assertIsInstance(color.delegate, colorizer.Delegator)
# It is too late to mock notify_range, so test side effect.
self.assertEqual(self.root.tk.call(
'after', 'info', color.after_id)[1], 'timer')
def test_LoadTagDefs(self):
highlight = partial(config.idleConf.GetHighlight, theme='IDLE Classic')
for tag, colors in self.color.tagdefs.items():
with self.subTest(tag=tag):
self.assertIn('background', colors)
self.assertIn('foreground', colors)
if tag not in ('SYNC', 'TODO'):
self.assertEqual(colors, highlight(element=tag.lower()))
def test_config_colors(self):
text = self.text
highlight = partial(config.idleConf.GetHighlight, theme='IDLE Classic')
for tag in self.color.tagdefs:
for plane in ('background', 'foreground'):
with self.subTest(tag=tag, plane=plane):
if tag in ('SYNC', 'TODO'):
self.assertEqual(text.tag_cget(tag, plane), '')
else:
self.assertEqual(text.tag_cget(tag, plane),
highlight(element=tag.lower())[plane])
# 'sel' is marked as the highest priority.
self.assertEqual(text.tag_names()[-1], 'sel')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_insert(self, mock_notify):
text = self.text
# Initial text.
text.insert('insert', 'foo')
self.assertEqual(text.get('1.0', 'end'), 'foo\n')
mock_notify.assert_called_with('1.0', '1.0+3c')
# Additional text.
text.insert('insert', 'barbaz')
self.assertEqual(text.get('1.0', 'end'), 'foobarbaz\n')
mock_notify.assert_called_with('1.3', '1.3+6c')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_delete(self, mock_notify):
text = self.text
# Initialize text.
text.insert('insert', 'abcdefghi')
self.assertEqual(text.get('1.0', 'end'), 'abcdefghi\n')
# Delete single character.
text.delete('1.7')
self.assertEqual(text.get('1.0', 'end'), 'abcdefgi\n')
mock_notify.assert_called_with('1.7')
# Delete multiple characters.
text.delete('1.3', '1.6')
self.assertEqual(text.get('1.0', 'end'), 'abcgi\n')
mock_notify.assert_called_with('1.3')
def test_notify_range(self):
text = self.text
color = self.color
eq = self.assertEqual
# Colorizing already scheduled.
save_id = color.after_id
eq(self.root.tk.call('after', 'info', save_id)[1], 'timer')
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
# Coloring scheduled and colorizing in progress.
color.colorizing = True
color.notify_range('1.0', 'end')
self.assertFalse(color.stop_colorizing)
eq(color.after_id, save_id)
# No colorizing scheduled and colorizing in progress.
text.after_cancel(save_id)
color.after_id = None
color.notify_range('1.0', '1.0+3c')
self.assertTrue(color.stop_colorizing)
self.assertIsNotNone(color.after_id)
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# New event scheduled.
self.assertNotEqual(color.after_id, save_id)
# No colorizing scheduled and colorizing off.
text.after_cancel(color.after_id)
color.after_id = None
color.allow_colorizing = False
color.notify_range('1.4', '1.4+10c')
# Nothing scheduled when colorizing is off.
self.assertIsNone(color.after_id)
def test_toggle_colorize_event(self):
color = self.color
eq = self.assertEqual
# Starts with colorizing allowed and scheduled.
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# Toggle colorizing off.
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertFalse(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.allow_colorizing)
# Toggle on while colorizing in progress (doesn't add timer).
color.colorizing = True
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertTrue(color.colorizing)
self.assertFalse(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
# Toggle off while colorizing in progress.
color.toggle_colorize_event()
self.assertIsNone(color.after_id)
self.assertTrue(color.colorizing)
self.assertTrue(color.stop_colorizing)
self.assertFalse(color.allow_colorizing)
# Toggle on while colorizing not in progress.
color.colorizing = False
color.toggle_colorize_event()
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
self.assertFalse(color.colorizing)
self.assertTrue(color.stop_colorizing)
self.assertTrue(color.allow_colorizing)
@mock.patch.object(colorizer.ColorDelegator, 'recolorize_main')
def test_recolorize(self, mock_recmain):
text = self.text
color = self.color
eq = self.assertEqual
# Call recolorize manually and not scheduled.
text.after_cancel(color.after_id)
# No delegate.
save_delegate = color.delegate
color.delegate = None
color.recolorize()
mock_recmain.assert_not_called()
color.delegate = save_delegate
# Toggle off colorizing.
color.allow_colorizing = False
color.recolorize()
mock_recmain.assert_not_called()
color.allow_colorizing = True
# Colorizing in progress.
color.colorizing = True
color.recolorize()
mock_recmain.assert_not_called()
color.colorizing = False
# Colorizing is done, but not completed, so rescheduled.
color.recolorize()
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.colorizing)
mock_recmain.assert_called()
eq(mock_recmain.call_count, 1)
# Rescheduled when TODO tag still exists.
eq(self.root.tk.call('after', 'info', color.after_id)[1], 'timer')
# No changes to text, so no scheduling added.
text.tag_remove('TODO', '1.0', 'end')
color.recolorize()
self.assertFalse(color.stop_colorizing)
self.assertFalse(color.colorizing)
mock_recmain.assert_called()
eq(mock_recmain.call_count, 2)
self.assertIsNone(color.after_id)
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_recolorize_main(self, mock_notify):
text = self.text
color = self.color
eq = self.assertEqual
text.insert('insert', source)
expected = (('1.0', ('KEYWORD',)), ('1.2', ()), ('1.3', ('KEYWORD',)),
('1.7', ()), ('1.9', ('BUILTIN',)), ('1.14', ('STRING',)),
('1.19', ('COMMENT',)),
('2.1', ('KEYWORD',)), ('2.18', ()), ('2.25', ('COMMENT',)),
('3.6', ('BUILTIN',)), ('3.12', ('KEYWORD',)), ('3.21', ('COMMENT',)),
('4.0', ('KEYWORD',)), ('4.3', ()), ('4.6', ()),
('5.2', ('STRING',)), ('5.8', ('KEYWORD',)), ('5.10', ('STRING',)),
('6.0', ('KEYWORD',)), ('6.10', ('DEFINITION',)), ('6.11', ()),
('7.0', ('STRING',)), ('7.4', ()), ('7.5', ('STRING',)),
('7.12', ()), ('7.14', ('STRING',)),
# SYNC at the end of every line.
('1.55', ('SYNC',)), ('2.50', ('SYNC',)), ('3.34', ('SYNC',)),
)
# Nothing marked to do therefore no tags in text.
text.tag_remove('TODO', '1.0', 'end')
color.recolorize_main()
for tag in text.tag_names():
with self.subTest(tag=tag):
eq(text.tag_ranges(tag), ())
# Source marked for processing.
text.tag_add('TODO', '1.0', 'end')
# Check some indexes.
color.recolorize_main()
for index, expected_tags in expected:
with self.subTest(index=index):
eq(text.tag_names(index), expected_tags)
# Check for some tags for ranges.
eq(text.tag_nextrange('TODO', '1.0'), ())
eq(text.tag_nextrange('KEYWORD', '1.0'), ('1.0', '1.2'))
eq(text.tag_nextrange('COMMENT', '2.0'), ('2.22', '2.43'))
eq(text.tag_nextrange('SYNC', '2.0'), ('2.43', '3.0'))
eq(text.tag_nextrange('STRING', '2.0'), ('4.17', '4.53'))
eq(text.tag_nextrange('STRING', '7.0'), ('7.0', '7.3'))
eq(text.tag_nextrange('STRING', '7.3'), ('7.5', '7.12'))
eq(text.tag_nextrange('STRING', '7.12'), ('7.14', '7.17'))
eq(text.tag_nextrange('STRING', '7.17'), ('7.19', '7.26'))
eq(text.tag_nextrange('SYNC', '7.0'), ('7.26', '9.0'))
@mock.patch.object(colorizer.ColorDelegator, 'recolorize')
@mock.patch.object(colorizer.ColorDelegator, 'notify_range')
def test_removecolors(self, mock_notify, mock_recolorize):
text = self.text
color = self.color
text.insert('insert', source)
color.recolorize_main()
# recolorize_main doesn't add these tags.
text.tag_add("ERROR", "1.0")
text.tag_add("TODO", "1.0")
text.tag_add("hit", "1.0")
for tag in color.tagdefs:
with self.subTest(tag=tag):
self.assertNotEqual(text.tag_ranges(tag), ())
color.removecolors()
for tag in color.tagdefs:
with self.subTest(tag=tag):
self.assertEqual(text.tag_ranges(tag), ())
if __name__ == '__main__':
unittest.main(verbosity=2)
| en | 0.859741 | # keyword, builtin, string, comment\n" # 'string' in comment\n" # if in comment\n" # valid string-keyword no-space combinations\n" x # Tested in more detail by testing prog. # Uses IDLE Classic theme as default. # Delegator stack = [Delagator(text)] # Calls color.setdelagate(Delagator(text)). # The following are class variables. # Called in setUp. # It is too late to mock notify_range, so test side effect. # 'sel' is marked as the highest priority. # Initial text. # Additional text. # Initialize text. # Delete single character. # Delete multiple characters. # Colorizing already scheduled. # Coloring scheduled and colorizing in progress. # No colorizing scheduled and colorizing in progress. # New event scheduled. # No colorizing scheduled and colorizing off. # Nothing scheduled when colorizing is off. # Starts with colorizing allowed and scheduled. # Toggle colorizing off. # Toggle on while colorizing in progress (doesn't add timer). # Toggle off while colorizing in progress. # Toggle on while colorizing not in progress. # Call recolorize manually and not scheduled. # No delegate. # Toggle off colorizing. # Colorizing in progress. # Colorizing is done, but not completed, so rescheduled. # Rescheduled when TODO tag still exists. # No changes to text, so no scheduling added. # SYNC at the end of every line. # Nothing marked to do therefore no tags in text. # Source marked for processing. # Check some indexes. # Check for some tags for ranges. # recolorize_main doesn't add these tags. | 2.695267 | 3 |
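The FunctionTest cases above pin down colorizer.any() and the compiled prog pattern; the fragment below repeats the same named-group trick in isolation with two toy groups, so it is only a sketch of the idea, not IDLE's real highlighting pattern.
import re

def any_group(name, alternates):
    # same shape the test expects from colorizer.any(): (?P<name>alt1|alt2|...)
    return "(?P<%s>%s)" % (name, "|".join(alternates))

prog = re.compile(any_group("KEYWORD", [r"\bdef\b", r"\breturn\b"]) + "|" +
                  any_group("STRING", [r"'[^']*'"]), re.S)

for match in prog.finditer("def f(): return 'hi'"):
    print({k: v for k, v in match.groupdict().items() if v is not None})
# {'KEYWORD': 'def'}, {'KEYWORD': 'return'}, {'STRING': "'hi'"}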
fringes/main_window.py | drs251/fringes | 2 | 6630916 | <filename>fringes/main_window.py
import numpy as np
import time
import pyqtgraph as pg
from PyQt5.QtCore import pyqtSlot, pyqtSignal, qDebug, QRectF, QRect, Qt
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QDialog, QGridLayout, QLabel, QDoubleSpinBox, QAction
from ui.main_window import Ui_MainWindow
from camera_dialog import CameraDialog
from data_handler import DataHandler
from plugin_dialog import PluginDialog
class MainWindow(QMainWindow):
class PidDialog(QDialog):
values_changed = pyqtSignal(float, float, float)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setWindowTitle("PID settings")
self.setMinimumWidth(150)
# self.setWindowFlags(Qt.WindowStaysOnTopHint)
layout = QGridLayout()
p_label = QLabel("P")
layout.addWidget(p_label, 0, 0)
self.p_spinbox = QDoubleSpinBox()
self.p_spinbox.setSingleStep(0.1)
self.p_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.p_spinbox, 0, 1)
i_label = QLabel("I")
layout.addWidget(i_label, 1, 0)
self.i_spinbox = QDoubleSpinBox()
self.i_spinbox.setSingleStep(0.1)
self.i_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.i_spinbox, 1, 1)
d_label = QLabel("D")
layout.addWidget(d_label, 2, 0)
self.d_spinbox = QDoubleSpinBox()
self.d_spinbox.setSingleStep(0.1)
self.d_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.d_spinbox, 2, 1)
self.setLayout(layout)
def _emit_new_values(self):
p = self.p_spinbox.value()
i = self.i_spinbox.value()
d = self.d_spinbox.value()
self.values_changed.emit(p, i, d)
def set_values(self, p, i, d):
self.p_spinbox.setValue(p)
self.i_spinbox.setValue(i)
self.d_spinbox.setValue(d)
def closeEvent(self, event):
event.accept()
self.deleteLater()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.graphicsView.setBackground(pg.mkColor(0.3))
self.plot_box = self.ui.graphicsView.addViewBox(row=1, col=1, lockAspect=True, enableMouse=True, invertY=True)
self.image_item = pg.ImageItem()
self.image_item.setOpts(axisOrder='row-major')
self.plot_box.addItem(self.image_item)
self.roi = None
self.ui.selectDataButton.toggled.connect(self.show_roi)
self.ui.resetSelectDataButton.clicked.connect(self.reset_roi)
self.settings_layout = QHBoxLayout()
self.settings_widget = QWidget()
self.settings_layout.addWidget(self.settings_widget)
self.ui.camSettingsWidget.setLayout(self.settings_layout)
self.data_handler = DataHandler()
for plugin in self.data_handler.plugins:
self.add_plugin(plugin.get_widget(), plugin.name)
self.data_handler.ndarray_available.connect(self.show_ndarray)
self.data_handler.camera_controls_changed.connect(self.set_camera_controls)
self.ui.actionSave_image.triggered.connect(self.data_handler.save_file)
self.data_handler.enable_saturation_widget.connect(self.enable_saturation_bar)
self.data_handler.saturation_changed.connect(self.ui.progressBar.setValue)
self.data_handler.message.connect(self.show_message)
self.camera_dialog = CameraDialog()
self.ui.actionChoose_camera.triggered.connect(self.camera_dialog.choose_camera)
self.camera_dialog.camera_changed.connect(self.data_handler.change_camera)
self.camera_dialog.choose_first_camera()
self.ui.actionTune_camera_parameters.triggered.connect(self.tune_pid)
self.ui.actionShow_Settings.toggled.connect(self.show_settings)
self.ui.actionDraw_lines.toggled.connect(self.draw_lines)
self.hline = None
self.vline = None
@pyqtSlot(np.ndarray)
def show_ndarray(self, array):
self.image_item.setImage(array)
@pyqtSlot(QWidget)
def set_camera_controls(self, controls):
self.settings_layout.removeWidget(self.settings_widget)
self.settings_widget.setParent(None)
self.settings_widget.deleteLater()
self.settings_widget = controls
self.settings_layout.addWidget(controls)
@pyqtSlot(bool)
def enable_saturation_bar(self, enable):
self.ui.progressBar.setEnabled(enable)
if not enable:
self.ui.progressBar.setValue(0)
@pyqtSlot(int)
def set_saturation_percentage(self, value):
self.ui.progressBar.setValue(value)
@pyqtSlot(bool)
def show_settings(self, show):
self.ui.bottom_settings_widget.setVisible(show)
@pyqtSlot(QWidget, str)
def add_plugin(self, widget: QWidget, name: str):
self.ui.tabWidget.addTab(widget, name)
@pyqtSlot(str)
def show_message(self, message):
self.ui.statusbar.showMessage(message, 5000)
@pyqtSlot(bool)
def show_roi(self, show):
if show:
rect = self.data_handler.clip_size
if rect is not None:
self.roi = pg.ROI([rect.left(), rect.top()], [rect.width(), rect.height()], pen=pg.mkPen(color='r'))
self.roi.addScaleHandle([0.5, 1], [0.5, 0.5])
self.roi.addScaleHandle([0, 0.5], [0.5, 0.5])
self.plot_box.addItem(self.roi)
self.roi.setZValue(10)
self.roi.sigRegionChangeFinished.connect(self.on_roi_changed)
else:
if self.roi is not None:
self.plot_box.removeItem(self.roi)
self.roi = None
@pyqtSlot()
def reset_roi(self):
self.ui.selectDataButton.setChecked(False)
self.show_roi(False)
self.data_handler.clip_size = None
@pyqtSlot()
def on_roi_changed(self):
if self.roi is not None:
pos = self.roi.pos()
size = self.roi.size()
rect = QRect(pos.x(), pos.y(), size.x(), size.y())
self.data_handler.set_clip_size(rect)
@pyqtSlot(bool)
def draw_lines(self, draw):
if draw:
x_range, y_range = self.plot_box.viewRange()
self.hline = pg.InfiniteLine(pos=np.mean(y_range), angle=0, pen=pg.mkPen('r'))
self.vline = pg.InfiniteLine(pos=np.mean(x_range), angle=90, pen=pg.mkPen('r'))
self.hline.setZValue(10)
self.vline.setZValue(10)
self.plot_box.addItem(self.hline)
self.plot_box.addItem(self.vline)
else:
if self.hline is not None:
self.plot_box.removeItem(self.hline)
self.hline = None
if self.vline is not None:
self.plot_box.removeItem(self.vline)
self.vline = None
def tune_pid(self):
try:
dialog = self.PidDialog(parent=self)
cam = self.data_handler.camera
dialog.set_values(*cam.get_pid())
dialog.values_changed.connect(self.set_pid_values)
dialog.show()
dialog.raise_()
dialog.activateWindow()
except Exception as err:
print(str(err))
def set_pid_values(self, p, i, d):
try:
cam = self.data_handler.camera
cam.set_pid(p, i, d)
except:
pass
| <filename>fringes/main_window.py
import numpy as np
import time
import pyqtgraph as pg
from PyQt5.QtCore import pyqtSlot, pyqtSignal, qDebug, QRectF, QRect, Qt
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QDialog, QGridLayout, QLabel, QDoubleSpinBox, QAction
from ui.main_window import Ui_MainWindow
from camera_dialog import CameraDialog
from data_handler import DataHandler
from plugin_dialog import PluginDialog
class MainWindow(QMainWindow):
class PidDialog(QDialog):
values_changed = pyqtSignal(float, float, float)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setWindowTitle("PID settings")
self.setMinimumWidth(150)
# self.setWindowFlags(Qt.WindowStaysOnTopHint)
layout = QGridLayout()
p_label = QLabel("P")
layout.addWidget(p_label, 0, 0)
self.p_spinbox = QDoubleSpinBox()
self.p_spinbox.setSingleStep(0.1)
self.p_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.p_spinbox, 0, 1)
i_label = QLabel("I")
layout.addWidget(i_label, 1, 0)
self.i_spinbox = QDoubleSpinBox()
self.i_spinbox.setSingleStep(0.1)
self.i_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.i_spinbox, 1, 1)
d_label = QLabel("D")
layout.addWidget(d_label, 2, 0)
self.d_spinbox = QDoubleSpinBox()
self.d_spinbox.setSingleStep(0.1)
self.d_spinbox.valueChanged.connect(self._emit_new_values)
layout.addWidget(self.d_spinbox, 2, 1)
self.setLayout(layout)
def _emit_new_values(self):
p = self.p_spinbox.value()
i = self.i_spinbox.value()
d = self.d_spinbox.value()
self.values_changed.emit(p, i, d)
def set_values(self, p, i, d):
self.p_spinbox.setValue(p)
self.i_spinbox.setValue(i)
self.d_spinbox.setValue(d)
def closeEvent(self, event):
event.accept()
self.deleteLater()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.graphicsView.setBackground(pg.mkColor(0.3))
self.plot_box = self.ui.graphicsView.addViewBox(row=1, col=1, lockAspect=True, enableMouse=True, invertY=True)
self.image_item = pg.ImageItem()
self.image_item.setOpts(axisOrder='row-major')
self.plot_box.addItem(self.image_item)
self.roi = None
self.ui.selectDataButton.toggled.connect(self.show_roi)
self.ui.resetSelectDataButton.clicked.connect(self.reset_roi)
self.settings_layout = QHBoxLayout()
self.settings_widget = QWidget()
self.settings_layout.addWidget(self.settings_widget)
self.ui.camSettingsWidget.setLayout(self.settings_layout)
self.data_handler = DataHandler()
for plugin in self.data_handler.plugins:
self.add_plugin(plugin.get_widget(), plugin.name)
self.data_handler.ndarray_available.connect(self.show_ndarray)
self.data_handler.camera_controls_changed.connect(self.set_camera_controls)
self.ui.actionSave_image.triggered.connect(self.data_handler.save_file)
self.data_handler.enable_saturation_widget.connect(self.enable_saturation_bar)
self.data_handler.saturation_changed.connect(self.ui.progressBar.setValue)
self.data_handler.message.connect(self.show_message)
self.camera_dialog = CameraDialog()
self.ui.actionChoose_camera.triggered.connect(self.camera_dialog.choose_camera)
self.camera_dialog.camera_changed.connect(self.data_handler.change_camera)
self.camera_dialog.choose_first_camera()
self.ui.actionTune_camera_parameters.triggered.connect(self.tune_pid)
self.ui.actionShow_Settings.toggled.connect(self.show_settings)
self.ui.actionDraw_lines.toggled.connect(self.draw_lines)
self.hline = None
self.vline = None
@pyqtSlot(np.ndarray)
def show_ndarray(self, array):
self.image_item.setImage(array)
@pyqtSlot(QWidget)
def set_camera_controls(self, controls):
self.settings_layout.removeWidget(self.settings_widget)
self.settings_widget.setParent(None)
self.settings_widget.deleteLater()
self.settings_widget = controls
self.settings_layout.addWidget(controls)
@pyqtSlot(bool)
def enable_saturation_bar(self, enable):
self.ui.progressBar.setEnabled(enable)
if not enable:
self.ui.progressBar.setValue(0)
@pyqtSlot(int)
def set_saturation_percentage(self, value):
self.ui.progressBar.setValue(value)
@pyqtSlot(bool)
def show_settings(self, show):
self.ui.bottom_settings_widget.setVisible(show)
@pyqtSlot(QWidget, str)
def add_plugin(self, widget: QWidget, name: str):
self.ui.tabWidget.addTab(widget, name)
@pyqtSlot(str)
def show_message(self, message):
self.ui.statusbar.showMessage(message, 5000)
@pyqtSlot(bool)
def show_roi(self, show):
if show:
rect = self.data_handler.clip_size
if rect is not None:
self.roi = pg.ROI([rect.left(), rect.top()], [rect.width(), rect.height()], pen=pg.mkPen(color='r'))
self.roi.addScaleHandle([0.5, 1], [0.5, 0.5])
self.roi.addScaleHandle([0, 0.5], [0.5, 0.5])
self.plot_box.addItem(self.roi)
self.roi.setZValue(10)
self.roi.sigRegionChangeFinished.connect(self.on_roi_changed)
else:
if self.roi is not None:
self.plot_box.removeItem(self.roi)
self.roi = None
@pyqtSlot()
def reset_roi(self):
self.ui.selectDataButton.setChecked(False)
self.show_roi(False)
self.data_handler.clip_size = None
@pyqtSlot()
def on_roi_changed(self):
if self.roi is not None:
pos = self.roi.pos()
size = self.roi.size()
            rect = QRect(int(pos.x()), int(pos.y()), int(size.x()), int(size.y()))  # QRect expects integer coordinates
self.data_handler.set_clip_size(rect)
@pyqtSlot(bool)
def draw_lines(self, draw):
if draw:
x_range, y_range = self.plot_box.viewRange()
self.hline = pg.InfiniteLine(pos=np.mean(y_range), angle=0, pen=pg.mkPen('r'))
self.vline = pg.InfiniteLine(pos=np.mean(x_range), angle=90, pen=pg.mkPen('r'))
self.hline.setZValue(10)
self.vline.setZValue(10)
self.plot_box.addItem(self.hline)
self.plot_box.addItem(self.vline)
else:
if self.hline is not None:
self.plot_box.removeItem(self.hline)
self.hline = None
if self.vline is not None:
self.plot_box.removeItem(self.vline)
self.vline = None
def tune_pid(self):
try:
dialog = self.PidDialog(parent=self)
cam = self.data_handler.camera
dialog.set_values(*cam.get_pid())
dialog.values_changed.connect(self.set_pid_values)
dialog.show()
dialog.raise_()
dialog.activateWindow()
except Exception as err:
print(str(err))
def set_pid_values(self, p, i, d):
try:
cam = self.data_handler.camera
cam.set_pid(p, i, d)
except:
pass
| zh | 0.157011 | # self.setWindowFlags(Qt.WindowStaysOnTopHint) | 2.159931 | 2 |
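The MainWindow class in fringes/main_window.py above is never shown being launched. A minimal, hypothetical entry point could look like the sketch below; the module name main_window and the generated ui.main_window package being importable are assumptions, not part of the original file.

# hypothetical launcher for the MainWindow defined above
import sys

from PyQt5.QtWidgets import QApplication

from main_window import MainWindow  # assumed import path


def main():
    app = QApplication(sys.argv)   # one QApplication per process
    window = MainWindow()          # builds the UI, DataHandler and CameraDialog
    window.show()
    sys.exit(app.exec_())          # enter the Qt event loop


if __name__ == "__main__":
    main()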
level.py | Kedyn/SuperMario | 0 | 6630917 | <reponame>Kedyn/SuperMario<filename>level.py
from tilemap import TileMap
class Level:
def __init__(self, file, screen):
self.__tile_map = TileMap(file, screen)
self.done = False
def keydown(self, key):
self.__tile_map.mario.keydown(key)
def keyup(self, key):
self.__tile_map.mario.keyup(key)
def update(self):
self.__tile_map.update()
def render(self):
self.__tile_map.render()
| from tilemap import TileMap
class Level:
def __init__(self, file, screen):
self.__tile_map = TileMap(file, screen)
self.done = False
def keydown(self, key):
self.__tile_map.mario.keydown(key)
def keyup(self, key):
self.__tile_map.mario.keyup(key)
def update(self):
self.__tile_map.update()
def render(self):
self.__tile_map.render() | none | 1 | 2.81734 | 3 |
|
geometric_analysis.py | siorconsulting/nrip-jamaica-open-source | 0 | 6630918 | import os
from utils import *
import geopandas as gpd
# import whitebox
# wbt = whitebox.WhiteboxTools()
wbt = wbt_setup(verbose=False)
working_dir = wbt.work_dir
__all__ = ['intersect', # NOT TESTED
'zonal_statistics', # NOT TESTED
'distance_from_points', # TESTED
'distance_from_lines', # TESTED
'distance_from_polygons', # TESTED
'distance_from_raster', # TESTED
'hotspots_from_points', # TESTED
'hotspots_from_lines', # TESTED
'hotspots_from_polygons', # TESTED
'hotspots_from_raster', # TESTED
'interpolate_points', # NOT TESTED
'summarize_within', # NOT TESTED
]
def intersect(input_vector_file, overlay, output_vector_file):
"""
Function to return all feature parts that occur in both input layers
Inputs:
input_vector_file: str <-- path to vector(.shp) file
overlay: str <-- path to overlay vector(.shp) file
output_vector_file: str <-- name of output vector(.shp) file
Outputs:
output_vector_file: shapefile <-- output vector shapefile
"""
wbt.intersect(input_vector_file, overlay, output_vector_file)
wbt.work_dir = working_dir
os.chdir(wbt.work_dir)
def zonal_statistics(input_raster, input_zones, output_raster, field='FID', stat='mean', input_zones_is_raster=True):
"""Calculates zonal statistics based on an input raster, using raster or polygon zones.
Inputs:
input_raster : str <-- path to raster(.tif) file
input_zones : str <-- input path to raster(.tif) or polygon(.shp)
output_raster : str <-- output raster(.tif) file name
        field [optional] : str <-- default value is 'FID'
stat [optional] : str <-- default value is 'mean'
input_zones_is_raster [optional] : boolean <-- default value is 'True'
Exports:
output_raster : raster <-- output raster(.tif) file
Returns:
None
"""
if input_zones_is_raster:
input_zones_raster = input_zones # if statement assigning input_zones to raster variable if already a raster
else:
input_zones_raster = 'temp_input_raster.tif' # assigning file name
temp_input_zones = 'temp_input_zones.shp'
gdf = gpd.read_file(input_zones)
gdf['FID'] = gdf.index
gdf.to_file(temp_input_zones)
wbt.vector_polygons_to_raster(temp_input_zones, input_zones_raster, field=field, nodata=False) # transforming polygon to raster if input_zones is a polygon
wbt.zonal_statistics(i=input_raster, features=input_zones_raster, output=output_raster, stat=stat)
if not input_zones_is_raster:
os.remove(os.path.join(wbt.work_dir,input_zones_raster)) # removing temporary raster file if one had to be created
os.chdir(wbt.work_dir)
def distance_from_points(input_points, output_raster):
"""
Creates new raster showing distances between input points
Inputs:
input_points: str <-- path to input point (.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_points = 'temp_input_points.shp'
gdf = gpd.read_file(input_points)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_points)
input_raster = 'temp_input_raster.tif' # assigning file name
input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_points_to_raster(temp_input_points,input_raster,field='FID_wbt', cell_size=100) # points to raster transformation
wbt.convert_nodata_to_zero(input_raster, input_raster_zeros)
wbt.euclidean_distance(input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir, input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir, input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_points))
os.chdir(wbt.work_dir)
def distance_from_lines(input_lines, output_raster):
"""
Creates new raster showing distances between lines
Inputs:
        input_lines: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_lines = 'temp_input_lines.shp'
gdf = gpd.read_file(input_lines)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_lines)
input_raster = 'temp_input_raster.tif' # assigning file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_lines_to_raster(temp_input_lines, input_raster,field='FID_wbt', cell_size=100) # lines to raster transformation
wbt.convert_nodata_to_zero(input_raster,temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir,input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir,temp_input_lines))
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def distance_from_polygons(input_polygons, output_raster):
"""
Creates new raster showing distances between polygons
Inputs:
input_polygons: str <-- path to input polygon(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
temp_input_polygons = 'temp_input_polygons.shp'
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
gdf = gpd.read_file(input_polygons)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_polygons)
input_raster = 'temp_input_raster.tif' # assigning file name
wbt.vector_polygons_to_raster(temp_input_polygons ,input_raster, field='FID_wbt', cell_size = 100) # polygons to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir,input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir,temp_input_polygons))
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def distance_from_raster(input_raster, output_raster):
"""
Creates new raster showing distances within raster
Inputs:
input_raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file
"""
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on input raster
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def hotspots_from_points(input_points, output_raster, sigma = 100):
"""
Creates hotspot raster from input point shapefile
Inputs:
input_points: str <-- path to input point(.shp) file
output_raster: str <-- name of output raster(.tif) file
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 100
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
temp_input_points = 'temp_input_points.shp'
gdf = gpd.read_file(input_points)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_points)
input_raster = 'temp_hotspots_from_points.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_points_to_raster(temp_input_points, input_raster, field='FID_wbt', cell_size=100) # points to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary raster file
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_points))
os.chdir(wbt.work_dir)
def hotspots_from_lines(input_lines, output_raster, sigma =10):
"""
Creates hotspot raster from input line shapefile
Inputs:
input lines: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_lines = 'temp_input_lines.shp'
gdf = gpd.read_file(input_lines)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_lines)
input_raster = 'temp_hotspots_from_lines.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_lines_to_raster(temp_input_lines, input_raster, field = 'FID_wbt', cell_size = 100) # lines to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary raster file
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_lines)) # remove temporary file
os.chdir(wbt.work_dir)
def hotspots_from_polygons(input_polygons, output_raster,sigma = 10):
"""
Creates hotspot raster from input polygon shapefile
Inputs:
        input polygons: str <-- path to input polygon(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Output:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_polygons = 'temp_input_polygons.shp'
gdf = gpd.read_file(input_polygons)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_polygons)
input_raster = 'temp_hotspots_from_polygons.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_rasters_zeros.tif'
wbt.vector_polygons_to_raster(temp_input_polygons, input_raster, field='FID_wbt', cell_size=100) # polygons to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary file
os.remove(os.path.join(wbt.work_dir, temp_input_polygons))
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def hotspots_from_raster(input_raster, output_raster,sigma = 10):
"""
Creates hotspot raster from input raster file
Inputs:
input raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def interpolate_points(input_points, field, output_raster):
"""
    Interpolates points into a raster surface
Inputs:
input_points: str <-- path to input point shapefile
output_raster : str <-- name of output raster(.tif) file
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
wbt.radial_basis_function_interpolation(field, i=input_points, output=output_raster) # interpolation function
def summarize_within(input_vector, feature_polygons, output_polygon, field_to_summarize=None, aggfunc='mean'):
"""
    Summarizes vector data relative to existing polygons
Inputs:
input_vector: str <-- path to input vector(.shp) file. Can be point/lines/polygons
feature_polygons: str <-- path to input polygons(.shp)
output_polygon: str <-- name of output polygon(.shp) file
field_to_summarize: str <-- name of field to summarize
aggfunc [optional]: str: aggregation function, default is 'mean'
Outputs:
output_polygon: shapefile <-- polygon(.shp) file
Returns:
None
"""
input_vector = gpd.read_file(input_vector) # geopandas read vector
feature_polygons = gpd.read_file(feature_polygons) # geopandas polygons
    if field_to_summarize is None:
        field_to_summarize = 'Index_WBT'
        input_vector['Index_WBT'] = input_vector.index
# input_vector_gdf = gpd.GeoDataFrame(input_vector[[field_to_summarize,'geometry']])
input_vector_gdf = input_vector
input_vector_join = input_vector_gdf.join(other=feature_polygons,rsuffix='_P') # attribute join on both inputs
input_vector_join = input_vector_join.drop(columns=['geometry_P'])
    input_vector_join = input_vector_join.dissolve(by=field_to_summarize, aggfunc=aggfunc) # dissolve geometries and keep the result
    input_vector_join.to_file(os.path.join(wbt.work_dir, output_polygon)) # save as the output_polygon shapefile
| import os
from utils import *
import geopandas as gpd
# import whitebox
# wbt = whitebox.WhiteboxTools()
wbt = wbt_setup(verbose=False)
working_dir = wbt.work_dir
__all__ = ['intersect', # NOT TESTED
'zonal_statistics', # NOT TESTED
'distance_from_points', # TESTED
'distance_from_lines', # TESTED
'distance_from_polygons', # TESTED
'distance_from_raster', # TESTED
'hotspots_from_points', # TESTED
'hotspots_from_lines', # TESTED
'hotspots_from_polygons', # TESTED
'hotspots_from_raster', # TESTED
'interpolate_points', # NOT TESTED
'summarize_within', # NOT TESTED
]
def intersect(input_vector_file, overlay, output_vector_file):
"""
Function to return all feature parts that occur in both input layers
Inputs:
input_vector_file: str <-- path to vector(.shp) file
overlay: str <-- path to overlay vector(.shp) file
output_vector_file: str <-- name of output vector(.shp) file
Outputs:
output_vector_file: shapefile <-- output vector shapefile
"""
wbt.intersect(input_vector_file, overlay, output_vector_file)
wbt.work_dir = working_dir
os.chdir(wbt.work_dir)
def zonal_statistics(input_raster, input_zones, output_raster, field='FID', stat='mean', input_zones_is_raster=True):
"""Calculates zonal statistics based on an input raster, using raster or polygon zones.
Inputs:
input_raster : str <-- path to raster(.tif) file
input_zones : str <-- input path to raster(.tif) or polygon(.shp)
output_raster : str <-- output raster(.tif) file name
        field [optional] : str <-- default value is 'FID'
stat [optional] : str <-- default value is 'mean'
input_zones_is_raster [optional] : boolean <-- default value is 'True'
Exports:
output_raster : raster <-- output raster(.tif) file
Returns:
None
"""
if input_zones_is_raster:
input_zones_raster = input_zones # if statement assigning input_zones to raster variable if already a raster
else:
input_zones_raster = 'temp_input_raster.tif' # assigning file name
temp_input_zones = 'temp_input_zones.shp'
gdf = gpd.read_file(input_zones)
gdf['FID'] = gdf.index
gdf.to_file(temp_input_zones)
wbt.vector_polygons_to_raster(temp_input_zones, input_zones_raster, field=field, nodata=False) # transforming polygon to raster if input_zones is a polygon
wbt.zonal_statistics(i=input_raster, features=input_zones_raster, output=output_raster, stat=stat)
if not input_zones_is_raster:
os.remove(os.path.join(wbt.work_dir,input_zones_raster)) # removing temporary raster file if one had to be created
os.chdir(wbt.work_dir)
def distance_from_points(input_points, output_raster):
"""
Creates new raster showing distances between input points
Inputs:
input_points: str <-- path to input point (.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_points = 'temp_input_points.shp'
gdf = gpd.read_file(input_points)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_points)
input_raster = 'temp_input_raster.tif' # assigning file name
input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_points_to_raster(temp_input_points,input_raster,field='FID_wbt', cell_size=100) # points to raster transformation
wbt.convert_nodata_to_zero(input_raster, input_raster_zeros)
wbt.euclidean_distance(input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir, input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir, input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_points))
os.chdir(wbt.work_dir)
def distance_from_lines(input_lines, output_raster):
"""
Creates new raster showing distances between lines
Inputs:
        input_lines: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_lines = 'temp_input_lines.shp'
gdf = gpd.read_file(input_lines)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_lines)
input_raster = 'temp_input_raster.tif' # assigning file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_lines_to_raster(temp_input_lines, input_raster,field='FID_wbt', cell_size=100) # lines to raster transformation
wbt.convert_nodata_to_zero(input_raster,temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir,input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir,temp_input_lines))
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def distance_from_polygons(input_polygons, output_raster):
"""
Creates new raster showing distances between polygons
Inputs:
input_polygons: str <-- path to input polygon(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
temp_input_polygons = 'temp_input_polygons.shp'
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
gdf = gpd.read_file(input_polygons)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_polygons)
input_raster = 'temp_input_raster.tif' # assigning file name
wbt.vector_polygons_to_raster(temp_input_polygons ,input_raster, field='FID_wbt', cell_size = 100) # polygons to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on created raster
os.remove(os.path.join(wbt.work_dir,input_raster)) # removes temporary raster file
os.remove(os.path.join(wbt.work_dir,temp_input_polygons))
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def distance_from_raster(input_raster, output_raster):
"""
Creates new raster showing distances within raster
Inputs:
input_raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file
"""
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.euclidean_distance(temp_input_raster_zeros, output_raster) # euclidean distance calculated on input raster
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def hotspots_from_points(input_points, output_raster, sigma = 100):
"""
Creates hotspot raster from input point shapefile
Inputs:
input_points: str <-- path to input point(.shp) file
output_raster: str <-- name of output raster(.tif) file
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 100
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
temp_input_points = 'temp_input_points.shp'
gdf = gpd.read_file(input_points)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_points)
input_raster = 'temp_hotspots_from_points.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_points_to_raster(temp_input_points, input_raster, field='FID_wbt', cell_size=100) # points to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary raster file
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_points))
os.chdir(wbt.work_dir)
def hotspots_from_lines(input_lines, output_raster, sigma =10):
"""
Creates hotspot raster from input line shapefile
Inputs:
input lines: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_lines = 'temp_input_lines.shp'
gdf = gpd.read_file(input_lines)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_lines)
input_raster = 'temp_hotspots_from_lines.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.vector_lines_to_raster(temp_input_lines, input_raster, field = 'FID_wbt', cell_size = 100) # lines to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary raster file
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.remove(os.path.join(wbt.work_dir, temp_input_lines)) # remove temporary file
os.chdir(wbt.work_dir)
def hotspots_from_polygons(input_polygons, output_raster,sigma = 10):
"""
Creates hotspot raster from input polygon shapefile
Inputs:
        input polygons: str <-- path to input polygon(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Output:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_polygons = 'temp_input_polygons.shp'
gdf = gpd.read_file(input_polygons)
gdf['FID_wbt'] = 1
gdf.to_file(temp_input_polygons)
input_raster = 'temp_hotspots_from_polygons.tif' # assigning temporary file name
temp_input_raster_zeros = 'temp_input_rasters_zeros.tif'
wbt.vector_polygons_to_raster(temp_input_polygons, input_raster, field='FID_wbt', cell_size=100) # polygons to raster transformation
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir, input_raster)) # remove temporary file
os.remove(os.path.join(wbt.work_dir, temp_input_polygons))
os.remove(os.path.join(wbt.work_dir, temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def hotspots_from_raster(input_raster, output_raster,sigma = 10):
"""
Creates hotspot raster from input raster file
Inputs:
input raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None
"""
temp_input_raster_zeros = 'temp_input_raster_zeros.tif'
wbt.convert_nodata_to_zero(input_raster, temp_input_raster_zeros)
wbt.gaussian_filter(temp_input_raster_zeros, output_raster, sigma=sigma)
os.remove(os.path.join(wbt.work_dir,temp_input_raster_zeros))
os.chdir(wbt.work_dir)
def interpolate_points(input_points, field, output_raster):
"""
    Interpolates points into a raster surface
Inputs:
input_points: str <-- path to input point shapefile
output_raster : str <-- name of output raster(.tif) file
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None
"""
wbt.radial_basis_function_interpolation(field, i=input_points, output=output_raster) # interpolation function
def summarize_within(input_vector, feature_polygons, output_polygon, field_to_summarize=None, aggfunc='mean'):
"""
    Summarizes vector data relative to existing polygons
Inputs:
input_vector: str <-- path to input vector(.shp) file. Can be point/lines/polygons
feature_polygons: str <-- path to input polygons(.shp)
output_polygon: str <-- name of output polygon(.shp) file
field_to_summarize: str <-- name of field to summarize
aggfunc [optional]: str: aggregation function, default is 'mean'
Outputs:
output_polygon: shapefile <-- polygon(.shp) file
Returns:
None
"""
input_vector = gpd.read_file(input_vector) # geopandas read vector
feature_polygons = gpd.read_file(feature_polygons) # geopandas polygons
    if field_to_summarize is None:
        field_to_summarize = 'Index_WBT'
        input_vector['Index_WBT'] = input_vector.index
# input_vector_gdf = gpd.GeoDataFrame(input_vector[[field_to_summarize,'geometry']])
input_vector_gdf = input_vector
input_vector_join = input_vector_gdf.join(other=feature_polygons,rsuffix='_P') # attribute join on both inputs
input_vector_join = input_vector_join.drop(columns=['geometry_P'])
    input_vector_join = input_vector_join.dissolve(by=field_to_summarize, aggfunc=aggfunc) # dissolve geometries and keep the result
    input_vector_join.to_file(os.path.join(wbt.work_dir, output_polygon)) # save as the output_polygon shapefile
| en | 0.505044 | # import whitebox # wbt = whitebox.WhiteboxTools() # NOT TESTED # NOT TESTED # TESTED # TESTED # TESTED # TESTED # TESTED # TESTED # TESTED # TESTED # NOT TESTED # NOT TESTED Function to return all feature parts that occur in both input layers
Inputs:
input_vector_file: str <-- path to vector(.shp) file
overlay: str <-- path to overlay vector(.shp) file
output_vector_file: str <-- name of output vector(.shp) file
Outputs:
output_vector_file: shapefile <-- output vector shapefile Calculates zonal statistics based on an input raster, using raster or polygon zones.
Inputs:
input_raster : str <-- path to raster(.tif) file
input_zones : str <-- input path to raster(.tif) or polygon(.shp)
output_raster : str <-- output raster(.tif) file name
field [optional] : str <-- deafult value is 'FID'
stat [optional] : str <-- default value is 'mean'
input_zones_is_raster [optional] : boolean <-- default value is 'True'
Exports:
output_raster : raster <-- output raster(.tif) file
Returns:
None # if statement assigning input_zones to raster variable if already a raster # assigning file name # transforming polygon to raster if input_zones is a polygon # removing temporary raster file if one had to be created Creates new raster showing distances between input points
Inputs:
input_points: str <-- path to input point (.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None # assigning file name # points to raster transformation # euclidean distance calculated on created raster # removes temporary raster file Creates new raster showing distances between lines
Inputs:
input_lines: str <-- path to input point(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None # assigning file name # lines to raster transformation # euclidean distance calculated on created raster # removes temporary raster file Creates new raster showing distances between polygons
Inputs:
input_polygons: str <-- path to input polygon(.shp) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None # assigning file name # polygons to raster transformation # euclidean distance calculated on created raster # removes temporary raster file Creates new raster showing distances within raster
Inputs:
input_raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
Outputs:
output_raster: raster <-- output raster(.tif) file # euclidean distance calculated on input raster Creates hotspot raster from input point shapefile
Inputs:
input_points: str <-- path to input point(.shp) file
output_raster: str <-- name of output raster(.tif) file
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 100
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None # assigning temporary file name # points to raster transformation # remove temporary raster file Creates hotspot raster from input line shapefile
Inputs:
input lines: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None # assigning temporary file name # lines to raster transformation # remove temporary raster file # remove temporary file Creates hotspot raster from input polygon shapefile
Inputs:
input polygons: str <-- path to input line(.shp) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Output:
output_raster: raster <-- raster(.tif) file
Returns:
None # assigning temporary file name # polygons to raster transformation # remove temporary file Creates hotspot raster from input raster file
Inputs:
input raster: str <-- path to input raster(.tif) file
output_raster: str <-- raster(.tif) file name
sigma [optional]: str <-- represents standard deviation of gaussian filter, default 10
Outputs:
output_raster: raster <-- raster(.tif) file
Returns:
None Intepolates points into a raster surface
Inputs:
input_points: str <-- path to input point shapefile
output_raster : str <-- name of output raster(.tif) file
Outputs:
output_raster: raster <-- output raster(.tif) file
Returns:
None # interpolation function Summarizies vector data relative to existing polygons
Inputs:
input_vector: str <-- path to input vector(.shp) file. Can be point/lines/polygons
feature_polygons: str <-- path to input polygons(.shp)
output_polygon: str <-- name of output polygon(.shp) file
field_to_summarize: str <-- name of field to summarize
aggfunc [optional]: str: aggregation function, default is 'mean'
Outputs:
output_polygon: shapefile <-- polygon(.shp) file
Returns:
None # geopandas read vector # geopandas polygons # input_vector_gdf = gpd.GeoDataFrame(input_vector[[field_to_summarize,'geometry']]) # attribute join on both inputs # dissolve geometries # save as file ouput_polygons | 2.902955 | 3 |
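Every helper in geometric_analysis.py above follows the same pattern: rasterise the vector input with a constant field, convert nodata to zero, run the WhiteboxTools operation, then delete the temporaries. The following is a hedged usage sketch chaining a few of the helpers; the module alias and the shapefile/raster names are assumptions.

# hypothetical driver script for the helpers above
import geometric_analysis as ga  # assumed module name

# distance surface from a point shapefile
ga.distance_from_points("facilities.shp", "facility_distance.tif")

# hotspot surface from the same points, with a wider gaussian filter than the default
ga.hotspots_from_points("facilities.shp", "facility_hotspots.tif", sigma=250)

# zonal mean of the hotspot raster per polygon zone
ga.zonal_statistics(
    "facility_hotspots.tif",
    "parishes.shp",
    "hotspots_by_parish.tif",
    stat="mean",
    input_zones_is_raster=False,
)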
accountable/config.py | wohlgejm/accountable | 2 | 6630919 | import os
try:
from UserDict import UserDict
except ImportError:
from collections import UserDict
import click
import yaml
from requests.auth import HTTPBasicAuth
CONFIG_DIR = os.path.expanduser('~/.accountable')
DEFAULT_ISSUE_FIELDS = [
{'reporter': 'displayName'},
{'assignee': 'displayName'},
{'issuetype': 'name'},
{'status': {'statusCategory': 'name'}},
'summary',
'description'
]
DEFAULT_ALIASES = {'cob': 'checkoutbranch',
'co': 'checkout'}
CONFIG_FILE = '{}/config.yaml'.format(CONFIG_DIR)
class Config(UserDict, object):
def __getitem__(self, key):
if not self.data:
self.data = self._load_config()
return self.data[key]
def update(self, *args, **kwargs):
if self._exists():
self.data = self._load_config()
super(Config, self).update(*args, **kwargs)
self.data.setdefault('aliases', DEFAULT_ALIASES)
self.data.setdefault('issue_fields', DEFAULT_ISSUE_FIELDS)
self._save()
def issue_schema(self):
schema = {}
for field in self['issue_fields']:
if isinstance(field, str):
schema[field] = None
else:
schema.update(field)
return schema
@property
def auth(self):
return HTTPBasicAuth(self['username'], self['password'])
def _exists(self):
return True if os.path.isfile(CONFIG_FILE) else False
def _load_config(self):
with open(CONFIG_FILE, 'r') as f:
            config = yaml.safe_load(f)  # safe_load needs no Loader argument and will not execute arbitrary tags
return config
def _save(self):
self._create_config_dir()
with open(CONFIG_FILE, 'w+') as f:
f.write(yaml.dump(self.data, default_flow_style=False,
encoding='utf-8'))
click.secho(
'Configuration file written to {}'.format(CONFIG_FILE),
fg='blue'
)
def _create_config_dir(self):
if not os.path.exists(CONFIG_DIR):
click.secho('Creating {}'.format(CONFIG_DIR), fg='blue')
os.makedirs(CONFIG_DIR)
| import os
try:
from UserDict import UserDict
except ImportError:
from collections import UserDict
import click
import yaml
from requests.auth import HTTPBasicAuth
CONFIG_DIR = os.path.expanduser('~/.accountable')
DEFAULT_ISSUE_FIELDS = [
{'reporter': 'displayName'},
{'assignee': 'displayName'},
{'issuetype': 'name'},
{'status': {'statusCategory': 'name'}},
'summary',
'description'
]
DEFAULT_ALIASES = {'cob': 'checkoutbranch',
'co': 'checkout'}
CONFIG_FILE = '{}/config.yaml'.format(CONFIG_DIR)
class Config(UserDict, object):
def __getitem__(self, key):
if not self.data:
self.data = self._load_config()
return self.data[key]
def update(self, *args, **kwargs):
if self._exists():
self.data = self._load_config()
super(Config, self).update(*args, **kwargs)
self.data.setdefault('aliases', DEFAULT_ALIASES)
self.data.setdefault('issue_fields', DEFAULT_ISSUE_FIELDS)
self._save()
def issue_schema(self):
schema = {}
for field in self['issue_fields']:
if isinstance(field, str):
schema[field] = None
else:
schema.update(field)
return schema
@property
def auth(self):
return HTTPBasicAuth(self['username'], self['password'])
def _exists(self):
return True if os.path.isfile(CONFIG_FILE) else False
def _load_config(self):
with open(CONFIG_FILE, 'r') as f:
            config = yaml.safe_load(f)  # safe_load needs no Loader argument and will not execute arbitrary tags
return config
def _save(self):
self._create_config_dir()
with open(CONFIG_FILE, 'w+') as f:
f.write(yaml.dump(self.data, default_flow_style=False,
encoding='utf-8'))
click.secho(
'Configuration file written to {}'.format(CONFIG_FILE),
fg='blue'
)
def _create_config_dir(self):
if not os.path.exists(CONFIG_DIR):
click.secho('Creating {}'.format(CONFIG_DIR), fg='blue')
os.makedirs(CONFIG_DIR)
| none | 1 | 2.324412 | 2 |
|
obsolete/parser/view_namespace.py | dina-fouad/pyccel | 206 | 6630920 | <filename>obsolete/parser/view_namespace.py
def view_namespace(namespace, entry):
"""
Print contents of a namespace.
Parameters
----------
namespace: dict
Dictionary that represents the current namespace, usually attached to a BasicParser object.
entry: str
Key of interest.
"""
# TODO improve
try:
from tabulate import tabulate
table = []
for (k, v) in namespace[entry].items():
k_str = '{}'.format(k)
if entry == 'imports':
if v is None:
v_str = '*'
else:
v_str = '{}'.format(v)
elif entry == 'variables':
v_str = '{}'.format(type(v))
else:
raise NotImplementedError('TODO')
line = [k_str, v_str]
table.append(line)
headers = ['module', 'target']
# txt = tabulate(table, headers, tablefmt="rst")
txt = tabulate(table, tablefmt='rst')
print (txt)
except NotImplementedError:
print ('------- namespace.{} -------'.format(entry))
for (k, v) in namespace[entry].items():
print ('{var} \t :: \t {value}'.format(var=k, value=v))
print ('-------------------------')
| <filename>obsolete/parser/view_namespace.py
def view_namespace(namespace, entry):
"""
Print contents of a namespace.
Parameters
----------
namespace: dict
Dictionary that represents the current namespace, usually attached to a BasicParser object.
entry: str
Key of interest.
"""
# TODO improve
try:
from tabulate import tabulate
table = []
for (k, v) in namespace[entry].items():
k_str = '{}'.format(k)
if entry == 'imports':
if v is None:
v_str = '*'
else:
v_str = '{}'.format(v)
elif entry == 'variables':
v_str = '{}'.format(type(v))
else:
raise NotImplementedError('TODO')
line = [k_str, v_str]
table.append(line)
headers = ['module', 'target']
# txt = tabulate(table, headers, tablefmt="rst")
txt = tabulate(table, tablefmt='rst')
print (txt)
except NotImplementedError:
print ('------- namespace.{} -------'.format(entry))
for (k, v) in namespace[entry].items():
print ('{var} \t :: \t {value}'.format(var=k, value=v))
print ('-------------------------')
| en | 0.499163 | Print contents of a namespace. Parameters ---------- namespace: dict Dictionary that represents the current namespace, usually attached to a BasicParser object. entry: str Key of interest. # TODO improve # txt = tabulate(table, headers, tablefmt="rst") | 3.311317 | 3 |
facebookApi/views.py | vladyslavnUA/client-dynamic | 1 | 6630921 | <gh_stars>1-10
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView)
import datetime
from dateutil import parser
from django.contrib.auth.models import User
from django.shortcuts import render, HttpResponseRedirect, HttpResponse, redirect
from django.urls import reverse, reverse_lazy
import facebook
from django.core.files.storage import FileSystemStorage
from .facebookAPI import FacebookAPI
class DashboardView(ListView):
# returns sum of an array
def get_sum(self, data):
total = 0
for i in data:
total += i
return total
def get_fullName(self, user):
name = False
        if user.first_name != "":
            name = str(user.first_name)
        if user.last_name != "":
            name = (name + " " if name else "") + str(user.last_name)
return name
def get(self, request, page_token, page_id):
user = User.objects.get(pk=request.user.id)
social_user = user.social_auth.get(provider="facebook")
graph = FacebookAPI(page_token, page_id)
fb_page_engagments, fb_page_engagments_months = graph.get_page_post_engagements()
fb_total_cta, fb_total_cta_months = graph.get_page_clicks_monthly()
# graph.page_positive_feedback_by_type()
context = {'page': graph.get_page_info(), "fb_p_eng_users": graph.get_page_engagements(),
"fb_page_reach": graph.get_page_reach(), "fb_page_impressions": graph.get_page_impressions_monthly(),
"fb_page_engagments": fb_page_engagments, "fb_page_engagments_months": fb_page_engagments_months,
"total_page_engagments": self.get_sum(fb_page_engagments), "fb_total_cta": fb_total_cta,
"fb_total_cta_months": fb_total_cta_months, "picture": social_user.extra_data["picture"]["data"]["url"],
"total_cta": self.get_sum(fb_total_cta), 'posts': graph.get_page_posts(), "name": self.get_fullName(user),
"token": page_token
}
return render(request, "facebookApi/dashboard.html", context)
def PostFacebook(req, page_token):
user = User.objects.get(pk=req.user.id)
graph = FacebookAPI(page_token)
if req.method == 'POST':
message=req.POST.get('message')
location=req.POST.get('location')
image=req.POST.get('image')
link=req.POST.get('link')
        if image:  # an image was submitted with the post
myfile = req.FILES['image']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
graph.post_to_facebook(message, uploaded_file_url, link)
else:
graph.post_to_facebook(message, None, link)
return HttpResponseRedirect(reverse('facebookApi:dashboard', kwargs={'page_token': user.profile.fb_page_token, 'page_id':user.profile.fb_page_id }))
class LinkPageView(ListView):
def get(self, request):
context = {'data': 'self.getWeatherData()'}
return render(request, "facebookApi/link-page.html", context)
class PagesView(ListView):
def get(self, request, show):
user = User.objects.get(pk=request.user.id)
social_user = user.social_auth.get(provider="facebook")
token = social_user.extra_data['access_token']
# if user.profile.fb_page_id != '' and user.profile.fb_page_token != '':
# return HttpResponseRedirect(reverse('clients:dashboard', kwargs={'page_token': user.profile.fb_page_token, 'page_id':user.profile.fb_page_id }))
graph = FacebookAPI(token)
data = graph.get_page_info('first_name, location, link, email, posts, picture')
context = {'pages': graph.get_pages(), "show": show}
return render(request, "facebookApi/pages.html", context)
class SinglePageView(ListView):
def get(self, request, token, page_id, page_name):
user = User.objects.get(pk=request.user.id)
user = user.social_auth.get(provider="facebook")
        graph = FacebookAPI(token, page_id)  # the custom wrapper provides get_page_posts(); facebook.GraphAPI does not
context = {'posts': graph.get_page_posts(), 'page_id': page_id, 'page_name': page_name, 'page_token': token}
return render(request, "facebookApi/single-page.html", context)
class UserProfileView(ListView):
def get(self, request):
user = User.objects.get(pk=request.user.id)
# print("-------------------")
try:
social_user = user.social_auth.get()
social_use = social_user.extra_data.get('id')
# token = social_user.extra_data['access_token']
social_s = social_user.extra_data
print("Social User: ", social_user)
print("Social: ", social_s)
if "twitter" == social_user.provider:
return redirect('twitter:user-profile')
elif "linkedin-oauth2" == social_user.provider:
return redirect('linkedin:user-profile')
except:
pass
name = False
        if user.first_name != "" and user.last_name != "":
name = str(user.first_name)+" "+str(user.last_name)
try:
context = {"user": user, "name": name, "social_user": social_user.extra_data, "account_id": social_use }
if 'picture' in social_user.extra_data:
context['picture'] = social_user.extra_data["picture"]["data"]["url"]
except:
context = {"user": user, "name": name}
return render(request, "facebookApi/user.html", context)
def post(self, request):
user = User.objects.get(pk=request.user.id)
# profile = Profile()
user.first_name = request.POST.get("first_name")
user.last_name = request.POST.get("last_name")
user.email = request.POST.get("email")
user.profile.company = request.POST.get("company")
user.profile.bio = request.POST.get("bio")
user.profile.role = request.POST.get("role")
user.save()
return HttpResponseRedirect(reverse('facebookApi:link-page'))
def SaveFbPageView(req, page_token, page_id):
user = User.objects.get(pk=req.user.id)
user.profile.fb_page_id = page_id
user.profile.fb_page_token = page_token
user.save()
return HttpResponseRedirect(reverse('facebookApi:dashboard', kwargs={'page_token': page_token, 'page_id':page_id })) | from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView)
import datetime
from dateutil import parser
from django.contrib.auth.models import User
from django.shortcuts import render, HttpResponseRedirect, HttpResponse, redirect
from django.urls import reverse, reverse_lazy
import facebook
from django.core.files.storage import FileSystemStorage
from .facebookAPI import FacebookAPI
class DashboardView(ListView):
# returns sum of an array
def get_sum(self, data):
total = 0
for i in data:
total += i
return total
def get_fullName(self, user):
name = False
        if user.first_name != "":
            name = str(user.first_name)
        if user.last_name != "":
            name = (name + " " if name else "") + str(user.last_name)
return name
def get(self, request, page_token, page_id):
user = User.objects.get(pk=request.user.id)
social_user = user.social_auth.get(provider="facebook")
graph = FacebookAPI(page_token, page_id)
fb_page_engagments, fb_page_engagments_months = graph.get_page_post_engagements()
fb_total_cta, fb_total_cta_months = graph.get_page_clicks_monthly()
# graph.page_positive_feedback_by_type()
context = {'page': graph.get_page_info(), "fb_p_eng_users": graph.get_page_engagements(),
"fb_page_reach": graph.get_page_reach(), "fb_page_impressions": graph.get_page_impressions_monthly(),
"fb_page_engagments": fb_page_engagments, "fb_page_engagments_months": fb_page_engagments_months,
"total_page_engagments": self.get_sum(fb_page_engagments), "fb_total_cta": fb_total_cta,
"fb_total_cta_months": fb_total_cta_months, "picture": social_user.extra_data["picture"]["data"]["url"],
"total_cta": self.get_sum(fb_total_cta), 'posts': graph.get_page_posts(), "name": self.get_fullName(user),
"token": page_token
}
return render(request, "facebookApi/dashboard.html", context)
def PostFacebook(req, page_token):
user = User.objects.get(pk=req.user.id)
graph = FacebookAPI(page_token)
if req.method == 'POST':
message=req.POST.get('message')
location=req.POST.get('location')
image=req.POST.get('image')
link=req.POST.get('link')
        if image:  # an image was submitted with the post
myfile = req.FILES['image']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
graph.post_to_facebook(message, uploaded_file_url, link)
else:
graph.post_to_facebook(message, None, link)
return HttpResponseRedirect(reverse('facebookApi:dashboard', kwargs={'page_token': user.profile.fb_page_token, 'page_id':user.profile.fb_page_id }))
class LinkPageView(ListView):
def get(self, request):
context = {'data': 'self.getWeatherData()'}
return render(request, "facebookApi/link-page.html", context)
class PagesView(ListView):
def get(self, request, show):
user = User.objects.get(pk=request.user.id)
social_user = user.social_auth.get(provider="facebook")
token = social_user.extra_data['access_token']
# if user.profile.fb_page_id != '' and user.profile.fb_page_token != '':
# return HttpResponseRedirect(reverse('clients:dashboard', kwargs={'page_token': user.profile.fb_page_token, 'page_id':user.profile.fb_page_id }))
graph = FacebookAPI(token)
data = graph.get_page_info('first_name, location, link, email, posts, picture')
context = {'pages': graph.get_pages(), "show": show}
return render(request, "facebookApi/pages.html", context)
class SinglePageView(ListView):
def get(self, request, token, page_id, page_name):
user = User.objects.get(pk=request.user.id)
user = user.social_auth.get(provider="facebook")
        graph = FacebookAPI(token, page_id)  # the custom wrapper provides get_page_posts(); facebook.GraphAPI does not
context = {'posts': graph.get_page_posts(), 'page_id': page_id, 'page_name': page_name, 'page_token': token}
return render(request, "facebookApi/single-page.html", context)
class UserProfileView(ListView):
def get(self, request):
user = User.objects.get(pk=request.user.id)
# print("-------------------")
try:
social_user = user.social_auth.get()
social_use = social_user.extra_data.get('id')
# token = social_user.extra_data['access_token']
social_s = social_user.extra_data
print("Social User: ", social_user)
print("Social: ", social_s)
if "twitter" == social_user.provider:
return redirect('twitter:user-profile')
elif "linkedin-oauth2" == social_user.provider:
return redirect('linkedin:user-profile')
except:
pass
name = False
        if user.first_name != "" and user.last_name != "":
name = str(user.first_name)+" "+str(user.last_name)
try:
context = {"user": user, "name": name, "social_user": social_user.extra_data, "account_id": social_use }
if 'picture' in social_user.extra_data:
context['picture'] = social_user.extra_data["picture"]["data"]["url"]
except:
context = {"user": user, "name": name}
return render(request, "facebookApi/user.html", context)
def post(self, request):
user = User.objects.get(pk=request.user.id)
# profile = Profile()
user.first_name = request.POST.get("first_name")
user.last_name = request.POST.get("last_name")
user.email = request.POST.get("email")
user.profile.company = request.POST.get("company")
user.profile.bio = request.POST.get("bio")
user.profile.role = request.POST.get("role")
user.save()
return HttpResponseRedirect(reverse('facebookApi:link-page'))
def SaveFbPageView(req, page_token, page_id):
user = User.objects.get(pk=req.user.id)
user.profile.fb_page_id = page_id
user.profile.fb_page_token = page_token
user.save()
return HttpResponseRedirect(reverse('facebookApi:dashboard', kwargs={'page_token': page_token, 'page_id':page_id })) | en | 0.202904 | # returns sum of an array # graph.page_positive_feedback_by_type() # if user.profile.fb_page_id != '' and user.profile.fb_page_token != '': # return HttpResponseRedirect(reverse('clients:dashboard', kwargs={'page_token': user.profile.fb_page_token, 'page_id':user.profile.fb_page_id })) # print("-------------------") # token = social_user.extra_data['access_token'] # profile = Profile() | 2.153684 | 2 |
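The views above are resolved by name (facebookApi:dashboard, facebookApi:link-page, ...) but the URLconf is not part of this file. Below is a hedged sketch of what a matching facebookApi/urls.py could look like; the route strings are assumptions chosen to satisfy the view signatures.

# hypothetical facebookApi/urls.py matching the reverse() calls above
from django.urls import path

from . import views

app_name = "facebookApi"

urlpatterns = [
    path("dashboard/<str:page_token>/<str:page_id>/",
         views.DashboardView.as_view(), name="dashboard"),
    path("link-page/", views.LinkPageView.as_view(), name="link-page"),
    path("pages/<str:show>/", views.PagesView.as_view(), name="pages"),
    path("page/<str:token>/<str:page_id>/<str:page_name>/",
         views.SinglePageView.as_view(), name="single-page"),
    path("post/<str:page_token>/", views.PostFacebook, name="post"),
    path("save-page/<str:page_token>/<str:page_id>/",
         views.SaveFbPageView, name="save-page"),
    path("profile/", views.UserProfileView.as_view(), name="user-profile"),
]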
airbyte-cdk/python/airbyte_cdk/sources/declarative/declarative_source.py | heap/airbyte | 22 | 6630922 | <gh_stars>10-100
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from abc import abstractmethod
from typing import Tuple
from airbyte_cdk.sources.abstract_source import AbstractSource
from airbyte_cdk.sources.declarative.checks.connection_checker import ConnectionChecker
class DeclarativeSource(AbstractSource):
"""
Base class for declarative Source. Concrete sources need to define the connection_checker to use
"""
@property
@abstractmethod
def connection_checker(self) -> ConnectionChecker:
pass
def check_connection(self, logger, config) -> Tuple[bool, any]:
return self.connection_checker.check_connection(self, logger, config)
| #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from abc import abstractmethod
from typing import Tuple
from airbyte_cdk.sources.abstract_source import AbstractSource
from airbyte_cdk.sources.declarative.checks.connection_checker import ConnectionChecker
class DeclarativeSource(AbstractSource):
"""
Base class for declarative Source. Concrete sources need to define the connection_checker to use
"""
@property
@abstractmethod
def connection_checker(self) -> ConnectionChecker:
pass
def check_connection(self, logger, config) -> Tuple[bool, any]:
return self.connection_checker.check_connection(self, logger, config) | en | 0.835974 | # # Copyright (c) 2022 Airbyte, Inc., all rights reserved. # Base class for declarative Source. Concrete sources need to define the connection_checker to use | 2.256166 | 2 |
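DeclarativeSource above only adds the connection_checker hook on top of AbstractSource, so a concrete source has to supply a ConnectionChecker and still implement streams() from AbstractSource. A hedged sketch follows; the CheckStream constructor arguments and the stream-building helper are assumptions that may differ between CDK versions.

# hypothetical concrete source built on DeclarativeSource
from airbyte_cdk.sources.declarative.checks import CheckStream
from airbyte_cdk.sources.declarative.declarative_source import DeclarativeSource


class MyDeclarativeSource(DeclarativeSource):
    @property
    def connection_checker(self):
        # delegate the health check to one stream; the exact CheckStream
        # signature is assumed and may vary with the CDK version
        return CheckStream(stream_names=["customers"], options={})

    def streams(self, config):
        # AbstractSource still requires streams(); build them from config here
        return build_streams(config)  # assumed helper, not defined in this sketch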
kibble/scanners/scanners/git-sloc.py | jbampton/kibble | 1 | 6630923 | <gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import time
from kibble.configuration import conf
from kibble.scanners.utils import git, sloc
""" Source Lines of Code counter for Git """
title = "SloC Counter for Git"
version = "0.1.0"
def accepts(source):
""" Do we accept this source? """
if source["type"] == "git":
return True
# There are cases where we have a github repo, but don't wanna analyze the code, just issues
    if source["type"] == "github" and not source.get("issuesonly", False):
return True
return False
def scan(kibble_bit, source):
rid = source["sourceID"]
url = source["sourceURL"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
if source["steps"]["sync"]["good"] and os.path.exists(gpath):
source["steps"]["count"] = {
"time": time.time(),
"status": "SLoC count started at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": True,
"good": True,
}
kibble_bit.update_source(source)
try:
branch = git.default_branch(source, gpath)
subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True)
except: # pylint: disable=bare-except
kibble_bit.pprint("SLoC counter failed to find main branch for %s!!" % url)
return False
kibble_bit.pprint("Running SLoC count for %s" % url)
languages, codecount, comment, blank, years, cost = sloc.count(gpath)
sloc_ = {
"sourceID": source["sourceID"],
"loc": codecount,
"comments": comment,
"blanks": blank,
"years": years,
"cost": cost,
"languages": languages,
}
source["sloc"] = sloc_
source["steps"]["count"] = {
"time": time.time(),
"status": "SLoC count completed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": False,
"good": True,
}
kibble_bit.update_source(source)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import time
from kibble.configuration import conf
from kibble.scanners.utils import git, sloc
""" Source Lines of Code counter for Git """
title = "SloC Counter for Git"
version = "0.1.0"
def accepts(source):
""" Do we accept this source? """
if source["type"] == "git":
return True
# There are cases where we have a github repo, but don't wanna analyze the code, just issues
if source["type"] == "github" and source.get("issuesonly", False) == False:
return True
return False
def scan(kibble_bit, source):
rid = source["sourceID"]
url = source["sourceURL"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
if source["steps"]["sync"]["good"] and os.path.exists(gpath):
source["steps"]["count"] = {
"time": time.time(),
"status": "SLoC count started at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": True,
"good": True,
}
kibble_bit.update_source(source)
try:
branch = git.default_branch(source, gpath)
subprocess.call("cd %s && git checkout %s" % (gpath, branch), shell=True)
except: # pylint: disable=bare-except
kibble_bit.pprint("SLoC counter failed to find main branch for %s!!" % url)
return False
kibble_bit.pprint("Running SLoC count for %s" % url)
languages, codecount, comment, blank, years, cost = sloc.count(gpath)
sloc_ = {
"sourceID": source["sourceID"],
"loc": codecount,
"comments": comment,
"blanks": blank,
"years": years,
"cost": cost,
"languages": languages,
}
source["sloc"] = sloc_
source["steps"]["count"] = {
"time": time.time(),
"status": "SLoC count completed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": False,
"good": True,
}
kibble_bit.update_source(source) | en | 0.872465 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Source Lines of Code counter for Git Do we accept this source? # There are cases where we have a github repo, but don't wanna analyze the code, just issues # pylint: disable=bare-except | 2.143491 | 2 |
quant/common/util.py | doubleDragon/QuantBot | 7 | 6630924 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
def convert_currency_bfx(currency):
currency = currency.lower()
if currency == "dash":
currency = "dsh"
if currency == "bcc":
currency = "bch"
if currency == "iota":
currency = 'iot'
return currency.upper()
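# Example of the mapping above (illustrative): convert_currency_bfx("dash") returns "DSH";
# currencies without a special case are simply upper-cased, e.g. "btc" -> "BTC".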
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
def convert_currency_bfx(currency):
currency = currency.lower()
if currency == "dash":
currency = "dsh"
if currency == "bcc":
currency = "bch"
if currency == "iota":
currency = 'iot'
return currency.upper()
| fr | 0.146266 | #!/usr/bin/env python # -*- coding: UTF-8 -*- | 3.618355 | 4 |
opentsdb.py | bikash/opentsdb_spark | 2 | 6630925 | """
Concept from:
[1] https://github.com/venidera/otsdb_client/blob/master/otsdb_client/client.py
2017.02, <NAME>, DNVGL
Insert spark dataframe to opentsdb
Example usage
--------------
>>> import opentsdb
>>> oc = opentsdb()
>>> oc.ts_insert(df)
Note: it works only with spark dataframe.
"""
import requests as gr
import time
import itertools
from datetime import datetime
import socket
import urllib2
import httplib
import json
import datetime as dt
import random
from dateutil import rrule
from collections import OrderedDict
from multiprocessing import Process, Queue, Pool
import time
import logging
import logging.config
import re
from json import dumps as tdumps, loads
import sys, os
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('opentsdb.process')
class opentsdb(object):
def __init__(self, hostname='localhost', port=9998):
self.port = port
self.hostname = hostname
self.url = 'http://%s:%d' % (hostname, port)
self.headers = {'content-type': "application/json"}
self.aggregators = self.get_aggregators()
self.ids = {"filter": {}, "metric": {}}
## test opentsdb connection
def ping(self, host, port):
import socket
try:
socket.socket().connect((host, port))
print('Ping in '+host+':'+str(port) + " OpenTSDB Server: Ok")
return True
except socket.error as err:
if err.errno == socket.errno.ECONNREFUSED:
raise Exception('Can\'t connect to OpenTSDB Server')
raise Exception('Fail to test OpenTSDB connection status')
def get_endpoint(self, key=""):
endpoint = '/api' + {
'filters': '/config/filters',
'query_exp': '/query/exp',
'aggr': '/aggregators',
'suggest': '/suggest',
'version': '/version',
'put': '/put?details',
'query': '/query',
'stats': '/stats',
}.get(str(key))
assert endpoint is not '/api', \
"Please provide a valid endpoint."
return endpoint
def _get(self, endpoint="", params=dict()):
r = gr.get(self.url + self.get_endpoint(endpoint),
params=params)
#gr.map([r],exception_handler=exception_handler)
return r
def _post(self, endpoint="", data=dict()):
assert isinstance(data, dict), 'Field <data> must be a dict.'
r = gr.post(self.url + self.get_endpoint(endpoint),
data=self.dumps(data), headers=self.headers)
#gr.map([r],exception_handler=exception_handler)
return r
def process_response(self, response):
status = response.status_code
if not (200 <= status < 300):
logger.info("HTTP error code = %d" % status)
return False
data = loads(response.text)
return data if data else None
def filters(self):
""" Lists the various filters loaded by the TSD """
resp = self._get(endpoint="filters")
return self.process_response(resp)
def statistics(self):
"""Get info about what metrics are registered and with what stats."""
resp = self._get(endpoint="stats")
return self.process_response(resp)
def get_aggregators(self):
"""Used to get the list of default aggregation functions. """
resp = self._get(endpoint="aggr")
return self.process_response(resp)
def version(self):
"""Used to check OpenTSDB version. """
resp = self._get(endpoint="version")
return self.process_response(resp)
def suggest(self, type='metrics', q='', max=9999):
""" Matches the string in the query on the first chars of the stored data.
Parameters
----------
'type' : string (default='metrics')
The type of data. Must be one of the following: metrics, tagk or tagv.
'q' : string, optional (default='')
A string to match on for the given type.
'max' : int, optional (default=9999)
The maximum number of suggested results. Must be greater than 0.
"""
resp = self._get(endpoint="suggest", params={'type': type, 'q': q, 'max': max})
return self.process_response(resp)
def put(self, metric=None, timestamps=[], values=[], tags=dict(),
details=True, verbose=True, ptcl=20, att=5):
""" Put time serie points into OpenTSDB over HTTP.
Parameters
----------
'metric' : string, required (default=None)
The name of the metric you are storing.
'timestamps' : int, required (default=None) ** [generated over mktime]
A Unix epoch style timestamp in seconds or milliseconds.
'values' : array, required (default=[])
The values to record.
'tags' : map, required (default=dict())
A map of tag name/tag value pairs.
'details' : boolean, optional (default=True)
Whether or not to return detailed information
'verbose' : boolean, optional (default=False)
Enable verbose output.
'ptcl' : int, required (default=10)
Number of points sent per http request
'att' : int, required (default=5)
Number of HTTP request attempts
"""
assert isinstance(metric, str), 'Field <metric> must be a string.'
assert isinstance(values, list), 'Field <values> must be a list.'
assert isinstance(timestamps, list), 'Field <timestamps> must be a list.'
if len(timestamps) > 0:
assert len(timestamps) == len(values), \
'Field <timestamps> dont fit field <values>.'
assert all(isinstance(x, (int, datetime)) for x in timestamps), \
'Field <timestamps> must be integer or datetime'
pts = list()
ptl = []
ptc = 0
for n, v in enumerate(values):
v = float(v)
if not timestamps:
current_milli_time = lambda: int(round(time.time() * 1000))
nts = current_milli_time()
else:
nts = timestamps[n]
if isinstance(nts, datetime):
nts = int(time.mktime(nts.timetuple()))
elif not isinstance(nts, int):
nts = int(nts)
u = {'timestamp': nts, 'metric': metric, 'value': v, 'tags': tags}
ptl.append(u)
ptc += 1
if ptc == ptcl:
ptc = 0
pts.append(gr.post(self.url + self.get_endpoint("put") +
'?summary=true&details=true', data=self.dumps(ptl)))
ptl = list()
if ptl:
pts.append(gr.post(self.url + self.get_endpoint("put") +
'?summary=true&details=true', data=self.dumps(ptl)))
attempts = 0
fails = 1
while attempts < att and fails > 0:
#gr.map(pts,exception_handler=exception_handler)
if verbose:
print('Attempt %d: Request submitted with HTTP status codes %s' \
% (attempts + 1, str([x.response.status_code for x in pts])))
pts = [x for x in pts if not 200 <= x.response.status_code <= 300]
attempts += 1
fails = len([x for x in pts])
if verbose:
total = len(values)
print("%d of %d (%.2f%%) requests were successfully sent" \
% (total - fails, total, 100 * round(float((total - fails))/total, 2)))
return {
'points': len(values),
'success': len(values) - fails,
'failed': fails
}
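    # Illustrative usage sketch (metric name, tags and values below are assumptions, not from
    # the original code): points are sent in batches of <ptcl> and each batch is retried up to
    # <att> times.
    #   client = opentsdb(hostname='localhost', port=9998)
    #   client.put(metric='sys.cpu.user',
    #              timestamps=[1496275200, 1496275260],
    #              values=[42.0, 43.5],
    #              tags={'host': 'node1'})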
def query(self, queries=[], start='1h-ago', end='now', show_summary=False,
show_json=False, nots=False, tsd=True, group=False):
""" Enables extracting data from the storage system
Parameters
----------
'metric' : string, required (default=None)
The name of a metric stored in the system.
'aggr' : string, required (default=sum)
The name of an aggregation function to use.
'tags' : map, required (default=dict())
A map of tag name/tag value pairs.
'start' : string, required (default=1h-ago)
The start time for the query.
'end' : string, optional (default=current time)
An end time for the query.
'show_summary' : boolean, optional (default=False)
Whether or not to show a summary of timings surrounding the query.
'show_json': boolean, optional (default=False)
If true, returns the response in the JSON format
'nots': boolean, optional (default=False)
Hides timestamp results
'tsd': boolean, optional (default=True)
Set timestamp as datetime object instead of an integer
'group': boolean, optional (default=False)
Returns the points of the time series grouped (i.e. metric + tags) in one list
"""
assert isinstance(queries, list), 'Field <queries> must be a list.'
assert len(queries) > 0, 'Field <queries> must have at least one query'
for q in queries:
assert isinstance(q, dict), 'Field <element> must be a dict.'
assert all(i in q.keys() for i in ['m', 'aggr', 'tags']), \
'Not all required elements were informed.'
assert isinstance(q['m'], str), \
'Field <metric> must be a string.'
assert q['aggr'] in self.aggregators, \
'The aggregator is not valid.'
assert isinstance(q['tags'], dict), \
'Field <tags> must be a dict'
if 'rate' in q.keys():
assert isinstance(q['rate'], bool), \
'Field <rate> must be True or False'
data = {"start": start, "end": end, "queries":
[{
"aggregator": q['aggr'],
"metric": q['m'],
"tags": q['tags'],
"rate": q['rate'] if 'rate' in q.keys() else False,
'show_summary': show_summary
} for q in queries]
}
resp = self._post(endpoint="query", data=data)
if 200 <= resp.status_code <= 300:
result = None
if show_json:
# Raw response
result = resp.text
else:
data = loads(resp.text)
if group:
dpss = dict()
for x in data:
if 'metric' in x.keys():
for k,v in x['dps'].items():
if k in dpss.keys():
dpss[k] += v
else:
dpss[k] = v
points = sorted(dpss.items())
if not nots:
result = {'results':{'timestamps':[],'values':[]}}
if tsd:
result['results']['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
else:
result['results']['timestamps'] = [x[0] for x in points]
else:
result = {'results':{'values':[]}}
result['results']['values'] = [float(x[1]) for x in points]
else:
result = {'results':[]}
for x in data:
if 'metric' in x.keys():
dps = x['dps']
points = sorted(dps.items())
resd = {'metric':x['metric'],'tags':x['tags'],'timestamps':[],'values':[float(y[1]) for y in points]}
if not nots:
if tsd:
resd['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
else:
resd['timestamps'] = [x[0] for x in points]
else:
del resd['timestamps']
result['results'].append(resd)
if show_summary:
result['summary'] = data[-1]['statsSummary']
return result
else:
print('No results found')
return []
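    # Illustrative usage sketch (metric and tag names are assumptions): fetch the last hour of
    # a metric, summed over series that share the given tag.
    #   client.query(queries=[{'m': 'sys.cpu.user', 'aggr': 'sum', 'tags': {'host': 'node1'}}],
    #                start='1h-ago')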
def gen_id(self, tid="", desc=""):
assert tid in self.ids.keys(), "Field <tip> is not valid."
assert desc, "Field <desc> is not valid."
if desc not in self.ids[tid].keys():
if len(self.ids[tid]) == 0:
self.ids[tid][desc] = 1
else:
self.ids[tid][desc] = max(self.ids[tid].values()) + 1
return "%s%d" % (tid[:1], self.ids[tid][desc])
def build_policy(self, vpol=None):
assert vpol != None, \
'Field <vpol> must have a value.'
if vpol == 0:
return {'policy': 'zero'}
elif any(isinstance(vpol, i) for i in [int, float]):
return {'policy': 'scalar', 'value': vpol}
elif vpol in ['nan', 'null']:
return {'policy': vpol}
else:
assert False, 'Field <vpol> is not valid.'
def build_downsampler(self, aggr='max', interval=None, vpol=None):
assert interval != None, \
'Field <interval> is not valid.'
assert aggr in self.aggregators, \
'The aggregator is not valid. Check OTSDB docs for more details.'
ret = {'interval': interval, 'aggregator': aggr}
if vpol:
ret['fillPolicy'] = self.build_policy(vpol)
return ret
def build_filter(self, tags={}, group=True):
assert len(tags) > 0 and isinstance(tags, dict), \
'Field <tags> is not valid.'
obj = {"id" : self.gen_id("filter", self.dumps(tags)), "tags" : []}
for t in tags:
obj["tags"].append(
{
"type": "literal_or",
"tagk": t,
"filter": tags[t],
"groupBy": group
}
)
return obj
def query_expressions(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
metrics=[], exprs=[], dsampler=None, forceAggregate=False):
""" Allows for querying data using expressions.
Parameters
----------
'aggr' : string, required (default=sum)
The name of an aggregation function to use.
'start' : string, required (default=1h-ago)
The start time for the query.
'end' : string, optional (default=current time)
An end time for the query.
'vpol': [int, float, str], required (default=0)
The value used to replace "missing" values, i.e. when a data point was
expected but couldn't be found in storage.
'metrics': array of tuples, required (default=[])
Determines the pairs (metric, tags) in the expressions.
'exprs': array of tuples, required (default=[])
A list with one or more pairs (id, expr) of expressions.
'dsampler': tuple of three elements, optional (default=None)
Reduces the number of data points returned, given an interval
'forceAggregate': boolean, optional (default=false)
Forces the aggregation of metrics with the same name
"""
assert aggr in self.aggregators, \
'The aggregator is not valid. Check OTSDB docs for more details.'
assert any(isinstance(vpol, i) for i in [int, float]) or \
(isinstance(vpol, str) and vpol in ['null', 'nan']), \
'Field <vpol> is not valid.'
assert isinstance(metrics, list), 'Field <metrics> must be a list.'
assert len(metrics) > 0, 'Field <metrics> must have at least one element'
for m in metrics:
assert isinstance(m, dict), 'Field <element> must be a dict.'
assert all(i in m.keys() for i in ['m', 'tags']), \
'Not all required element keys were informed.'
assert isinstance(m['m'], str), \
'Field <metric> must be a string.'
assert isinstance(m['tags'], dict), \
'Field <tags> must be a dict'
assert isinstance(exprs, list), 'Field <exprs> must be a list.'
assert len(exprs) > 0, 'Field <exprs> must have at least one metric'
for e in exprs:
assert len(e) == 2, \
'Tuple must have the (id, expr) format.'
assert isinstance(e[0], str), \
'Field <id> must be a string.'
assert isinstance(e[1], str), \
'Field <expr> must be a string.'
if dsampler:
assert 2 <= len(dsampler) <= 3, \
'Field <dsampler> must be composed by (interval, aggr) ' \
'or (interval, aggr, vpol).'
assert isinstance(dsampler[0], str), \
'Field <interval> must be a string.'
assert dsampler[1] in self.aggregators, \
'Field <aggr> is not a valid aggregator.'
# Setting <time> definitions
time = {
'start': start,
'aggregator': aggr,
'end': end
}
if dsampler:
time['downsampler'] = self.build_downsampler(
interval=dsampler[0], aggr=dsampler[1],
vpol=dsampler[2] if len(dsampler) == 3 else None)
# Setting <filters> definitions
filters = {self.dumps(i): self.build_filter(tags=i['tags']) for i in metrics}
# Setting <metric> definitions
q_metrics = []
for m in metrics:
obj = {
'id': self.gen_id(tid="metric", desc=self.dumps(m)),
'filter': filters[self.dumps(m)]['id'],
'metric': m['m']
}
if vpol is not None:
obj['fillPolicy'] = self.build_policy(vpol)
q_metrics.append(obj)
filters = filters.values()
filters = [i for n, i in enumerate(filters) if i not in filters[n + 1:]]
assert isinstance(filters, list) and len(filters) > 0, \
'Object filter is not valid.'
# Setting <expression> definitions
q_exprs = []
for e in exprs:
m_id = e[1]
for i, j in self.ids["metric"].iteritems():
m_id = m_id.replace(i, "m%d" % j)
obj = {
'id': e[0],
'expr': m_id
}
q_exprs.append(obj)
outputs = [
{
'id': e[0],
'alias': 'Expression %s' % e[0]
} for e in exprs]
# Building the data query
data = {
'time': time,
'metrics': q_metrics,
'filters': filters,
'expressions': q_exprs,
'outputs': outputs
}
# Sending request to OTSDB and capturing HTTP response
resp = self._post(endpoint="query_exp", data=data)
res = self.process_response(resp)
if forceAggregate == True:
for i in range(len(res["outputs"])):
# Forcing the aggregation
dps = res["outputs"][i]["dps"]
new_dps = []
for dp in dps:
if len(dp) > 2:
new_dps.append([dp[0], sum(dp[1:])])
res["outputs"][i]["dps"] = new_dps
res["outputs"][i]["dpsMeta"]["series"] = 1
res["outputs"][i]["meta"] = []
return res
def query_summing(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
metrics=[], dsampler=None):
""" Sum all required metrics using query with expressions """
assert isinstance(metrics, list), 'Field <metrics> must be a list.'
assert len(metrics) > 0, 'Field <metrics> must have at least one element'
for m in metrics:
assert isinstance(m, dict), 'Field <element> must be a dict.'
assert all(i in m.keys() for i in ['m', 'tags']), \
'Not all required element keys were informed.'
assert isinstance(m['m'], str), \
'Field <metric> must be a string.'
assert isinstance(m['tags'], dict), \
'Field <tags> must be a dict'
expr = ""
for m in metrics:
expr += "%s + " % self.dumps(m)
expr = expr[:-3]
expressions = [("sum", expr)]
return self.query_expressions(aggr='sum', start=start, end=end, vpol=vpol,
metrics=metrics, exprs=expressions, dsampler=dsampler, forceAggregate=True)
def dumps(self, x):
return tdumps(x, default=str)
if __name__ == "__main__":
oc = opentsdb()
oc.ping("localhost",9998)
| """
Concept from:
[1] https://github.com/venidera/otsdb_client/blob/master/otsdb_client/client.py
2017.02, <NAME>, DNVGL
Insert spark dataframe to opentsdb
Example usage
--------------
>>> import opentsdb
>>> oc = opentsdb()
>>> oc.ts_insert(df)
Note: it works only with spark dataframe.
"""
import requests as gr
import time
import itertools
from datetime import datetime
import socket
import urllib2
import httplib
import json
import datetime as dt
import random
from dateutil import rrule
from collections import OrderedDict
from multiprocessing import Process, Queue, Pool
import time
import logging
import logging.config
import re
from json import dumps as tdumps, loads
import sys, os
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('opentsdb.process')
class opentsdb(object):
def __init__(self, hostname='localhost', port=9998):
self.port = port
self.hostname = hostname
self.url = 'http://%s:%d' % (hostname, port)
self.headers = {'content-type': "application/json"}
self.aggregators = self.get_aggregators()
self.ids = {"filter": {}, "metric": {}}
## test opentsdb connection
def ping(self, host, port):
import socket
try:
socket.socket().connect((host, port))
print('Ping in '+host+':'+str(port) + " OpenTSDB Server: Ok")
return True
except socket.error as err:
if err.errno == socket.errno.ECONNREFUSED:
raise Exception('Can\'t connect to OpenTSDB Server')
raise Exception('Fail to test OpenTSDB connection status')
def get_endpoint(self, key=""):
endpoint = '/api' + {
'filters': '/config/filters',
'query_exp': '/query/exp',
'aggr': '/aggregators',
'suggest': '/suggest',
'version': '/version',
'put': '/put?details',
'query': '/query',
'stats': '/stats',
}.get(str(key))
assert endpoint is not '/api', \
"Please provide a valid endpoint."
return endpoint
def _get(self, endpoint="", params=dict()):
r = gr.get(self.url + self.get_endpoint(endpoint),
params=params)
#gr.map([r],exception_handler=exception_handler)
return r
def _post(self, endpoint="", data=dict()):
assert isinstance(data, dict), 'Field <data> must be a dict.'
r = gr.post(self.url + self.get_endpoint(endpoint),
data=self.dumps(data), headers=self.headers)
#gr.map([r],exception_handler=exception_handler)
return r
def process_response(self, response):
status = response.status_code
if not (200 <= status < 300):
logger.info("HTTP error code = %d" % status)
return False
data = loads(response.text)
return data if data else None
def filters(self):
""" Lists the various filters loaded by the TSD """
resp = self._get(endpoint="filters")
return self.process_response(resp)
def statistics(self):
"""Get info about what metrics are registered and with what stats."""
resp = self._get(endpoint="stats")
return self.process_response(resp)
def get_aggregators(self):
"""Used to get the list of default aggregation functions. """
resp = self._get(endpoint="aggr")
return self.process_response(resp)
def version(self):
"""Used to check OpenTSDB version. """
resp = self._get(endpoint="version")
return self.process_response(resp)
def suggest(self, type='metrics', q='', max=9999):
""" Matches the string in the query on the first chars of the stored data.
Parameters
----------
'type' : string (default='metrics')
The type of data. Must be one of the following: metrics, tagk or tagv.
'q' : string, optional (default='')
A string to match on for the given type.
'max' : int, optional (default=9999)
The maximum number of suggested results. Must be greater than 0.
"""
resp = self._get(endpoint="suggest", params={'type': type, 'q': q, 'max': max})
return self.process_response(resp)
def put(self, metric=None, timestamps=[], values=[], tags=dict(),
details=True, verbose=True, ptcl=20, att=5):
""" Put time serie points into OpenTSDB over HTTP.
Parameters
----------
'metric' : string, required (default=None)
The name of the metric you are storing.
'timestamps' : int, required (default=None) ** [generated over mktime]
A Unix epoch style timestamp in seconds or milliseconds.
'values' : array, required (default=[])
The values to record.
'tags' : map, required (default=dict())
A map of tag name/tag value pairs.
'details' : boolean, optional (default=True)
Whether or not to return detailed information
'verbose' : boolean, optional (default=False)
Enable verbose output.
'ptcl' : int, required (default=10)
Number of points sent per http request
'att' : int, required (default=5)
Number of HTTP request attempts
"""
assert isinstance(metric, str), 'Field <metric> must be a string.'
assert isinstance(values, list), 'Field <values> must be a list.'
assert isinstance(timestamps, list), 'Field <timestamps> must be a list.'
if len(timestamps) > 0:
assert len(timestamps) == len(values), \
'Field <timestamps> dont fit field <values>.'
assert all(isinstance(x, (int, datetime)) for x in timestamps), \
'Field <timestamps> must be integer or datetime'
pts = list()
ptl = []
ptc = 0
for n, v in enumerate(values):
v = float(v)
if not timestamps:
current_milli_time = lambda: int(round(time.time() * 1000))
nts = current_milli_time()
else:
nts = timestamps[n]
if isinstance(nts, datetime):
nts = int(time.mktime(nts.timetuple()))
elif not isinstance(nts, int):
nts = int(nts)
u = {'timestamp': nts, 'metric': metric, 'value': v, 'tags': tags}
ptl.append(u)
ptc += 1
if ptc == ptcl:
ptc = 0
pts.append(gr.post(self.url + self.get_endpoint("put") +
'?summary=true&details=true', data=self.dumps(ptl)))
ptl = list()
if ptl:
pts.append(gr.post(self.url + self.get_endpoint("put") +
'?summary=true&details=true', data=self.dumps(ptl)))
attempts = 0
fails = 1
while attempts < att and fails > 0:
#gr.map(pts,exception_handler=exception_handler)
if verbose:
print('Attempt %d: Request submitted with HTTP status codes %s' \
% (attempts + 1, str([x.response.status_code for x in pts])))
pts = [x for x in pts if not 200 <= x.response.status_code <= 300]
attempts += 1
fails = len([x for x in pts])
if verbose:
total = len(values)
print("%d of %d (%.2f%%) requests were successfully sent" \
% (total - fails, total, 100 * round(float((total - fails))/total, 2)))
return {
'points': len(values),
'success': len(values) - fails,
'failed': fails
}
def query(self, queries=[], start='1h-ago', end='now', show_summary=False,
show_json=False, nots=False, tsd=True, group=False):
""" Enables extracting data from the storage system
Parameters
----------
'metric' : string, required (default=None)
The name of a metric stored in the system.
'aggr' : string, required (default=sum)
The name of an aggregation function to use.
'tags' : map, required (default=dict())
A map of tag name/tag value pairs.
'start' : string, required (default=1h-ago)
The start time for the query.
'end' : string, optional (default=current time)
An end time for the query.
'show_summary' : boolean, optional (default=False)
Whether or not to show a summary of timings surrounding the query.
'show_json': boolean, optional (default=False)
If true, returns the response in the JSON format
'nots': boolean, optional (default=False)
Hides timestamp results
'tsd': boolean, optional (default=True)
Set timestamp as datetime object instead of an integer
'group': boolean, optional (default=False)
Returns the points of the time series grouped (i.e. metric + tags) in one list
"""
assert isinstance(queries, list), 'Field <queries> must be a list.'
assert len(queries) > 0, 'Field <queries> must have at least one query'
for q in queries:
assert isinstance(q, dict), 'Field <element> must be a dict.'
assert all(i in q.keys() for i in ['m', 'aggr', 'tags']), \
'Not all required elements were informed.'
assert isinstance(q['m'], str), \
'Field <metric> must be a string.'
assert q['aggr'] in self.aggregators, \
'The aggregator is not valid.'
assert isinstance(q['tags'], dict), \
'Field <tags> must be a dict'
if 'rate' in q.keys():
assert isinstance(q['rate'], bool), \
'Field <rate> must be True or False'
data = {"start": start, "end": end, "queries":
[{
"aggregator": q['aggr'],
"metric": q['m'],
"tags": q['tags'],
"rate": q['rate'] if 'rate' in q.keys() else False,
'show_summary': show_summary
} for q in queries]
}
resp = self._post(endpoint="query", data=data)
if 200 <= resp.status_code <= 300:
result = None
if show_json:
# Raw response
result = resp.text
else:
data = loads(resp.text)
if group:
dpss = dict()
for x in data:
if 'metric' in x.keys():
for k,v in x['dps'].items():
if k in dpss.keys():
dpss[k] += v
else:
dpss[k] = v
points = sorted(dpss.items())
if not nots:
result = {'results':{'timestamps':[],'values':[]}}
if tsd:
result['results']['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
else:
result['results']['timestamps'] = [x[0] for x in points]
else:
result = {'results':{'values':[]}}
result['results']['values'] = [float(x[1]) for x in points]
else:
result = {'results':[]}
for x in data:
if 'metric' in x.keys():
dps = x['dps']
points = sorted(dps.items())
resd = {'metric':x['metric'],'tags':x['tags'],'timestamps':[],'values':[float(y[1]) for y in points]}
if not nots:
if tsd:
resd['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
else:
resd['timestamps'] = [x[0] for x in points]
else:
del resd['timestamps']
result['results'].append(resd)
if show_summary:
result['summary'] = data[-1]['statsSummary']
return result
else:
print('No results found')
return []
def gen_id(self, tid="", desc=""):
assert tid in self.ids.keys(), "Field <tip> is not valid."
assert desc, "Field <desc> is not valid."
if desc not in self.ids[tid].keys():
if len(self.ids[tid]) == 0:
self.ids[tid][desc] = 1
else:
self.ids[tid][desc] = max(self.ids[tid].values()) + 1
return "%s%d" % (tid[:1], self.ids[tid][desc])
def build_policy(self, vpol=None):
assert vpol != None, \
'Field <vpol> must have a value.'
if vpol == 0:
return {'policy': 'zero'}
elif any(isinstance(vpol, i) for i in [int, float]):
return {'policy': 'scalar', 'value': vpol}
elif vpol in ['nan', 'null']:
return {'policy': vpol}
else:
assert False, 'Field <vpol> is not valid.'
def build_downsampler(self, aggr='max', interval=None, vpol=None):
assert interval != None, \
'Field <interval> is not valid.'
assert aggr in self.aggregators, \
'The aggregator is not valid. Check OTSDB docs for more details.'
ret = {'interval': interval, 'aggregator': aggr}
if vpol:
ret['fillPolicy'] = self.build_policy(vpol)
return ret
def build_filter(self, tags={}, group=True):
assert len(tags) > 0 and isinstance(tags, dict), \
'Field <tags> is not valid.'
obj = {"id" : self.gen_id("filter", self.dumps(tags)), "tags" : []}
for t in tags:
obj["tags"].append(
{
"type": "literal_or",
"tagk": t,
"filter": tags[t],
"groupBy": group
}
)
return obj
def query_expressions(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
metrics=[], exprs=[], dsampler=None, forceAggregate=False):
""" Allows for querying data using expressions.
Parameters
----------
'aggr' : string, required (default=sum)
The name of an aggregation function to use.
'start' : string, required (default=1h-ago)
The start time for the query.
'end' : string, optional (default=current time)
An end time for the query.
'vpol': [int, float, str], required (default=0)
The value used to replace "missing" values, i.e. when a data point was
expected but couldn't be found in storage.
'metrics': array of tuples, required (default=[])
Determines the pairs (metric, tags) in the expressions.
'exprs': array of tuples, required (default=[])
A list with one or more pairs (id, expr) of expressions.
'dsampler': tuple of three elements, optional (default=None)
Reduces the number of data points returned, given an interval
'forceAggregate': boolean, optional (default=false)
Forces the aggregation of metrics with the same name
"""
assert aggr in self.aggregators, \
'The aggregator is not valid. Check OTSDB docs for more details.'
assert any(isinstance(vpol, i) for i in [int, float]) or \
(isinstance(vpol, str) and vpol in ['null', 'nan']), \
'Field <vpol> is not valid.'
assert isinstance(metrics, list), 'Field <metrics> must be a list.'
assert len(metrics) > 0, 'Field <metrics> must have at least one element'
for m in metrics:
assert isinstance(m, dict), 'Field <element> must be a dict.'
assert all(i in m.keys() for i in ['m', 'tags']), \
'Not all required element keys were informed.'
assert isinstance(m['m'], str), \
'Field <metric> must be a string.'
assert isinstance(m['tags'], dict), \
'Field <tags> must be a dict'
assert isinstance(exprs, list), 'Field <exprs> must be a list.'
assert len(exprs) > 0, 'Field <exprs> must have at least one metric'
for e in exprs:
assert len(e) == 2, \
'Tuple must have the (id, expr) format.'
assert isinstance(e[0], str), \
'Field <id> must be a string.'
assert isinstance(e[1], str), \
'Field <expr> must be a string.'
if dsampler:
assert 2 <= len(dsampler) <= 3, \
'Field <dsampler> must be composed by (interval, aggr) ' \
'or (interval, aggr, vpol).'
assert isinstance(dsampler[0], str), \
'Field <interval> must be a string.'
assert dsampler[1] in self.aggregators, \
'Field <aggr> is not a valid aggregator.'
# Setting <time> definitions
time = {
'start': start,
'aggregator': aggr,
'end': end
}
if dsampler:
time['downsampler'] = self.build_downsampler(
interval=dsampler[0], aggr=dsampler[1],
vpol=dsampler[2] if len(dsampler) == 3 else None)
# Setting <filters> definitions
filters = {self.dumps(i): self.build_filter(tags=i['tags']) for i in metrics}
# Setting <metric> definitions
q_metrics = []
for m in metrics:
obj = {
'id': self.gen_id(tid="metric", desc=self.dumps(m)),
'filter': filters[self.dumps(m)]['id'],
'metric': m['m']
}
if vpol is not None:
obj['fillPolicy'] = self.build_policy(vpol)
q_metrics.append(obj)
filters = filters.values()
filters = [i for n, i in enumerate(filters) if i not in filters[n + 1:]]
assert isinstance(filters, list) and len(filters) > 0, \
'Object filter is not valid.'
# Setting <expression> definitions
q_exprs = []
for e in exprs:
m_id = e[1]
for i, j in self.ids["metric"].iteritems():
m_id = m_id.replace(i, "m%d" % j)
obj = {
'id': e[0],
'expr': m_id
}
q_exprs.append(obj)
outputs = [
{
'id': e[0],
'alias': 'Expression %s' % e[0]
} for e in exprs]
# Building the data query
data = {
'time': time,
'metrics': q_metrics,
'filters': filters,
'expressions': q_exprs,
'outputs': outputs
}
# Sending request to OTSDB and capturing HTTP response
resp = self._post(endpoint="query_exp", data=data)
res = self.process_response(resp)
if forceAggregate == True:
for i in range(len(res["outputs"])):
# Forcing the aggregation
dps = res["outputs"][i]["dps"]
new_dps = []
for dp in dps:
if len(dp) > 2:
new_dps.append([dp[0], sum(dp[1:])])
res["outputs"][i]["dps"] = new_dps
res["outputs"][i]["dpsMeta"]["series"] = 1
res["outputs"][i]["meta"] = []
return res
def query_summing(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
metrics=[], dsampler=None):
""" Sum all required metrics using query with expressions """
assert isinstance(metrics, list), 'Field <metrics> must be a list.'
assert len(metrics) > 0, 'Field <metrics> must have at least one element'
for m in metrics:
assert isinstance(m, dict), 'Field <element> must be a dict.'
assert all(i in m.keys() for i in ['m', 'tags']), \
'Not all required element keys were informed.'
assert isinstance(m['m'], str), \
'Field <metric> must be a string.'
assert isinstance(m['tags'], dict), \
'Field <tags> must be a dict'
expr = ""
for m in metrics:
expr += "%s + " % self.dumps(m)
expr = expr[:-3]
expressions = [("sum", expr)]
return self.query_expressions(aggr='sum', start=start, end=end, vpol=vpol,
metrics=metrics, exprs=expressions, dsampler=dsampler, forceAggregate=True)
def dumps(self, x):
return tdumps(x, default=str)
if __name__ == "__main__":
oc = opentsdb()
oc.ping("localhost",9998)
| en | 0.442524 | Concept from: [1] https://github.com/venidera/otsdb_client/blob/master/otsdb_client/client.py 2017.02, <NAME>, DNVGL Insert spark dataframe to opentsdb Example usage -------------- >>> import opentsdb >>> oc = opentsdb() >>> oc.ts_insert(df) Note: it works only with spark dataframe. ## test opentsdb connection #gr.map([r],exception_handler=exception_handler) #gr.map([r],exception_handler=exception_handler) Lists the various filters loaded by the TSD Get info about what metrics are registered and with what stats. Used to get the list of default aggregation functions. Used to check OpenTSDB version. Matches the string in the query on the first chars of the stored data. Parameters ---------- 'type' : string (default='metrics') The type of data. Must be one of the following: metrics, tagk or tagv. 'q' : string, optional (default='') A string to match on for the given type. 'max' : int, optional (default=9999) The maximum number of suggested results. Must be greater than 0. Put time serie points into OpenTSDB over HTTP. Parameters ---------- 'metric' : string, required (default=None) The name of the metric you are storing. 'timestamps' : int, required (default=None) ** [generated over mktime] A Unix epoch style timestamp in seconds or milliseconds. 'values' : array, required (default=[]) The values to record. 'tags' : map, required (default=dict()) A map of tag name/tag value pairs. 'details' : boolean, optional (default=True) Whether or not to return detailed information 'verbose' : boolean, optional (default=False) Enable verbose output. 'ptcl' : int, required (default=10) Number of points sent per http request 'att' : int, required (default=5) Number of HTTP request attempts #gr.map(pts,exception_handler=exception_handler) Enables extracting data from the storage system Parameters ---------- 'metric' : string, required (default=None) The name of a metric stored in the system. 'aggr' : string, required (default=sum) The name of an aggregation function to use. 'tags' : map, required (default=dict()) A map of tag name/tag value pairs. 'start' : string, required (default=1h-ago) The start time for the query. 'end' : string, optional (default=current time) An end time for the query. 'show_summary' : boolean, optional (default=False) Whether or not to show a summary of timings surrounding the query. 'show_json': boolean, optional (default=False) If true, returns the response in the JSON format 'nots': boolean, optional (default=False) Hides timestamp results 'tsd': boolean, optional (default=True) Set timestamp as datetime object instead of an integer 'group': boolean, optional (default=False) Returns the points of the time series grouped (i.e. metric + tags) in one list # Raw response Allows for querying data using expressions. Parameters ---------- 'aggr' : string, required (default=sum) The name of an aggregation function to use. 'start' : string, required (default=1h-ago) The start time for the query. 'end' : string, optional (default=current time) An end time for the query. 'vpol': [int, float, str], required (default=0) The value used to replace "missing" values, i.e. when a data point was expected but couldn't be found in storage. 'metrics': array of tuples, required (default=[]) Determines the pairs (metric, tags) in the expressions. 'exprs': array of tuples, required (default=[]) A list with one or more pairs (id, expr) of expressions. 
'dsampler': tuple of three elements, optional (default=None) Reduces the number of data points returned, given an interval 'forceAggregate': boolean, optional (default=false) Forces the aggregation of metrics with the same name # Setting <time> definitions # Setting <filters> definitions # Setting <metric> definitions # Setting <expression> definitions # Building the data query # Sending request to OTSDB and capturing HTTP response # Forcing the aggregation Sum all required metrics using query with expressions | 2.825808 | 3 |
1021/tspomp/kmeans2_kuro0613.py | Kurogi-Lab/CAN2 | 0 | 6630926 | #!/bin/python
# -*- coding: utf-8 -*-
#
#http://pythondatascience.plavox.info/scikit-learn/%E3%82%AF%E3%83%A9%E3%82%B9%E3%82%BF%E5%88%86%E6%9E%90-k-means
#
#python kmeans2_kuro.py -fn ~/sotu/2017/can2b/tmp/tspSth.dat -K 2 -L 6
#
import sys
import numpy as np
from sklearn.cluster import KMeans
import argparse
import pandas as pd #sudo pip install pandas
import math
import os
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
import matplotlib
import matplotlib.gridspec as gridspec
from matplotlib.colors import Normalize
import mylib
#import os.path
#import os
#script_dir = os.path.abspath(os.path.dirname(__file__))
#os.path.abspath(__file__)
def mypltshow(fnfig,disp=1):
plt.savefig(fnfig);
if disp==1:
myshell("sleep 1;gv "+fnfig+"&");
plt.close() #plt.show()
def horizon(y1,y2,Ey):
T=len(y1)
for t in range(T):
if(abs(y1[t]-y2[t])>Ey):
break;
return t-1
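# Illustrative sketch of horizon(): it returns the last step before |y1[t]-y2[t]| first exceeds
# Ey, i.e. the predictable horizon of one series with respect to the other. Toy values below
# are assumptions for illustration only:
#   horizon([0.0, 0.1, 0.2, 5.0], [0.0, 0.1, 0.2, 0.3], 1.0) -> 2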
nth=3  # minimum number of series a cluster needs before k-means splitting / leave-one-out horizons are computed
def main():
#
parser = argparse.ArgumentParser(description='Kmeans')
parser.add_argument('-fnyp',default='',
help='file name of predictions')
parser.add_argument('-fnyg',default='',
help='file name of ground truth')
parser.add_argument('-K', default=2, type=int,
help='number of clusters')
parser.add_argument('--Lmax','-L', default=1, type=int,
help='Level of clustering for hierarchical')
parser.add_argument('-Lt', default=1, type=int,
help='1 for truncation 0 for no trunc for L')
parser.add_argument('-Lmin', default=0, type=int,
help='Minimum Level of clustering for hierarchical')
parser.add_argument('-H', default=100, type=int,
help='Prediction Horizon')
parser.add_argument('-hh', default=10, type=int,
help='additional horizon to classify')
parser.add_argument('-Ey', default=10, type=int,
help='threshold of ey')
parser.add_argument('-m', default=1, type=int,
help='1 for mean, 0 for leave-one-out')
parser.add_argument('-d1', default='.', type=str,
help='directory d1')
parser.add_argument('-DISP', default=1, type=int,
help='1 for DISP, 0 for noDISP')
parser.add_argument('-msg',default='',
help='message to carry')
parser.add_argument('-tp0',default=2000, type=int,
help='prediction start tyme')
args = parser.parse_args()
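    # What follows: predictions Y (one row per series) are clustered hierarchically; at each
    # level L every cluster is split by k-means into K children using only the first h0+hh
    # steps, where h0 is the cluster's common predictable horizon. For each cluster the member
    # with the largest mean leave-one-out horizon (LOOCV) is reported, together with its
    # horizon h(yi,yg) against the ground truth yg when fnyg is given.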
# import pdb;pdb.set_trace(); #for debug
# import os.path
# fnyp=os.path.expanduser(args.fnyp)
Sth_id = np.array(pd.read_csv('{}/tspSth_id.dat'.format(args.d1),delim_whitespace=True,dtype=np.int32,header=None)).reshape((-1))
all_id = np.array(pd.read_csv('{}/tspall_id.dat'.format(args.d1),delim_whitespace=True,dtype=np.int32,header=None))
tp0=args.tp0
colsplt=["black", "red", "dark-green", "magenta", "green", "light-green", "salmon", "pink", "grey"]
colsplt=["black", "red", "dark-green", "blue", "magenta", "green", "light-green", "salmon", "pink", "grey"]
colsplt=["red", "dark-green", "green", "blue", "green", "light-green", "blue", "cyan", "orange" "salmon", "pink", "magenta", "grey"]
colsplt=["red", "dark-green", "dark-green", "blue", "dark-green", "green", "blue", "cyan", "light-green","orange" "salmon", "pink", "magenta", "grey"]
fnyLc='tmp/yLc'+str(tp0)
fpyLc=open(fnyLc+'.plt','w')
fpyLc.write('set style data lines;set nokey\n')
K=int(args.K)
Lmax=int(args.Lmax)+1
Lmin=int(args.Lmin)
# read dataset
# import pdb;pdb.set_trace(); #for debug
# Y = np.loadtxt(fnyp, delimiter=' ')
# import pdb;pdb.set_trace(); #for debug
fnyp='{}/{}'.format(args.d1,args.fnyp)
if not os.path.isfile(fnyp):
# if len(fnyp) == 0:
print '#fnyp=%s does not exist' % (fnyp)
return
Y = np.array(pd.read_csv(fnyp,delim_whitespace=True,dtype=np.float32,header=None))
Y = Y.T # transpose
N,T=Y.shape #N:number of time series, T:horizon
Ey=args.Ey #threshold for predictable horizon
h_all=[]
# H=np.zeros((L+1,2**L)).astype('float32')
# fnyg=os.path.expanduser(args.fnyg)
fnyg='{}/{}'.format(args.d1,args.fnyg)
# import pdb;pdb.set_trace(); #for debug
if os.path.isfile(fnyg):
print '#predictable horizons'
yg = np.array(pd.read_csv(fnyg,delim_whitespace=True,dtype=np.float32,header=None))
# plt.plot(x, yg,color=cols[0]) #ground truth
##best yp for ygt
hb=0
ib=0
for i in range(N):
h=horizon(Y[i],yg,Ey)
h_all.append(h)
if h>hb:
hb=h
ib=i
h_all_A=np.array(h_all)
print 'max h(y%d,yg)=%d' % (ib,hb)
DISP=args.DISP
fig=plt.figure(figsize=(6,8))
# fig2=plt.figure(figsize=(8,6))
gs=gridspec.GridSpec(4,2)
plt.subplots_adjust(wspace=0.5, hspace=1.0)
# plt.subplots_adjust(wspace=0.5, hspace=0.5)
C=np.zeros((N,Lmax+1)).astype('uint8')
envN='export N=N:'
Nenv=[]
if Lmax == -1:
        y=np.zeros((1,args.H)).astype('float32') #dummy
        for n in range(N):
            y=np.concatenate((y,Y[n,:args.H].reshape(1,args.H)),axis=0)
y=np.delete(y,0,axis=0) #delete dummy
km = KMeans(n_clusters=K, init='k-means++', n_init=10, max_iter=300,tol=0.0001,precompute_distances='auto', verbose=0,random_state=None, copy_x=True, n_jobs=1)
pred = km.fit_predict(y)
else:#hierarchical clustering
LONGhg=0
# import pdb;pdb.set_trace(); #for debug
Lctodeg=dict()
Lctodegcuml=dict()
Lctohg=dict()
L2ctoLig=dict()
hgmaxL=[]
hVmaxL=[]
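        Hg=[]  # h(yi,yg) values appended below (initialization assumed; Hg is otherwise undefined)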
for L in range(Lmin,Lmax):
ctoh0=dict()
ctonY=dict()
ctoLi=dict()
ctoLic0=dict()
ctoLic1=dict()
ctoLig=dict()
if LONGhg == 1 and args.Lt == 1:
break
l=L
nc=[]
            for c in range(K**l): # loop over all K**l cluster labels at level l
strc=str(c)
# strc=str(L)+'-'+str(c)
# ctoc[strc]=c
y=np.zeros((1,T)).astype('float32') #dummy
Li=[]
for i in range(N):
if C[i,l] == c:
y=np.concatenate((y,Y[i,:].reshape(1,T)),axis=0)
Li.append(i)
y=np.delete(y,0,axis=0) #delete dummy
# import pdb;pdb.set_trace(); #for debug
nY,T1=y.shape #T1=T
if nY > nth: # for execute k-means cluster >=2:??
h0=T
# usemean=0
if args.m == 1: #use mean
ym = np.mean(y, axis=0)
for n in range(nY):
h=horizon(y[n],ym,Ey)
if h0 > h:
h0=h
else:#leave-one-out
for n1 in range(nY):
for n2 in range(nY):
if n1 != n2:
h=horizon(y[n1],y[n2],Ey)
if h0 > h:
h0=h
ctoh0[strc]=h0
ctonY[strc]=nY
# H[l,c]=h
print 'l c nY h=%3d %3d %3d %3d' % (l,c,nY,h0)
# y=y[:,:h]
# y=y[:,:h+10] ##?
                    ######################## K-means below
                    y=y[:,:h0+args.hh]  # cluster on the first h0+hh steps (common predictable horizon plus a margin of hh)
if nY >= nth:
km = KMeans(n_clusters=K, init='k-means++', n_init=10, max_iter=300,tol=0.0001,precompute_distances='auto', verbose=0,random_state=None, copy_x=True, n_jobs=1)
pred = km.fit_predict(y)
else:
pred = [0 for i in range(len(y))] #set all y in class 0
ip=0
# Li=[] #List of i
Lic0=[] #List of i
Lic1=[] #List of i
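                    # Relabel for the next level: C[i,l+1] keeps the parent id C[i,l] as the leading
                    # base-K digits and appends this split's k-means assignment pred[ip].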
for i in range(N):
if C[i,l] == c:
C[i,l+1] = C[i,l]*K + pred[ip]
# Li.append(i)
if pred[ip] ==0:
Lic0.append(i)
else:
Lic1.append(i)
ip += 1
# if L == 3 and c == 0:
# import pdb;pdb.set_trace(); #for debug
ctoLi[strc]=Li #list of n
ctoLic0[strc]=Lic0 #list of n
ctoLic1[strc]=Lic1 #list of n
#check191212 if L>=1: ###check191212
#check191212 y_=Y[Li,:]
#check191212 y0=Y[Lic0,:]
#check191212 y1=Y[Lic1,:]
#check191212 xax = np.arange(0, Y.shape[1], 1)
#check191212 plt.clf()
#check191212 fig.add_subplot(3,1,1)
#check191212 plt.plot(xax, y_.T)
#check191212 plt.title('L{}c{}'.format(L,c))
#check191212 fig.add_subplot(3,1,2)
#check191212 plt.plot(xax, y0.T)
#check191212 plt.title('L{}c{}-0'.format(L,c))
#check191212 fig.add_subplot(3,1,3)
#check191212 plt.plot(xax, y1.T)
#check191212 plt.title('L{}c{}-1'.format(L,c))
#check191212 plt.pause(0.05) #plt.show() #
#check191212 import pdb;pdb.set_trace(); #for debug
######################## K-means above
else: #if nY > 3: --> nY<=3
# print '#####################nY=%d<2,c=%d,L=%d' % (nY,c,L)
ctoLic0[strc]=Li #list of n
ctoLic1[strc]=[] #list of n
ctoh0[strc]=0
ctonY[strc]=nY
for n in range(N):
if C[n,l] == c:
C[n,l+1] = C[n,l]*K
ctoLi[strc]=[n]
# for L in range(Lmin,Lmax): ##close for L?
#####close L? NO
# import pdb;pdb.set_trace(); #for debug
# if L==3:
# import pdb;pdb.set_trace(); #for debug
if L >=0:
for n in range(N):
print 'n%2d' % (n),
for l in range(0,L+2):
print'%d ' % (C[n,l]),
# print'L%d C%3d ' % (l,C[n,l]),
print ''
for i,strc in enumerate(ctoh0):
c=int(strc) #???????
# import pdb;pdb.set_trace(); #for debug
# for c in cton.keys():
# for i in range(len(ctoLi)):
# c=str(i)
if strc in ctoLi.keys():
print 'c=%d nY=%d h=%d iY' % (c,ctonY[strc],ctoh0[strc]),ctoLi[strc]
print '####'
# import pdb;pdb.set_trace(); #for debug
# cols=matplotlib.colors.cnames.keys()
cols = ["r", "g", "b", "c", "m", "y", "k"]
cols.append(matplotlib.colors.cnames.keys())
x = np.arange(0, T, 1)
# for i in range(len(ctoLi)):
# c=str(i)
NsL=[]
for ic,c in enumerate(ctoLi):
for n in ctoLi[c]:
if ctonY[c] > 1:
y = Y[n,:]
                        plt.plot(x, y,color=cols[ic % 7])
# plt.pause(0.05)
# import pdb;pdb.set_trace(); #for debug
print 'L%d c%s n=%d h0=%d' % (L,c,ctonY[c],ctoh0[c]),
if L==0:
print ''
else:
print(' iY={}'.format(ctoLi[c]))
                    Ns=all_id[Sth_id[ctoLi[c]]]   # original series ids (N column) of this cluster's members
                    h_Lc=h_all_A[ctoLi[c]]        # their predictable horizons h(yi,yg)
                    Nsh=np.concatenate([Ns,h_Lc.reshape((-1,1))],axis=1)
df=pd.DataFrame(Nsh)
df.to_csv('{}/Nsh-L{}c{}.csv'.format(args.d1,L,c),index=False,sep=' ',header=None)
nNs=Ns.shape[0]
Nss='N:{}'.format(Ns[0,0])
for iN in range(1,nNs):
Nss=Nss+',{}'.format(Ns[iN,0])
NsL.append([Nss,nNs,c])
#
if DISP: plt.show()
if L==Lmax-1:
cols = ["b", "g", "c", "m", "y", "k"]
plt.clf()
# nc1=len(ctoLi)+1
ygdisp=0
nc1=len(ctoLi)+Lmax-2
xax = np.arange(0, Y.shape[1], 1)
if ygdisp==1:
nc1=len(ctoLi)+Lmax-1
fig.add_subplot(nc1,1,1)
plt.plot(xax, yg,linewidth=5,color="r")# plt.plot(xax, yg)
plt.title('yg')
for ic,c in enumerate(ctoLi):
y_=Y[ctoLi[c],:]
# print '#check nc1,1,int(c)+2={},{},{}'.format(nc1,1,int(c)+2)
# import pdb;pdb.set_trace(); #for debug
fig.add_subplot(nc1,1,int(c)+1) #ygdisp=-
# fig.add_subplot(nc1,1,int(c)+2) #ygdisp=1
plt.plot(xax, y_.T,linewidth=1)
# plt.plot(xax, y_.T,linewidth=1,color=cols[ctoLi[c][0]%6])
plt.plot(xax, yg,linewidth=2,linestyle='solid',color='r')
# plt.plot(xax, yg,linewidth=2,linestyle='dashdot',color='r')
plt.title('yp in L{}c{} n{}'.format(L,c,y_.shape[0]))
# plt.pause(0.05) #plt.show() #
fnfig='{}/y_L{}.eps'.format(args.d1,L)
mypltshow(fnfig)
fig=plt.figure(figsize=(4,4))
nc1=len(ctoLi)+1
plt.clf()
# plt.xlim(0,500);plt.ylim(0,500)
# fig.add_subplot(nc1,1,1)
plt.scatter(h_all_A,LOOH_all_A0,s=20, c="w", alpha=1.0, linewidths="2",edgecolors="k");
plt.grid(which='major',color='black',linestyle='--');
n=len(h_all_A)
plt.title('hV vs. hg @L{}c{} n{}'.format(0,0,n))
fnfig='{}/hh_L{}.eps'.format(args.d1,0)
mypltshow(fnfig,1)
for ic,c in enumerate(ctoLi):
plt.clf()
# plt.xlim(0,500); plt.ylim(0,500)
# fig.add_subplot(nc1,1,int(c)+1)
plt.scatter(h_all_A[ctoLi[c]],LOOH_all_A[ctoLi[c]],s=20, c="w", alpha=1.0, linewidths="2",edgecolors="k");
plt.grid(which='major',color='black',linestyle='--');
n=len(ctoLi[c])
plt.title('hV vs. hg@L{}c{} n{}'.format(L,c,n))
fnfig='{}/hh_L{}.eps'.format(args.d1,L)
mypltshow(fnfig,0)
# import pdb;pdb.set_trace(); #for debug
## hh=np.concatenate([h_all_A.reshape(-1,1),LOOH_all_A.reshape(-1,1)],axis=1)
for hD in ['hg','hV']:#
plt.clf()
for ic,c in enumerate(ctoLi):
fig.add_subplot(nc1,1,int(c)+1)
if hD=='hg':
ha=h_all_A[ctoLi[c]]
else:
# import pdb;pdb.set_trace(); #for debug
ha=LOOH_all_A[ctoLi[c]]
plt.xlim(0,500)
# plt.ylim(0,40)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
binmin=0;binmax=400;binwidth=10
plt.hist(ha, bins=np.arange(binmin, binmax + binwidth, binwidth), histtype='barstacked', ec='black')
plt.title('{}@L{}c{} n{}'.format(hD,L,c,len(ha)))
fnfig='{}/{}_L{}.eps'.format(args.d1,hD,L)
mypltshow(fnfig,0)
##################
# import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
# if L>0:
# import pdb;pdb.set_trace(); #for debug
fp=open('{}/Ns-L{}.env'.format(args.d1,L),'w')
fp.write('export N_S="{}'.format(NsL[0][0]))
for ic in range(1,len(NsL)):
fp.write(' {}'.format(NsL[ic][0]))
fp.write('"\nexport n_seed_S="{}'.format(int(100./NsL[0][1]+0.5)))
for ic in range(1,len(NsL)):
fp.write(' {}'.format(int(100./NsL[ic][1]+0.5)))
fp.write('"\n')
fp.write('export nc_S="{},{}'.format(NsL[0][1],NsL[0][2]))
for ic in range(1,len(NsL)):
fp.write(' {},{}'.format(NsL[ic][1],NsL[ic][2]))
fp.write('" #number of N and c for check\n')
fp.write('export d1={}'.format(args.d1))
fp.close()
# print 'L%d c%s n=%d hc=%d' % (L,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c]
##
print '###mean'
ymean=np.zeros((K**L,T)).astype('float32') #dummy
# for i in range(len(ctoLi)):
# c=str(i)
for i,c in enumerate(ctoLi):
for n in ctoLi[c]:
ymean[i] += Y[n,:]
ymean[i] = ymean[i]/len(ctoLi[c])
# if ctonY[c] > 1:
# plt.plot(x, ymean[i],color=cols[i])
# print 'i=%d c=%s nY=%d hm=%d' % (i,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c]
# plt.show()
###
f=open('ymean.dat','w')
for t in range(T):
f.write('%g' % ymean[0,t])
for i in range(1,len(ctoLi)):
f.write(' %g' % ymean[i,t])
f.write('\n')
f.close()
print 'ymean.dat is saved'
if L >=1:# normalize cumulative deg
degcumlsum=0
for c in range(K**L): #
_c=c*K
degcuml = 1.;
keycuml=str(L-1)+'-'+str(c)
for l in range(1):
# for l in range(L):
_c=_c/K
key=str(L-l-1)+'-'+str(_c)
if key in Lctodeg:
degcuml *= Lctodeg[key]
Lctodegcuml[keycuml]=degcuml
degcumlsum += degcuml
print 'degcuml:L%d-' % (L-1),
for c in range(K**L): #
keycuml=str(L-1)+'-'+str(c)
# import pdb;pdb.set_trace(); #for debug
Lctodegcuml[keycuml] /=degcumlsum
print '(%d)%.3f' % (c,Lctodegcuml[keycuml]),
# print '%s:%.2f' % (keycuml,Lctodegcuml[keycuml]),
print ''
# fnyg=os.path.expanduser(args.fnyg)
### fnyg='{}/{}'.format(args.d1,args.fnyg)
#### fnyg=args.fnyg
#### import pdb;pdb.set_trace(); #for debug
if os.path.isfile(fnyg):
print '#predictable horizons'
yg = np.array(pd.read_csv(fnyg,delim_whitespace=True,dtype=np.float32,header=None))
# plt.plot(x, yg,color=cols[0]) #ground truth
##best yp for ygt
## hb=0
## ib=0
## for i in range(N):
## h=horizon(Y[i],yg,Ey)
## h_all.append(h)
## if h>hb:
## hb=h
## ib=i
## print 'max h(y%d,yg)=%d' % (ib,hb),
h_all_A=np.array(h_all)
LOOH_all_A=np.zeros(len(h_all_A)).astype('float32')
print('max h(yi,yg)={} for i={}'.format(h_all_A.max(),np.where(h_all_A == h_all_A.max())))
print 'deg:', Lctodeg
# import pdb;pdb.set_trace(); #for debug
# plt.plot(x, Y[ib],color=cols[1])
##mean
# for i in range(len(ctoLi)):
# c=str(i)
# import pdb;pdb.set_trace(); #for debug
for i,strc in enumerate(ctoLi):
c=int(strc) #?????
key0=str(L)+'-'+str(c*K)
key1=str(L)+'-'+str(c*K+1)
key=str(L-1)+'-'+strc
h=horizon(ymean[i],yg,Ey)
Lctohg[key]=h
print 'L%d c%s N%d h(Yi,ymean)=%d h(ymean,yg)=%d' % (L,strc,ctonY[strc],ctoh0[strc],h),
if ctonY[strc] >=nth: #tag1
icol=(i+2) % 7
plt.plot(x, ymean[i],color=cols[icol])
#best yp via LOOCV horizon
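                        # LOOH[j]: mean predictable horizon of member j against every other member of
                        # the cluster; the member maximizing it is taken as the cluster's representative
                        # prediction and compared with yg below.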
nLOOH=len(ctoLi[strc])
LOOH=np.zeros(nLOOH).astype('float32') #
for j,n in enumerate(ctoLi[strc]):
for m in ctoLi[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOH[j] += H
                            LOOH[j] /= (nLOOH-1) # mean leave-one-out horizon of series n within this cluster
LOOH_all_A[n]=LOOH[j]
LOOHmax=LOOH.max() #LOOCVH
# if L==3:
# import pdb;pdb.set_trace(); #for debug
nLOOHc0=len(ctoLic0[strc])
nLOOHc1=len(ctoLic1[strc])
# print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)
if nLOOHc0 >=nth and nLOOHc1 >=nth: #best yp via LOOCV horizon for c0
LOOHc0=np.zeros(nLOOHc0).astype('float32') #
for j,n in enumerate(ctoLic0[strc]):
for m in ctoLic0[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOHc0[j] += H
                                LOOHc0[j] /= (nLOOHc0-1) # mean leave-one-out horizon within child cluster c0
# print 'LOOHc0(len=%d)' % nLOOHc0, LOOHc0
LOOHc0max=LOOHc0.max() #LOOCVHc0
#best yp via LOOCV horizon for c1
LOOHc1=np.zeros(nLOOHc1).astype('float32') #
for j,n in enumerate(ctoLic1[strc]):
for m in ctoLic1[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOHc1[j] += H
                                LOOHc1[j] /= (nLOOHc1-1) # mean leave-one-out horizon within child cluster c1
# print 'LOOHc1(len=%d)' % nLOOHc1, LOOHc1
LOOHc1max=LOOHc1.max() #LOOCVHc0
####
deg0=float(nLOOHc0)*(LOOHc0max-ctoh0[strc])
deg1=float(nLOOHc1)*(LOOHc1max-ctoh0[strc])
# if (deg0+deg1)==0 == 0:
# print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)
# print 'deg0=%g = %d*(%g-%g)' % (deg0,nLOOHc0,LOOHc0max,ctoh0[strc])
# print 'deg1=%g = %d*(%g-%g)' % (deg1,nLOOHc1,LOOHc1max,ctoh0[strc])
Lctodeg[key0]=deg0/(deg0+deg1)
# import pdb;pdb.set_trace(); #for debug
Lctodeg[key1]=deg1/(deg0+deg1)
else: #if nLOOHc0 >=3 and nLOOHc1 >=2:
if nLOOHc0 >= nth:
Lctodeg[key0]=1 #0.5 #1
else:
Lctodeg[key0]=0
if nLOOHc1 >= nth:
Lctodeg[key1]=1 #0.5 #1
else:
Lctodeg[key1]=0
####
Lhg=[]
for j in range(len(LOOH)):
if LOOH[j] == LOOHmax: #search all maximum
n=ctoLi[strc][j]
h=horizon(Y[n],yg,Ey)
print 'h(y%d,LOO)=%.1f h(y%d,yg)=%.1f' % (n,LOOH.max(),n,h), #n??
ctoLig[strc]=[n] #last ig
Lhg.append(h) ###???use max?
if h>=100.0:
LONGhg=1
print '***',
if len(Lhg)>0:
Lctohg[key]=max(Lhg)
else:
Lctohg[key]=0
####disp degs
if L>=1:
keycuml=str(L-1)+'-'+str(c)
print 'degs:%3f:' % (Lctodegcuml[keycuml]),
_c=c*K
# for l in range(1):
for l in range(L):
_c=_c/K
keyl=str(L-l-1)+'-'+str(_c)
if keyl in Lctodeg:
print '%s:%.2f' % (keyl,Lctodeg[keyl]),
else:
print '%s:?' % (keyl),
# print 'degs=', Lctodeg,
# print 'LOOCVh(yi%d)=%.1f h(yi%d,yg)=%.1f' % (LOOH.argmax(),LOOH.max(),n,horizon(Y[n],yg,Ey)),
# plt.plot(x, Y[n],color="black")
# print ' LOOCVh=%g nLOOH=%d ' % (LOOH,nLOOH),
else: # if ctonY[strc] >=3: tag1
print ' h(,yg)=%.1f' % (horizon(Y[ctoLi[strc][0]],yg,Ey)),
# LOOH=0;nLOOH=0
# print ' LOOCVh=%g nLOOH=%d' % (LOOH,nLOOH),
if L==0:
print ''
else:
print 'iY',ctoLi[strc]
c=strc
Ns=all_id[Sth_id[ctoLi[c]]]
h_Lc=h_all_A[ctoLi[c]]
Nsh=np.concatenate([Ns,h_Lc.reshape((-1,1))],axis=1)
iNsh=np.concatenate([np.array(ctoLi[c]).reshape(-1,1),Nsh],axis=1)
if len(h_Lc)==1:
LOOH=np.ones(1)*(-1) #
iNshh=np.concatenate([iNsh,LOOH.reshape(-1,1)],axis=1)
print('i N s h(yi,yg) h-LOOCV(yi)')
LOOHmax=np.max(LOOH)
hgmax=np.max(h_Lc)
mes=''
mesi=''
mesg=''
for i in range(len(iNshh)):
mes='{} {:3.0f} {:3.0f} {:2.0f}'.format(mes,iNshh[i,0],iNshh[i,1],iNshh[i,2])
if iNshh[i,3]==hgmax:
mes='{} {:3.0f}* '.format(mes,iNshh[i,3])
mesg='{}({:.0f},{:.0f},{:.0f},{:.0f}*,{:.0f})'.format(mesg,iNshh[i,0],iNshh[i,1],iNshh[i,2],iNshh[i,3],iNshh[i,4])
else:
mes='{} {:3.0f}'.format(mes,iNshh[i,3])
if iNshh[i,4]==LOOHmax:
mes='{} {:3.1f}* '.format(mes,iNshh[i,4])
mesi='{}({:.0f},{:.0f},{:.0f},{:.0f},{:.0f}*)'.format(mesi,iNshh[i,0],iNshh[i,1],iNshh[i,2],iNshh[i,3],iNshh[i,4])
Hg.append(iNshh[i,3]) #only last one
if len(iNshh) > 1 and L==Lmax-1:
# import pdb;pdb.set_trace(); #for debug
envN='{}{:.0f},'.format(envN,iNshh[i,1])
Nenv.append(iNshh[i,1])
else:
mes='{} {:3.1f} '.format(mes,iNshh[i,4])
mes+='\n'
# import pdb;pdb.set_trace(); #for debug
print(mes)
                        nc.append([len(LOOH)])
ha=LOOH;print('hi-LOOCV@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {}'.format(L,c,len(ha),np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),mesi,args.msg))
if L==0: LOOH_all_A0=LOOH_all_A
#hist,bin_edges=np.histogram(hp,bins=10)
# plt.clf()
# plt.xlim(50,500)
# plt.ylim(0,50)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
# plt.title('hV@L{}c{}'.format(L,c))
# fnfig='{}/hV_L{}c{}.eps'.format(args.d1,L,c)
# plt.savefig(fnfig)
# mylib.myshell('gv {}&'.format(fnfig))
ha=h_Lc;print('h(yi,yg)@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {}'.format(L,c,len(ha),np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),mesg,args.msg))
# plt.clf()
# plt.xlim(50,500)
# plt.ylim(0,50)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
# plt.title('hg@L{}c{}'.format(L,c))
# fnfig='{}/hg_L{}c{}.eps'.format(args.d1,L,c)
# plt.savefig(fnfig)
# mylib.myshell('gv {}&'.format(fnfig))
# import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
# print('h(yi,yg) with min{} max{} mean{:.3g} median{:.3g} std{:.3g}'.format(np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha)))
#print 'np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)',np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)
#np.sum np.nansum Compute sum of elements
#np.prod np.nanprod Compute product of elements
#np.mean np.nanmean Compute mean of elements
#np.std np.nanstd Compute standard deviation
#np.var np.nanvar Compute variance
#np.min np.nanmin Find minimum value
#np.max np.nanmax Find maximum value
#np.argmin np.nanargmin Find index of minimum value
#np.argmax np.nanargmax Find index of maximum value
#np.median np.nanmedian Compute median of elements
#np.percentile np.nanpercentile Compute rank-based statistics of elements
#np.any N/A Evaluate whether any elements are true
#np.all N/A Evaluate whether all elements are true
if DISP:
plt.show()
###
L2ctoLig[str(L)]=ctoLig
###
# for i,strc in enumerate(ctoLig):
# c=int(strc) #
# if strc in ctoLig.keys():
# Li=ctoLig[strc]
# i1=Li[0]+1
# col=colsplt[(2**L+c+1)%9]
# col=colsplt[(L)%9+1]
# lw=(Lmax-L)*2
# lw=2
# lt=2*L+1
# if L==0:
# fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# else:
# fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
####
if L==Lmax-1:
# import pdb;pdb.set_trace(); #for debug
envN=envN[:-1] #remove the last char ','
fp=open('{}/N-L{}.env'.format(args.d1,L),'w')
fp.write('export N=N:{} #{}'.format(int(min(Nenv)),envN))
fp.close()
for Lmax1 in range(1,Lmax):
fpyLc.write('\nset term tgif;set output \"%sL%d.obj\"\n' % (fnyLc,Lmax1))
i1=1;lt=1;lw=2;col=colsplt[0]
fpyLc.write('plot \"'+fnyg+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
for L in range(Lmin,Lmax1+1):
strL=str(L)
if strL in L2ctoLig.keys():
ctoLig=L2ctoLig[str(L)]
for c in range(K**L):
strc=str(c)
if strc in ctoLig.keys():
Li=ctoLig[strc]
i1=Li[0]+1
col=colsplt[(L)%9+1]
col=colsplt[(2**L+c)%9]
lw=(Lmax-L)*2
lw=2
lt=2*L+1
if L==0:
fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# fpyLc.write('plot \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
elif L == Lmax1:
fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# fpyLc.write('\n');
fpyLc.write('\nset term postscript eps enhanced color;set output \"%sL%d.eps\";replot\n' % (fnyLc,Lmax1))
#####
fpyLc.close()
print args.tp0,
Lctodegcuml['-1-0']=1
for L in range(0,Lmax):
c=0;
key=str(L-1)+'-'+str(c)
for c in range(K**L): #clas=0 or 1
degs=[]
hgs=[]
key=str(L-1)+'-'+str(c)
print 'L%s' % (key),
if key in Lctodegcuml:
degs.append(Lctodegcuml[key])
else:
degs.append(0)
if key in Lctohg.keys():
hgs.append(Lctohg[key])
else:
hgs.append(0)
adegs=np.array(degs)
ilist=np.argsort(degs)[::-1]
for i in ilist:
print ' %.3f %.3f' % (hgs[i],degs[i]),
### display the y-t in L-c by gnuplot
print '#class hg deg ...'
print 'Lctohg:',Lctohg
print 'Lc-hg %d ' % (tp0),L2ctoLig
print 'dtodegcuml:',Lctodegcuml
print('#Results2 are saved in d1={}'.format(args.d1));
if __name__ == "__main__":
argv=sys.argv
cmd=''
for a in argv:# for i,a in enumerate(argv):
cmd+=a+' '
print('#start:python {}'.format(cmd))
main()
#
| #!/bin/python
# -*- coding: utf-8 -*-
#
#http://pythondatascience.plavox.info/scikit-learn/%E3%82%AF%E3%83%A9%E3%82%B9%E3%82%BF%E5%88%86%E6%9E%90-k-means
#
#python kmeans2_kuro.py -fn ~/sotu/2017/can2b/tmp/tspSth.dat -K 2 -L 6
#
import sys
import numpy as np
from sklearn.cluster import KMeans
import argparse
import pandas as pd #sudo pip install pandas
import math
import os
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
import matplotlib
import matplotlib.gridspec as gridspec
from matplotlib.colors import Normalize
import mylib
#import os.path
#import os
#script_dir = os.path.abspath(os.path.dirname(__file__))
#os.path.abspath(__file__)
def mypltshow(fnfig,disp=1):
plt.savefig(fnfig);
if disp==1:
myshell("sleep 1;gv "+fnfig+"&");
plt.close() #plt.show()
def horizon(y1,y2,Ey):
T=len(y1)
for t in range(T):
if(abs(y1[t]-y2[t])>Ey):
break;
return t-1
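# Illustration only: a minimal sketch of what horizon() returns; the toy series and the
# Ey threshold below are made-up values used purely for demonstration. horizon() gives the
# last index t-1 before |y1[t]-y2[t]| first exceeds Ey, i.e. the predictable horizon.
def _horizon_example():
    y1 = [0.0, 1.0, 2.0, 3.0, 4.0]
    y2 = [0.2, 1.3, 2.5, 9.0, 9.0]
    return horizon(y1, y2, 1.0)  # 2: the toy series first diverge beyond Ey=1.0 at t=3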
nth=2
nth=3
def main():
#
parser = argparse.ArgumentParser(description='Kmeans')
parser.add_argument('-fnyp',default='',
help='file name of predictions')
parser.add_argument('-fnyg',default='',
help='file name of ground truth')
parser.add_argument('-K', default=2, type=int,
help='number of clusters')
parser.add_argument('--Lmax','-L', default=1, type=int,
help='Level of clustering for hierarchical')
parser.add_argument('-Lt', default=1, type=int,
                    help='1 to truncate the level loop once a long horizon is found, 0 for no truncation')
parser.add_argument('-Lmin', default=0, type=int,
help='Minimum Level of clustering for hierarchical')
parser.add_argument('-H', default=100, type=int,
help='Prediction Horizon')
parser.add_argument('-hh', default=10, type=int,
help='additional horizon to classify')
parser.add_argument('-Ey', default=10, type=int,
help='threshold of ey')
parser.add_argument('-m', default=1, type=int,
help='1 for mean, 0 for leave-one-out')
parser.add_argument('-d1', default='.', type=str,
help='directory d1')
parser.add_argument('-DISP', default=1, type=int,
help='1 for DISP, 0 for noDISP')
parser.add_argument('-msg',default='',
help='message to carry')
parser.add_argument('-tp0',default=2000, type=int,
                    help='prediction start time')
args = parser.parse_args()
# import pdb;pdb.set_trace(); #for debug
# import os.path
# fnyp=os.path.expanduser(args.fnyp)
Sth_id = np.array(pd.read_csv('{}/tspSth_id.dat'.format(args.d1),delim_whitespace=True,dtype=np.int32,header=None)).reshape((-1))
all_id = np.array(pd.read_csv('{}/tspall_id.dat'.format(args.d1),delim_whitespace=True,dtype=np.int32,header=None))
tp0=args.tp0
colsplt=["black", "red", "dark-green", "magenta", "green", "light-green", "salmon", "pink", "grey"]
colsplt=["black", "red", "dark-green", "blue", "magenta", "green", "light-green", "salmon", "pink", "grey"]
colsplt=["red", "dark-green", "green", "blue", "green", "light-green", "blue", "cyan", "orange" "salmon", "pink", "magenta", "grey"]
colsplt=["red", "dark-green", "dark-green", "blue", "dark-green", "green", "blue", "cyan", "light-green","orange" "salmon", "pink", "magenta", "grey"]
fnyLc='tmp/yLc'+str(tp0)
fpyLc=open(fnyLc+'.plt','w')
fpyLc.write('set style data lines;set nokey\n')
K=int(args.K)
Lmax=int(args.Lmax)+1
Lmin=int(args.Lmin)
# read dataset
# import pdb;pdb.set_trace(); #for debug
# Y = np.loadtxt(fnyp, delimiter=' ')
# import pdb;pdb.set_trace(); #for debug
fnyp='{}/{}'.format(args.d1,args.fnyp)
if not os.path.isfile(fnyp):
# if len(fnyp) == 0:
print '#fnyp=%s does not exist' % (fnyp)
return
Y = np.array(pd.read_csv(fnyp,delim_whitespace=True,dtype=np.float32,header=None))
Y = Y.T # transpose
N,T=Y.shape #N:number of time series, T:horizon
Ey=args.Ey #threshold for predictable horizon
h_all=[]
# H=np.zeros((L+1,2**L)).astype('float32')
# fnyg=os.path.expanduser(args.fnyg)
fnyg='{}/{}'.format(args.d1,args.fnyg)
# import pdb;pdb.set_trace(); #for debug
if os.path.isfile(fnyg):
print '#predictable horizons'
yg = np.array(pd.read_csv(fnyg,delim_whitespace=True,dtype=np.float32,header=None))
# plt.plot(x, yg,color=cols[0]) #ground truth
##best yp for ygt
hb=0
ib=0
for i in range(N):
h=horizon(Y[i],yg,Ey)
h_all.append(h)
if h>hb:
hb=h
ib=i
h_all_A=np.array(h_all)
print 'max h(y%d,yg)=%d' % (ib,hb)
DISP=args.DISP
fig=plt.figure(figsize=(6,8))
# fig2=plt.figure(figsize=(8,6))
gs=gridspec.GridSpec(4,2)
plt.subplots_adjust(wspace=0.5, hspace=1.0)
# plt.subplots_adjust(wspace=0.5, hspace=0.5)
C=np.zeros((N,Lmax+1)).astype('uint8')
envN='export N=N:'
Nenv=[]
if Lmax == -1:
y=np.zeros((1,H)).astype('float32') #dummy
for n in range(N):
y=np.concatenate((y,Y[n,:H].reshape(1,H)),axis=0)
y=np.delete(y,0,axis=0) #delete dummy
km = KMeans(n_clusters=K, init='k-means++', n_init=10, max_iter=300,tol=0.0001,precompute_distances='auto', verbose=0,random_state=None, copy_x=True, n_jobs=1)
pred = km.fit_predict(y)
else:#hierarchical clustering
LONGhg=0
# import pdb;pdb.set_trace(); #for debug
Lctodeg=dict()
Lctodegcuml=dict()
Lctohg=dict()
L2ctoLig=dict()
hgmaxL=[]
hVmaxL=[]
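        # Note: the dictionaries above appear to be keyed by '<level>-<cluster>' strings
        # (e.g. '2-3'): Lctodeg holds per-split weights, Lctodegcuml their normalized
        # cumulative products, Lctohg the predictable horizon recorded per level/cluster,
        # and L2ctoLig the index of the LOOCV-selected prediction for each cluster.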
for L in range(Lmin,Lmax):
ctoh0=dict()
ctonY=dict()
ctoLi=dict()
ctoLic0=dict()
ctoLic1=dict()
ctoLig=dict()
if LONGhg == 1 and args.Lt == 1:
break
l=L
nc=[]
for c in range(K**l): #clas=0 or 1
strc=str(c)
# strc=str(L)+'-'+str(c)
# ctoc[strc]=c
y=np.zeros((1,T)).astype('float32') #dummy
Li=[]
for i in range(N):
if C[i,l] == c:
y=np.concatenate((y,Y[i,:].reshape(1,T)),axis=0)
Li.append(i)
y=np.delete(y,0,axis=0) #delete dummy
# import pdb;pdb.set_trace(); #for debug
nY,T1=y.shape #T1=T
if nY > nth: # for execute k-means cluster >=2:??
h0=T
# usemean=0
if args.m == 1: #use mean
ym = np.mean(y, axis=0)
for n in range(nY):
h=horizon(y[n],ym,Ey)
if h0 > h:
h0=h
else:#leave-one-out
for n1 in range(nY):
for n2 in range(nY):
if n1 != n2:
h=horizon(y[n1],y[n2],Ey)
if h0 > h:
h0=h
ctoh0[strc]=h0
ctonY[strc]=nY
# H[l,c]=h
print 'l c nY h=%3d %3d %3d %3d' % (l,c,nY,h0)
# y=y[:,:h]
# y=y[:,:h+10] ##?
                    ######################## K-means below
y=y[:,:h0+args.hh] ##?
if nY >= nth:
km = KMeans(n_clusters=K, init='k-means++', n_init=10, max_iter=300,tol=0.0001,precompute_distances='auto', verbose=0,random_state=None, copy_x=True, n_jobs=1)
pred = km.fit_predict(y)
else:
pred = [0 for i in range(len(y))] #set all y in class 0
ip=0
# Li=[] #List of i
Lic0=[] #List of i
Lic1=[] #List of i
for i in range(N):
if C[i,l] == c:
C[i,l+1] = C[i,l]*K + pred[ip]
# Li.append(i)
if pred[ip] ==0:
Lic0.append(i)
else:
Lic1.append(i)
ip += 1
# if L == 3 and c == 0:
# import pdb;pdb.set_trace(); #for debug
ctoLi[strc]=Li #list of n
ctoLic0[strc]=Lic0 #list of n
ctoLic1[strc]=Lic1 #list of n
#check191212 if L>=1: ###check191212
#check191212 y_=Y[Li,:]
#check191212 y0=Y[Lic0,:]
#check191212 y1=Y[Lic1,:]
#check191212 xax = np.arange(0, Y.shape[1], 1)
#check191212 plt.clf()
#check191212 fig.add_subplot(3,1,1)
#check191212 plt.plot(xax, y_.T)
#check191212 plt.title('L{}c{}'.format(L,c))
#check191212 fig.add_subplot(3,1,2)
#check191212 plt.plot(xax, y0.T)
#check191212 plt.title('L{}c{}-0'.format(L,c))
#check191212 fig.add_subplot(3,1,3)
#check191212 plt.plot(xax, y1.T)
#check191212 plt.title('L{}c{}-1'.format(L,c))
#check191212 plt.pause(0.05) #plt.show() #
#check191212 import pdb;pdb.set_trace(); #for debug
######################## K-means above
else: #if nY > 3: --> nY<=3
# print '#####################nY=%d<2,c=%d,L=%d' % (nY,c,L)
ctoLic0[strc]=Li #list of n
ctoLic1[strc]=[] #list of n
ctoh0[strc]=0
ctonY[strc]=nY
for n in range(N):
if C[n,l] == c:
C[n,l+1] = C[n,l]*K
ctoLi[strc]=[n]
# for L in range(Lmin,Lmax): ##close for L?
#####close L? NO
# import pdb;pdb.set_trace(); #for debug
# if L==3:
# import pdb;pdb.set_trace(); #for debug
if L >=0:
for n in range(N):
print 'n%2d' % (n),
for l in range(0,L+2):
print'%d ' % (C[n,l]),
# print'L%d C%3d ' % (l,C[n,l]),
print ''
for i,strc in enumerate(ctoh0):
c=int(strc) #???????
# import pdb;pdb.set_trace(); #for debug
# for c in cton.keys():
# for i in range(len(ctoLi)):
# c=str(i)
if strc in ctoLi.keys():
print 'c=%d nY=%d h=%d iY' % (c,ctonY[strc],ctoh0[strc]),ctoLi[strc]
print '####'
# import pdb;pdb.set_trace(); #for debug
# cols=matplotlib.colors.cnames.keys()
cols = ["r", "g", "b", "c", "m", "y", "k"]
cols.append(matplotlib.colors.cnames.keys())
x = np.arange(0, T, 1)
# for i in range(len(ctoLi)):
# c=str(i)
NsL=[]
for ic,c in enumerate(ctoLi):
for n in ctoLi[c]:
if ctonY[c] > 1:
y = Y[n,:]
plt.plot(x, y,color=cols[i % 7])
# plt.pause(0.05)
# import pdb;pdb.set_trace(); #for debug
print 'L%d c%s n=%d h0=%d' % (L,c,ctonY[c],ctoh0[c]),
if L==0:
print ''
else:
print(' iY={}'.format(ctoLi[c]))
# Ns=all_id[Sth_id[ctoLi[c]]]
# import pdb;pdb.set_trace(); #for debug
# h_Lc=h_all_A[ctoLi[c]]
# Nsh=np.concatenate([Ns,h_Lc.reshape((-1,1))],axis=1)
# iNsh=np.concatenate([np.array(ctoLi[c]).reshape(-1,1),Nsh],axis=1)
# print('i N s h:\n{}'.format(iNsh))
# import pdb;pdb.set_trace(); #for debug
df=pd.DataFrame(Nsh)
df.to_csv('{}/Nsh-L{}c{}.csv'.format(args.d1,L,c),index=False,sep=' ',header=None)
nNs=Ns.shape[0]
Nss='N:{}'.format(Ns[0,0])
for iN in range(1,nNs):
Nss=Nss+',{}'.format(Ns[iN,0])
NsL.append([Nss,nNs,c])
#
if DISP: plt.show()
if L==Lmax-1:
cols = ["b", "g", "c", "m", "y", "k"]
plt.clf()
# nc1=len(ctoLi)+1
ygdisp=0
nc1=len(ctoLi)+Lmax-2
xax = np.arange(0, Y.shape[1], 1)
if ygdisp==1:
nc1=len(ctoLi)+Lmax-1
fig.add_subplot(nc1,1,1)
plt.plot(xax, yg,linewidth=5,color="r")# plt.plot(xax, yg)
plt.title('yg')
for ic,c in enumerate(ctoLi):
y_=Y[ctoLi[c],:]
# print '#check nc1,1,int(c)+2={},{},{}'.format(nc1,1,int(c)+2)
# import pdb;pdb.set_trace(); #for debug
fig.add_subplot(nc1,1,int(c)+1) #ygdisp=-
# fig.add_subplot(nc1,1,int(c)+2) #ygdisp=1
plt.plot(xax, y_.T,linewidth=1)
# plt.plot(xax, y_.T,linewidth=1,color=cols[ctoLi[c][0]%6])
plt.plot(xax, yg,linewidth=2,linestyle='solid',color='r')
# plt.plot(xax, yg,linewidth=2,linestyle='dashdot',color='r')
plt.title('yp in L{}c{} n{}'.format(L,c,y_.shape[0]))
# plt.pause(0.05) #plt.show() #
fnfig='{}/y_L{}.eps'.format(args.d1,L)
mypltshow(fnfig)
fig=plt.figure(figsize=(4,4))
nc1=len(ctoLi)+1
plt.clf()
# plt.xlim(0,500);plt.ylim(0,500)
# fig.add_subplot(nc1,1,1)
plt.scatter(h_all_A,LOOH_all_A0,s=20, c="w", alpha=1.0, linewidths="2",edgecolors="k");
plt.grid(which='major',color='black',linestyle='--');
n=len(h_all_A)
plt.title('hV vs. hg @L{}c{} n{}'.format(0,0,n))
fnfig='{}/hh_L{}.eps'.format(args.d1,0)
mypltshow(fnfig,1)
for ic,c in enumerate(ctoLi):
plt.clf()
# plt.xlim(0,500); plt.ylim(0,500)
# fig.add_subplot(nc1,1,int(c)+1)
plt.scatter(h_all_A[ctoLi[c]],LOOH_all_A[ctoLi[c]],s=20, c="w", alpha=1.0, linewidths="2",edgecolors="k");
plt.grid(which='major',color='black',linestyle='--');
n=len(ctoLi[c])
plt.title('hV vs. hg@L{}c{} n{}'.format(L,c,n))
fnfig='{}/hh_L{}.eps'.format(args.d1,L)
mypltshow(fnfig,0)
# import pdb;pdb.set_trace(); #for debug
## hh=np.concatenate([h_all_A.reshape(-1,1),LOOH_all_A.reshape(-1,1)],axis=1)
for hD in ['hg','hV']:#
plt.clf()
for ic,c in enumerate(ctoLi):
fig.add_subplot(nc1,1,int(c)+1)
if hD=='hg':
ha=h_all_A[ctoLi[c]]
else:
# import pdb;pdb.set_trace(); #for debug
ha=LOOH_all_A[ctoLi[c]]
plt.xlim(0,500)
# plt.ylim(0,40)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
binmin=0;binmax=400;binwidth=10
plt.hist(ha, bins=np.arange(binmin, binmax + binwidth, binwidth), histtype='barstacked', ec='black')
plt.title('{}@L{}c{} n{}'.format(hD,L,c,len(ha)))
fnfig='{}/{}_L{}.eps'.format(args.d1,hD,L)
mypltshow(fnfig,0)
##################
# import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
# if L>0:
# import pdb;pdb.set_trace(); #for debug
fp=open('{}/Ns-L{}.env'.format(args.d1,L),'w')
fp.write('export N_S="{}'.format(NsL[0][0]))
for ic in range(1,len(NsL)):
fp.write(' {}'.format(NsL[ic][0]))
fp.write('"\nexport n_seed_S="{}'.format(int(100./NsL[0][1]+0.5)))
for ic in range(1,len(NsL)):
fp.write(' {}'.format(int(100./NsL[ic][1]+0.5)))
fp.write('"\n')
fp.write('export nc_S="{},{}'.format(NsL[0][1],NsL[0][2]))
for ic in range(1,len(NsL)):
fp.write(' {},{}'.format(NsL[ic][1],NsL[ic][2]))
fp.write('" #number of N and c for check\n')
fp.write('export d1={}'.format(args.d1))
fp.close()
# print 'L%d c%s n=%d hc=%d' % (L,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c]
##
print '###mean'
ymean=np.zeros((K**L,T)).astype('float32') #dummy
# for i in range(len(ctoLi)):
# c=str(i)
for i,c in enumerate(ctoLi):
for n in ctoLi[c]:
ymean[i] += Y[n,:]
ymean[i] = ymean[i]/len(ctoLi[c])
# if ctonY[c] > 1:
# plt.plot(x, ymean[i],color=cols[i])
# print 'i=%d c=%s nY=%d hm=%d' % (i,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c]
# plt.show()
###
f=open('ymean.dat','w')
for t in range(T):
f.write('%g' % ymean[0,t])
for i in range(1,len(ctoLi)):
f.write(' %g' % ymean[i,t])
f.write('\n')
f.close()
print 'ymean.dat is saved'
if L >=1:# normalize cumulative deg
degcumlsum=0
for c in range(K**L): #
_c=c*K
degcuml = 1.;
keycuml=str(L-1)+'-'+str(c)
for l in range(1):
# for l in range(L):
_c=_c/K
key=str(L-l-1)+'-'+str(_c)
if key in Lctodeg:
degcuml *= Lctodeg[key]
Lctodegcuml[keycuml]=degcuml
degcumlsum += degcuml
print 'degcuml:L%d-' % (L-1),
for c in range(K**L): #
keycuml=str(L-1)+'-'+str(c)
# import pdb;pdb.set_trace(); #for debug
Lctodegcuml[keycuml] /=degcumlsum
print '(%d)%.3f' % (c,Lctodegcuml[keycuml]),
# print '%s:%.2f' % (keycuml,Lctodegcuml[keycuml]),
print ''
# fnyg=os.path.expanduser(args.fnyg)
### fnyg='{}/{}'.format(args.d1,args.fnyg)
#### fnyg=args.fnyg
#### import pdb;pdb.set_trace(); #for debug
if os.path.isfile(fnyg):
print '#predictable horizons'
yg = np.array(pd.read_csv(fnyg,delim_whitespace=True,dtype=np.float32,header=None))
# plt.plot(x, yg,color=cols[0]) #ground truth
##best yp for ygt
## hb=0
## ib=0
## for i in range(N):
## h=horizon(Y[i],yg,Ey)
## h_all.append(h)
## if h>hb:
## hb=h
## ib=i
## print 'max h(y%d,yg)=%d' % (ib,hb),
h_all_A=np.array(h_all)
LOOH_all_A=np.zeros(len(h_all_A)).astype('float32')
print('max h(yi,yg)={} for i={}'.format(h_all_A.max(),np.where(h_all_A == h_all_A.max())))
print 'deg:', Lctodeg
# import pdb;pdb.set_trace(); #for debug
# plt.plot(x, Y[ib],color=cols[1])
##mean
# for i in range(len(ctoLi)):
# c=str(i)
# import pdb;pdb.set_trace(); #for debug
for i,strc in enumerate(ctoLi):
c=int(strc) #?????
key0=str(L)+'-'+str(c*K)
key1=str(L)+'-'+str(c*K+1)
key=str(L-1)+'-'+strc
h=horizon(ymean[i],yg,Ey)
Lctohg[key]=h
print 'L%d c%s N%d h(Yi,ymean)=%d h(ymean,yg)=%d' % (L,strc,ctonY[strc],ctoh0[strc],h),
if ctonY[strc] >=nth: #tag1
icol=(i+2) % 7
plt.plot(x, ymean[i],color=cols[icol])
#best yp via LOOCV horizon
nLOOH=len(ctoLi[strc])
LOOH=np.zeros(nLOOH).astype('float32') #
for j,n in enumerate(ctoLi[strc]):
for m in ctoLi[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOH[j] += H
LOOH[j] /= (nLOOH-1) #######sum of horizon
LOOH_all_A[n]=LOOH[j]
LOOHmax=LOOH.max() #LOOCVH
# if L==3:
# import pdb;pdb.set_trace(); #for debug
nLOOHc0=len(ctoLic0[strc])
nLOOHc1=len(ctoLic1[strc])
# print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)
if nLOOHc0 >=nth and nLOOHc1 >=nth: #best yp via LOOCV horizon for c0
LOOHc0=np.zeros(nLOOHc0).astype('float32') #
for j,n in enumerate(ctoLic0[strc]):
for m in ctoLic0[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOHc0[j] += H
LOOHc0[j] /= (nLOOHc0-1) #######sum of horizon
# print 'LOOHc0(len=%d)' % nLOOHc0, LOOHc0
LOOHc0max=LOOHc0.max() #LOOCVHc0
#best yp via LOOCV horizon for c1
LOOHc1=np.zeros(nLOOHc1).astype('float32') #
for j,n in enumerate(ctoLic1[strc]):
for m in ctoLic1[strc]:
if n != m:
H= horizon(Y[n],Y[m],Ey)
LOOHc1[j] += H
LOOHc1[j] /= (nLOOHc1-1) #######sum of horizon
# print 'LOOHc1(len=%d)' % nLOOHc1, LOOHc1
LOOHc1max=LOOHc1.max() #LOOCVHc0
####
deg0=float(nLOOHc0)*(LOOHc0max-ctoh0[strc])
deg1=float(nLOOHc1)*(LOOHc1max-ctoh0[strc])
# if (deg0+deg1)==0 == 0:
# print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)
# print 'deg0=%g = %d*(%g-%g)' % (deg0,nLOOHc0,LOOHc0max,ctoh0[strc])
# print 'deg1=%g = %d*(%g-%g)' % (deg1,nLOOHc1,LOOHc1max,ctoh0[strc])
Lctodeg[key0]=deg0/(deg0+deg1)
# import pdb;pdb.set_trace(); #for debug
Lctodeg[key1]=deg1/(deg0+deg1)
else: #if nLOOHc0 >=3 and nLOOHc1 >=2:
if nLOOHc0 >= nth:
Lctodeg[key0]=1 #0.5 #1
else:
Lctodeg[key0]=0
if nLOOHc1 >= nth:
Lctodeg[key1]=1 #0.5 #1
else:
Lctodeg[key1]=0
####
Lhg=[]
for j in range(len(LOOH)):
if LOOH[j] == LOOHmax: #search all maximum
n=ctoLi[strc][j]
h=horizon(Y[n],yg,Ey)
print 'h(y%d,LOO)=%.1f h(y%d,yg)=%.1f' % (n,LOOH.max(),n,h), #n??
ctoLig[strc]=[n] #last ig
Lhg.append(h) ###???use max?
if h>=100.0:
LONGhg=1
print '***',
if len(Lhg)>0:
Lctohg[key]=max(Lhg)
else:
Lctohg[key]=0
####disp degs
if L>=1:
keycuml=str(L-1)+'-'+str(c)
print 'degs:%3f:' % (Lctodegcuml[keycuml]),
_c=c*K
# for l in range(1):
for l in range(L):
_c=_c/K
keyl=str(L-l-1)+'-'+str(_c)
if keyl in Lctodeg:
print '%s:%.2f' % (keyl,Lctodeg[keyl]),
else:
print '%s:?' % (keyl),
# print 'degs=', Lctodeg,
# print 'LOOCVh(yi%d)=%.1f h(yi%d,yg)=%.1f' % (LOOH.argmax(),LOOH.max(),n,horizon(Y[n],yg,Ey)),
# plt.plot(x, Y[n],color="black")
# print ' LOOCVh=%g nLOOH=%d ' % (LOOH,nLOOH),
else: # if ctonY[strc] >=3: tag1
print ' h(,yg)=%.1f' % (horizon(Y[ctoLi[strc][0]],yg,Ey)),
# LOOH=0;nLOOH=0
# print ' LOOCVh=%g nLOOH=%d' % (LOOH,nLOOH),
if L==0:
print ''
else:
print 'iY',ctoLi[strc]
c=strc
Ns=all_id[Sth_id[ctoLi[c]]]
h_Lc=h_all_A[ctoLi[c]]
Nsh=np.concatenate([Ns,h_Lc.reshape((-1,1))],axis=1)
iNsh=np.concatenate([np.array(ctoLi[c]).reshape(-1,1),Nsh],axis=1)
if len(h_Lc)==1:
LOOH=np.ones(1)*(-1) #
iNshh=np.concatenate([iNsh,LOOH.reshape(-1,1)],axis=1)
print('i N s h(yi,yg) h-LOOCV(yi)')
LOOHmax=np.max(LOOH)
hgmax=np.max(h_Lc)
mes=''
mesi=''
mesg=''
for i in range(len(iNshh)):
mes='{} {:3.0f} {:3.0f} {:2.0f}'.format(mes,iNshh[i,0],iNshh[i,1],iNshh[i,2])
if iNshh[i,3]==hgmax:
mes='{} {:3.0f}* '.format(mes,iNshh[i,3])
mesg='{}({:.0f},{:.0f},{:.0f},{:.0f}*,{:.0f})'.format(mesg,iNshh[i,0],iNshh[i,1],iNshh[i,2],iNshh[i,3],iNshh[i,4])
else:
mes='{} {:3.0f}'.format(mes,iNshh[i,3])
if iNshh[i,4]==LOOHmax:
mes='{} {:3.1f}* '.format(mes,iNshh[i,4])
mesi='{}({:.0f},{:.0f},{:.0f},{:.0f},{:.0f}*)'.format(mesi,iNshh[i,0],iNshh[i,1],iNshh[i,2],iNshh[i,3],iNshh[i,4])
Hg.append(iNshh[i,3]) #only last one
if len(iNshh) > 1 and L==Lmax-1:
# import pdb;pdb.set_trace(); #for debug
envN='{}{:.0f},'.format(envN,iNshh[i,1])
Nenv.append(iNshh[i,1])
else:
mes='{} {:3.1f} '.format(mes,iNshh[i,4])
mes+='\n'
# import pdb;pdb.set_trace(); #for debug
print(mes)
                        nc.append([len(LOOH)])
ha=LOOH;print('hi-LOOCV@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {}'.format(L,c,len(ha),np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),mesi,args.msg))
if L==0: LOOH_all_A0=LOOH_all_A
#hist,bin_edges=np.histogram(hp,bins=10)
# plt.clf()
# plt.xlim(50,500)
# plt.ylim(0,50)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
# plt.title('hV@L{}c{}'.format(L,c))
# fnfig='{}/hV_L{}c{}.eps'.format(args.d1,L,c)
# plt.savefig(fnfig)
# mylib.myshell('gv {}&'.format(fnfig))
ha=h_Lc;print('h(yi,yg)@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {}'.format(L,c,len(ha),np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),mesg,args.msg))
# plt.clf()
# plt.xlim(50,500)
# plt.ylim(0,50)
# plt.hist(ha, bins=20, histtype='barstacked', ec='black')
# plt.title('hg@L{}c{}'.format(L,c))
# fnfig='{}/hg_L{}c{}.eps'.format(args.d1,L,c)
# plt.savefig(fnfig)
# mylib.myshell('gv {}&'.format(fnfig))
# import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
# print('h(yi,yg) with min{} max{} mean{:.3g} median{:.3g} std{:.3g}'.format(np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha)))
#print 'np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)',np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)
#np.sum np.nansum Compute sum of elements
#np.prod np.nanprod Compute product of elements
#np.mean np.nanmean Compute mean of elements
#np.std np.nanstd Compute standard deviation
#np.var np.nanvar Compute variance
#np.min np.nanmin Find minimum value
#np.max np.nanmax Find maximum value
#np.argmin np.nanargmin Find index of minimum value
#np.argmax np.nanargmax Find index of maximum value
#np.median np.nanmedian Compute median of elements
#np.percentile np.nanpercentile Compute rank-based statistics of elements
#np.any N/A Evaluate whether any elements are true
#np.all N/A Evaluate whether all elements are true
if DISP:
plt.show()
###
L2ctoLig[str(L)]=ctoLig
###
# for i,strc in enumerate(ctoLig):
# c=int(strc) #
# if strc in ctoLig.keys():
# Li=ctoLig[strc]
# i1=Li[0]+1
# col=colsplt[(2**L+c+1)%9]
# col=colsplt[(L)%9+1]
# lw=(Lmax-L)*2
# lw=2
# lt=2*L+1
# if L==0:
# fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# else:
# fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
####
if L==Lmax-1:
# import pdb;pdb.set_trace(); #for debug
envN=envN[:-1] #remove the last char ','
fp=open('{}/N-L{}.env'.format(args.d1,L),'w')
fp.write('export N=N:{} #{}'.format(int(min(Nenv)),envN))
fp.close()
for Lmax1 in range(1,Lmax):
fpyLc.write('\nset term tgif;set output \"%sL%d.obj\"\n' % (fnyLc,Lmax1))
i1=1;lt=1;lw=2;col=colsplt[0]
fpyLc.write('plot \"'+fnyg+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
for L in range(Lmin,Lmax1+1):
strL=str(L)
if strL in L2ctoLig.keys():
ctoLig=L2ctoLig[str(L)]
for c in range(K**L):
strc=str(c)
if strc in ctoLig.keys():
Li=ctoLig[strc]
i1=Li[0]+1
col=colsplt[(L)%9+1]
col=colsplt[(2**L+c)%9]
lw=(Lmax-L)*2
lw=2
lt=2*L+1
if L==0:
fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# fpyLc.write('plot \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
elif L == Lmax1:
fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
# fpyLc.write('\n');
fpyLc.write('\nset term postscript eps enhanced color;set output \"%sL%d.eps\";replot\n' % (fnyLc,Lmax1))
#####
fpyLc.close()
print args.tp0,
Lctodegcuml['-1-0']=1
for L in range(0,Lmax):
c=0;
key=str(L-1)+'-'+str(c)
for c in range(K**L): #clas=0 or 1
degs=[]
hgs=[]
key=str(L-1)+'-'+str(c)
print 'L%s' % (key),
if key in Lctodegcuml:
degs.append(Lctodegcuml[key])
else:
degs.append(0)
if key in Lctohg.keys():
hgs.append(Lctohg[key])
else:
hgs.append(0)
adegs=np.array(degs)
ilist=np.argsort(degs)[::-1]
for i in ilist:
print ' %.3f %.3f' % (hgs[i],degs[i]),
### display the y-t in L-c by gnuplot
print '#class hg deg ...'
print 'Lctohg:',Lctohg
print 'Lc-hg %d ' % (tp0),L2ctoLig
print 'dtodegcuml:',Lctodegcuml
print('#Results2 are saved in d1={}'.format(args.d1));
if __name__ == "__main__":
argv=sys.argv
cmd=''
for a in argv:# for i,a in enumerate(argv):
cmd+=a+' '
print('#start:python {}'.format(cmd))
main()
#
| en | 0.18333 | #!/bin/python # -*- coding: utf-8 -*- # #http://pythondatascience.plavox.info/scikit-learn/%E3%82%AF%E3%83%A9%E3%82%B9%E3%82%BF%E5%88%86%E6%9E%90-k-means # #python kmeans2_kuro.py -fn ~/sotu/2017/can2b/tmp/tspSth.dat -K 2 -L 6 # #sudo pip install pandas #import matplotlib.cm as cm #import os.path #import os #script_dir = os.path.abspath(os.path.dirname(__file__)) #os.path.abspath(__file__) #plt.show() # # import pdb;pdb.set_trace(); #for debug # import os.path # fnyp=os.path.expanduser(args.fnyp) # read dataset # import pdb;pdb.set_trace(); #for debug # Y = np.loadtxt(fnyp, delimiter=' ') # import pdb;pdb.set_trace(); #for debug # if len(fnyp) == 0: # transpose #N:number of time series, T:horizon #threshold for predictable horizon # H=np.zeros((L+1,2**L)).astype('float32') # fnyg=os.path.expanduser(args.fnyg) # import pdb;pdb.set_trace(); #for debug # plt.plot(x, yg,color=cols[0]) #ground truth ##best yp for ygt # fig2=plt.figure(figsize=(8,6)) # plt.subplots_adjust(wspace=0.5, hspace=0.5) #dummy #delete dummy #hierarchical clustering # import pdb;pdb.set_trace(); #for debug #clas=0 or 1 # strc=str(L)+'-'+str(c) # ctoc[strc]=c #dummy #delete dummy # import pdb;pdb.set_trace(); #for debug #T1=T # for execute k-means cluster >=2:?? # usemean=0 #use mean #leave-one-out # H[l,c]=h # y=y[:,:h] # y=y[:,:h+10] ##? ######################## K-means bellow ##? #set all y in class 0 # Li=[] #List of i #List of i #List of i # Li.append(i) # if L == 3 and c == 0: # import pdb;pdb.set_trace(); #for debug #list of n #list of n #list of n #check191212 if L>=1: ###check191212 #check191212 y_=Y[Li,:] #check191212 y0=Y[Lic0,:] #check191212 y1=Y[Lic1,:] #check191212 xax = np.arange(0, Y.shape[1], 1) #check191212 plt.clf() #check191212 fig.add_subplot(3,1,1) #check191212 plt.plot(xax, y_.T) #check191212 plt.title('L{}c{}'.format(L,c)) #check191212 fig.add_subplot(3,1,2) #check191212 plt.plot(xax, y0.T) #check191212 plt.title('L{}c{}-0'.format(L,c)) #check191212 fig.add_subplot(3,1,3) #check191212 plt.plot(xax, y1.T) #check191212 plt.title('L{}c{}-1'.format(L,c)) #check191212 plt.pause(0.05) #plt.show() # #check191212 import pdb;pdb.set_trace(); #for debug ######################## K-means above #if nY > 3: --> nY<=3 # print '#####################nY=%d<2,c=%d,L=%d' % (nY,c,L) #list of n #list of n # for L in range(Lmin,Lmax): ##close for L? #####close L? NO # import pdb;pdb.set_trace(); #for debug # if L==3: # import pdb;pdb.set_trace(); #for debug # print'L%d C%3d ' % (l,C[n,l]), #??????? 
# import pdb;pdb.set_trace(); #for debug # for c in cton.keys(): # for i in range(len(ctoLi)): # c=str(i) ###' # import pdb;pdb.set_trace(); #for debug # cols=matplotlib.colors.cnames.keys() # for i in range(len(ctoLi)): # c=str(i) # plt.pause(0.05) # import pdb;pdb.set_trace(); #for debug # Ns=all_id[Sth_id[ctoLi[c]]] # import pdb;pdb.set_trace(); #for debug # h_Lc=h_all_A[ctoLi[c]] # Nsh=np.concatenate([Ns,h_Lc.reshape((-1,1))],axis=1) # iNsh=np.concatenate([np.array(ctoLi[c]).reshape(-1,1),Nsh],axis=1) # print('i N s h:\n{}'.format(iNsh)) # import pdb;pdb.set_trace(); #for debug # # nc1=len(ctoLi)+1 # plt.plot(xax, yg) # print '#check nc1,1,int(c)+2={},{},{}'.format(nc1,1,int(c)+2) # import pdb;pdb.set_trace(); #for debug #ygdisp=- # fig.add_subplot(nc1,1,int(c)+2) #ygdisp=1 # plt.plot(xax, y_.T,linewidth=1,color=cols[ctoLi[c][0]%6]) # plt.plot(xax, yg,linewidth=2,linestyle='dashdot',color='r') # plt.pause(0.05) #plt.show() # # plt.xlim(0,500);plt.ylim(0,500) # fig.add_subplot(nc1,1,1) # plt.xlim(0,500); plt.ylim(0,500) # fig.add_subplot(nc1,1,int(c)+1) # import pdb;pdb.set_trace(); #for debug ## hh=np.concatenate([h_all_A.reshape(-1,1),LOOH_all_A.reshape(-1,1)],axis=1) # # import pdb;pdb.set_trace(); #for debug # plt.ylim(0,40) # plt.hist(ha, bins=20, histtype='barstacked', ec='black') ################## # import pdb;pdb.set_trace(); #for debug # import pdb;pdb.set_trace(); #for debug # if L>0: # import pdb;pdb.set_trace(); #for debug #number of N and c for check\n') # print 'L%d c%s n=%d hc=%d' % (L,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c] ## ##mean' #dummy # for i in range(len(ctoLi)): # c=str(i) # if ctonY[c] > 1: # plt.plot(x, ymean[i],color=cols[i]) # print 'i=%d c=%s nY=%d hm=%d' % (i,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c] # plt.show() ### # normalize cumulative deg # # for l in range(L): # # import pdb;pdb.set_trace(); #for debug # print '%s:%.2f' % (keycuml,Lctodegcuml[keycuml]), # fnyg=os.path.expanduser(args.fnyg) ### fnyg='{}/{}'.format(args.d1,args.fnyg) #### fnyg=args.fnyg #### import pdb;pdb.set_trace(); #for debug # plt.plot(x, yg,color=cols[0]) #ground truth ##best yp for ygt ## hb=0 ## ib=0 ## for i in range(N): ## h=horizon(Y[i],yg,Ey) ## h_all.append(h) ## if h>hb: ## hb=h ## ib=i ## print 'max h(y%d,yg)=%d' % (ib,hb), # import pdb;pdb.set_trace(); #for debug # plt.plot(x, Y[ib],color=cols[1]) ##mean # for i in range(len(ctoLi)): # c=str(i) # import pdb;pdb.set_trace(); #for debug #????? #tag1 #best yp via LOOCV horizon # #######sum of horizon #LOOCVH # if L==3: # import pdb;pdb.set_trace(); #for debug # print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1) #best yp via LOOCV horizon for c0 # #######sum of horizon # print 'LOOHc0(len=%d)' % nLOOHc0, LOOHc0 #LOOCVHc0 #best yp via LOOCV horizon for c1 # #######sum of horizon # print 'LOOHc1(len=%d)' % nLOOHc1, LOOHc1 #LOOCVHc0 #### # if (deg0+deg1)==0 == 0: # print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1) # print 'deg0=%g = %d*(%g-%g)' % (deg0,nLOOHc0,LOOHc0max,ctoh0[strc]) # print 'deg1=%g = %d*(%g-%g)' % (deg1,nLOOHc1,LOOHc1max,ctoh0[strc]) # import pdb;pdb.set_trace(); #for debug #if nLOOHc0 >=3 and nLOOHc1 >=2: #0.5 #1 #0.5 #1 #### #search all maximum #n?? #last ig ###???use max? 
####disp degs # for l in range(1): # print 'degs=', Lctodeg, # print 'LOOCVh(yi%d)=%.1f h(yi%d,yg)=%.1f' % (LOOH.argmax(),LOOH.max(),n,horizon(Y[n],yg,Ey)), # plt.plot(x, Y[n],color="black") # print ' LOOCVh=%g nLOOH=%d ' % (LOOH,nLOOH), # if ctonY[strc] >=3: tag1 # LOOH=0;nLOOH=0 # print ' LOOCVh=%g nLOOH=%d' % (LOOH,nLOOH), # #only last one # import pdb;pdb.set_trace(); #for debug # import pdb;pdb.set_trace(); #for debug #hist,bin_edges=np.histogram(hp,bins=10) # plt.clf() # plt.xlim(50,500) # plt.ylim(0,50) # plt.hist(ha, bins=20, histtype='barstacked', ec='black') # plt.title('hV@L{}c{}'.format(L,c)) # fnfig='{}/hV_L{}c{}.eps'.format(args.d1,L,c) # plt.savefig(fnfig) # mylib.myshell('gv {}&'.format(fnfig)) # plt.clf() # plt.xlim(50,500) # plt.ylim(0,50) # plt.hist(ha, bins=20, histtype='barstacked', ec='black') # plt.title('hg@L{}c{}'.format(L,c)) # fnfig='{}/hg_L{}c{}.eps'.format(args.d1,L,c) # plt.savefig(fnfig) # mylib.myshell('gv {}&'.format(fnfig)) # import pdb;pdb.set_trace(); #for debug # import pdb;pdb.set_trace(); #for debug # print('h(yi,yg) with min{} max{} mean{:.3g} median{:.3g} std{:.3g}'.format(np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha))) #print 'np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)',np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha) #np.sum np.nansum Compute sum of elements #np.prod np.nanprod Compute product of elements #np.mean np.nanmean Compute mean of elements #np.std np.nanstd Compute standard deviation #np.var np.nanvar Compute variance #np.min np.nanmin Find minimum value #np.max np.nanmax Find maximum value #np.argmin np.nanargmin Find index of minimum value #np.argmax np.nanargmax Find index of maximum value #np.median np.nanmedian Compute median of elements #np.percentile np.nanpercentile Compute rank-based statistics of elements #np.any N/A Evaluate whether any elements are true #np.all N/A Evaluate whether all elements are true ### ### # for i,strc in enumerate(ctoLig): # c=int(strc) # # if strc in ctoLig.keys(): # Li=ctoLig[strc] # i1=Li[0]+1 # col=colsplt[(2**L+c+1)%9] # col=colsplt[(L)%9+1] # lw=(Lmax-L)*2 # lw=2 # lt=2*L+1 # if L==0: # fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n')) # else: # fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n')) #### # import pdb;pdb.set_trace(); #for debug #remove the last char ',' #{}'.format(int(min(Nenv)),envN)) # fpyLc.write('plot \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n')) # fpyLc.write('\n'); ##### #clas=0 or 1 ### display the y-t in L-c by gnuplot # for i,a in enumerate(argv): # | 2.400818 | 2 |
generate_sim3C_filter_list.py | sc-zhang/ALLHiC_Eval_Data_Generators | 0 | 6630927 | <gh_stars>0
#!/usr/bin/env python
import os
import sys
import gc
from math import *
import time
import random
# Get position of read based on chr with sam or bam file
def get_read_pos_with_sam_bam_file(sam_bam_file, chr_len_db, bin_size, out_list):
long_bin_size = bin_size.upper()
long_bin_size = long_bin_size.replace('K', '000')
long_bin_size = long_bin_size.replace('M', '000000')
long_bin_size = long_bin_size.replace('G', '000000000')
long_bin_size = int(long_bin_size)
random.seed()
read_on_chr = {}
if sam_bam_file[-3:] == "bam":
f_in = os.popen("samtools view "+sam_bam_file, 'r')
else:
f_in = open(sam_bam_file, 'r')
with open(bin_size.upper()+"_"+out_list, 'w') as fout:
for line in f_in:
if line.strip() == '' or line[0] == '@':
continue
if 'WGS' in line:
fout.write(line.strip().split()[0]+"\n")
continue
data = line.strip().split()
read_id = data[0]
if data[2] == '*' or data[6] == '*':
continue
chr1 = data[2].replace('_pilon', '')
read_pos1 = int(data[3])
if data[6] != '=':
chr2 = data[6].replace('_pilon', '')
else:
chr2 = chr1
read_pos2 = int(data[7])
if chr1 == chr2 and chr1 in chr_len_db:
bin_count_of_chr = int(round((chr_len_db[chr1]*1.0/long_bin_size+0.5)))
pos1_index = int(read_pos1/long_bin_size)
pos2_index = int(read_pos2/long_bin_size)
if (pos1_index+pos2_index) in range(bin_count_of_chr-11, bin_count_of_chr+10):
border = abs(bin_count_of_chr-1-(pos1_index+pos2_index))
if abs(pos1_index-pos2_index) < 4:
border = 0
elif abs(pos1_index-pos2_index) < 7:
border = 1
else:
border = 4.0-int((border+1)/2)
if random.random() > 1.0/(2**border):
fout.write(read_id+'\n')
if read_id in read_on_chr:
read_on_chr.pop(read_id)
continue
read_on_chr[read_id] = [chr1, read_pos1, chr2, read_pos2]
f_in.close()
return read_on_chr
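# Illustration only: a standalone sketch (never called by the script) of the keep-probability
# implied by the filter above; the helper name is hypothetical, and the original applies this
# logic inline per read pair, writing the ids of discarded pairs to the filter list instead.
def _keep_probability(pos1_index, pos2_index, bin_count_of_chr):
    # pairs whose bin indices sum to roughly bin_count_of_chr-1 lie near the anti-diagonal
    # of the contact map and are kept only with probability 1/2**border
    if (pos1_index + pos2_index) not in range(bin_count_of_chr - 11, bin_count_of_chr + 10):
        return 1.0
    border = abs(bin_count_of_chr - 1 - (pos1_index + pos2_index))
    if abs(pos1_index - pos2_index) < 4:
        border = 0
    elif abs(pos1_index - pos2_index) < 7:
        border = 1
    else:
        border = 4.0 - int((border + 1) / 2)
    return 1.0 / (2 ** border)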
# Get chromosome length
def get_chr_len(chr_list):
chr_len_db = {}
chr_order = []
with open(chr_list, 'r') as f_in:
for line in f_in:
if line.strip() == '':
continue
data = line.strip().split()
chr_order.append(data[0])
chr_len_db[data[0]] = int(data[1])
return chr_len_db, chr_order
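# Note: <chr_list> is read as two whitespace-separated columns, chromosome name then length;
# e.g. a (made-up) file containing "Chr01 30427671" and "Chr02 19698289" yields
# chr_order=['Chr01', 'Chr02'] and chr_len_db={'Chr01': 30427671, 'Chr02': 19698289}.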
# Calc read counts on each bin
def calc_read_count_per_bin(chr_len_db, chr_order, read_on_chr, bin_size):
long_bin_size = bin_size.upper()
long_bin_size = long_bin_size.replace('K', '000')
long_bin_size = long_bin_size.replace('M', '000000')
long_bin_size = long_bin_size.replace('G', '000000000')
long_bin_size = int(long_bin_size)
read_count_per_chr = {}
read_count_whole_genome = {}
bin_offset = [0 for i in range(0, len(chr_order)+1)]
bin_count = [0 for i in range(0, len(chr_order)+1)]
total_bin_count = 0
for chrn in chr_len_db:
bin_count_of_chr = int(round((chr_len_db[chrn]*1.0/long_bin_size+0.5)))
total_bin_count += bin_count_of_chr
bin_count[chr_order.index(chrn)+1] = bin_count_of_chr
read_count_per_chr[chrn] = [[0 for i in range(0, bin_count_of_chr)] for j in range(0, bin_count_of_chr)]
for i in range(0, len(bin_count)):
for j in range(0, i+1):
bin_offset[i] += bin_count[j]
read_count_whole_genome = [[0 for i in range(0, total_bin_count)] for j in range(0, total_bin_count)]
for read in read_on_chr:
chr1, pos1, chr2, pos2 = read_on_chr[read]
if chr1 not in chr_len_db or chr2 not in chr_len_db:
continue
pos1_index = int(pos1/long_bin_size)
pos2_index = int(pos2/long_bin_size)
if chr1 == chr2 and chr1 in read_count_per_chr:
read_count_per_chr[chr1][pos1_index][pos2_index] += 1
read_count_per_chr[chr1][pos2_index][pos1_index] += 1
chr1_index = chr_order.index(chr1)
chr2_index = chr_order.index(chr2)
whole_pos1 = bin_offset[chr1_index] + pos1_index
whole_pos2 = bin_offset[chr2_index] + pos2_index
read_count_whole_genome[whole_pos1][whole_pos2] += 1
read_count_whole_genome[whole_pos2][whole_pos1] += 1
for chrn in read_count_per_chr:
for i in range(0, len(read_count_per_chr[chrn])):
for j in range(0, len(read_count_per_chr[chrn][i])):
if read_count_per_chr[chrn][i][j] != 0:
read_count_per_chr[chrn][i][j] = log(read_count_per_chr[chrn][i][j], 2)
else:
read_count_per_chr[chrn][i][j] = -float('inf')
for i in range(0, len(read_count_whole_genome)):
for j in range(0, len(read_count_whole_genome[i])):
if read_count_whole_genome[i][j] != 0:
read_count_whole_genome[i][j] = log(read_count_whole_genome[i][j], 2)
else:
read_count_whole_genome[i][j] = -float('inf')
return read_count_per_chr, read_count_whole_genome
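# Note: calc_read_count_per_bin returns log2-transformed contact matrices: every raw count
# becomes log2(count) and empty bins become -inf, so they sit at the bottom of the colour
# scale in draw_heatmap; e.g. a bin holding 8 read pairs maps to 3.0, an empty bin to -inf.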
# Draw heatmap of allhic result with matplotlib
def draw_heatmap(data, chrn, bin_size, ext):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
short_bin_size = bin_size.upper()
if bin_size[-9:] == '000000000':
short_bin_size = bin_size[:-9]+'G'
elif bin_size[-6:] == '000000':
short_bin_size = bin_size[:-6]+'M'
elif bin_size[-3:] == '000':
short_bin_size = bin_size[:-3]+'K'
ax = plt.gca()
if chrn != 'all':
file_prefix = short_bin_size + "_" + chrn
else:
file_prefix = short_bin_size + '_Whole_genome'
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+' Draw '+file_prefix)
with open('log.csv', 'w') as flog:
for i in range(0, len(data)):
for j in range(0, len(data[i])):
flog.write("%s,"%(str(data[i][j])))
flog.write('\n')
# mpl.cm.YlOrRd
cmap = plt.get_cmap('YlOrRd')
cmap.set_over('black')
if chrn != 'all':
hmap = ax.imshow(data, interpolation='nearest', origin='lower', cmap=cmap, aspect='auto')
else:
hmap = ax.imshow(data, interpolation='nearest', cmap=cmap, aspect='auto')
plt.colorbar(mappable=hmap,cax=None, ax=None, shrink=0.5)
plt.tick_params(labelsize=6)
for ticks in ax.get_xticklabels():
ticks.set_rotation(90)
for ticks in ax.get_yticklabels():
ticks.set_rotation(0)
if chrn != 'all':
title = chrn+'_'+short_bin_size
else:
title = 'Whole_genome_'+short_bin_size
plt.xlabel("Bins ("+short_bin_size.lower()+"b per bin)", fontsize=8)
if chrn == 'all':
plt.xticks([])
plt.yticks([])
plt.title(title, y=1.01, fontsize=12)
else:
plt.title(title, y=1.1, fontsize=12)
    plt.savefig(file_prefix+'.'+ext, format=ext, bbox_inches='tight', dpi=200)
plt.close('all')
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Notice: This script is using for drawing heatmap of the all-hic reasult")
print("Usage: python "+sys.argv[0]+" <sam/bam file> <chr_list> <pic_ext> <out_filter_list>")
print("\t<sam/bam_file> is the sam or bam file filtered by allhic")
print("\t<chr_list> is the file contain ordered chrs and length")
print("\t<pic_ext> is the file type of picture")
print("\t<out_filter_list> is the list of reads to be filtered")
else:
sam_bam_file = sys.argv[1]
chr_list = sys.argv[2]
ext = sys.argv[3]
out_list = sys.argv[4]
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Step 1: Get chromosome length")
chr_len_db, chr_order = get_chr_len(chr_list)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Step 2: Calculating and Drawing heatmap")
bin_size = "150k"
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Get read position based on chromosome")
read_on_chr = get_read_pos_with_sam_bam_file(sam_bam_file, chr_len_db, bin_size, out_list)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Calculating")
read_count_per_chr, read_count_whole_genome = calc_read_count_per_bin(chr_len_db, chr_order, read_on_chr, bin_size)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Drawing heatmap")
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Drawing with bin size "+str(bin_size))
for chrn in read_count_per_chr:
draw_heatmap(read_count_per_chr[chrn], chrn, bin_size, ext)
draw_heatmap(read_count_whole_genome, 'all', bin_size, ext)
del read_count_per_chr, read_count_whole_genome
gc.collect()
del read_on_chr
gc.collect()
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Success")
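# Illustration only: a hypothetical invocation (the input file names are made up) could be
#   python generate_sim3C_filter_list.py sample.bam chr.list pdf filter.list
# which writes 150K_filter.list plus per-chromosome and whole-genome heatmaps named like
# 150K_<chr>.pdf and 150K_Whole_genome.pdf in the working directory.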
| #!/usr/bin/env python
import os
import sys
import gc
from math import *
import time
import random
# Get position of read based on chr with sam or bam file
def get_read_pos_with_sam_bam_file(sam_bam_file, chr_len_db, bin_size, out_list):
long_bin_size = bin_size.upper()
long_bin_size = long_bin_size.replace('K', '000')
long_bin_size = long_bin_size.replace('M', '000000')
long_bin_size = long_bin_size.replace('G', '000000000')
long_bin_size = int(long_bin_size)
random.seed()
read_on_chr = {}
if sam_bam_file[-3:] == "bam":
f_in = os.popen("samtools view "+sam_bam_file, 'r')
else:
f_in = open(sam_bam_file, 'r')
with open(bin_size.upper()+"_"+out_list, 'w') as fout:
for line in f_in:
if line.strip() == '' or line[0] == '@':
continue
if 'WGS' in line:
fout.write(line.strip().split()[0]+"\n")
continue
data = line.strip().split()
read_id = data[0]
if data[2] == '*' or data[6] == '*':
continue
chr1 = data[2].replace('_pilon', '')
read_pos1 = int(data[3])
if data[6] != '=':
chr2 = data[6].replace('_pilon', '')
else:
chr2 = chr1
read_pos2 = int(data[7])
if chr1 == chr2 and chr1 in chr_len_db:
bin_count_of_chr = int(round((chr_len_db[chr1]*1.0/long_bin_size+0.5)))
pos1_index = int(read_pos1/long_bin_size)
pos2_index = int(read_pos2/long_bin_size)
if (pos1_index+pos2_index) in range(bin_count_of_chr-11, bin_count_of_chr+10):
border = abs(bin_count_of_chr-1-(pos1_index+pos2_index))
if abs(pos1_index-pos2_index) < 4:
border = 0
elif abs(pos1_index-pos2_index) < 7:
border = 1
else:
border = 4.0-int((border+1)/2)
if random.random() > 1.0/(2**border):
fout.write(read_id+'\n')
if read_id in read_on_chr:
read_on_chr.pop(read_id)
continue
read_on_chr[read_id] = [chr1, read_pos1, chr2, read_pos2]
f_in.close()
return read_on_chr
# Get chromosome length
def get_chr_len(chr_list):
chr_len_db = {}
chr_order = []
with open(chr_list, 'r') as f_in:
for line in f_in:
if line.strip() == '':
continue
data = line.strip().split()
chr_order.append(data[0])
chr_len_db[data[0]] = int(data[1])
return chr_len_db, chr_order
# Calc read counts on each bin
def calc_read_count_per_bin(chr_len_db, chr_order, read_on_chr, bin_size):
long_bin_size = bin_size.upper()
long_bin_size = long_bin_size.replace('K', '000')
long_bin_size = long_bin_size.replace('M', '000000')
long_bin_size = long_bin_size.replace('G', '000000000')
long_bin_size = int(long_bin_size)
read_count_per_chr = {}
read_count_whole_genome = {}
bin_offset = [0 for i in range(0, len(chr_order)+1)]
bin_count = [0 for i in range(0, len(chr_order)+1)]
total_bin_count = 0
for chrn in chr_len_db:
bin_count_of_chr = int(round((chr_len_db[chrn]*1.0/long_bin_size+0.5)))
total_bin_count += bin_count_of_chr
bin_count[chr_order.index(chrn)+1] = bin_count_of_chr
read_count_per_chr[chrn] = [[0 for i in range(0, bin_count_of_chr)] for j in range(0, bin_count_of_chr)]
for i in range(0, len(bin_count)):
for j in range(0, i+1):
bin_offset[i] += bin_count[j]
read_count_whole_genome = [[0 for i in range(0, total_bin_count)] for j in range(0, total_bin_count)]
for read in read_on_chr:
chr1, pos1, chr2, pos2 = read_on_chr[read]
if chr1 not in chr_len_db or chr2 not in chr_len_db:
continue
pos1_index = int(pos1/long_bin_size)
pos2_index = int(pos2/long_bin_size)
if chr1 == chr2 and chr1 in read_count_per_chr:
read_count_per_chr[chr1][pos1_index][pos2_index] += 1
read_count_per_chr[chr1][pos2_index][pos1_index] += 1
chr1_index = chr_order.index(chr1)
chr2_index = chr_order.index(chr2)
whole_pos1 = bin_offset[chr1_index] + pos1_index
whole_pos2 = bin_offset[chr2_index] + pos2_index
read_count_whole_genome[whole_pos1][whole_pos2] += 1
read_count_whole_genome[whole_pos2][whole_pos1] += 1
for chrn in read_count_per_chr:
for i in range(0, len(read_count_per_chr[chrn])):
for j in range(0, len(read_count_per_chr[chrn][i])):
if read_count_per_chr[chrn][i][j] != 0:
read_count_per_chr[chrn][i][j] = log(read_count_per_chr[chrn][i][j], 2)
else:
read_count_per_chr[chrn][i][j] = -float('inf')
for i in range(0, len(read_count_whole_genome)):
for j in range(0, len(read_count_whole_genome[i])):
if read_count_whole_genome[i][j] != 0:
read_count_whole_genome[i][j] = log(read_count_whole_genome[i][j], 2)
else:
read_count_whole_genome[i][j] = -float('inf')
return read_count_per_chr, read_count_whole_genome
# Draw heatmap of allhic result with matplotlib
def draw_heatmap(data, chrn, bin_size, ext):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
short_bin_size = bin_size.upper()
if bin_size[-9:] == '000000000':
short_bin_size = bin_size[:-9]+'G'
elif bin_size[-6:] == '000000':
short_bin_size = bin_size[:-6]+'M'
elif bin_size[-3:] == '000':
short_bin_size = bin_size[:-3]+'K'
ax = plt.gca()
if chrn != 'all':
file_prefix = short_bin_size + "_" + chrn
else:
file_prefix = short_bin_size + '_Whole_genome'
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+' Draw '+file_prefix)
with open('log.csv', 'w') as flog:
for i in range(0, len(data)):
for j in range(0, len(data[i])):
flog.write("%s,"%(str(data[i][j])))
flog.write('\n')
# mpl.cm.YlOrRd
cmap = plt.get_cmap('YlOrRd')
cmap.set_over('black')
if chrn != 'all':
hmap = ax.imshow(data, interpolation='nearest', origin='lower', cmap=cmap, aspect='auto')
else:
hmap = ax.imshow(data, interpolation='nearest', cmap=cmap, aspect='auto')
plt.colorbar(mappable=hmap,cax=None, ax=None, shrink=0.5)
plt.tick_params(labelsize=6)
for ticks in ax.get_xticklabels():
ticks.set_rotation(90)
for ticks in ax.get_yticklabels():
ticks.set_rotation(0)
if chrn != 'all':
title = chrn+'_'+short_bin_size
else:
title = 'Whole_genome_'+short_bin_size
plt.xlabel("Bins ("+short_bin_size.lower()+"b per bin)", fontsize=8)
if chrn == 'all':
plt.xticks([])
plt.yticks([])
plt.title(title, y=1.01, fontsize=12)
else:
plt.title(title, y=1.1, fontsize=12)
    plt.savefig(file_prefix+'.'+ext, format=ext, bbox_inches='tight', dpi=200)
plt.close('all')
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Notice: This script is using for drawing heatmap of the all-hic reasult")
print("Usage: python "+sys.argv[0]+" <sam/bam file> <chr_list> <pic_ext> <out_filter_list>")
print("\t<sam/bam_file> is the sam or bam file filtered by allhic")
print("\t<chr_list> is the file contain ordered chrs and length")
print("\t<pic_ext> is the file type of picture")
print("\t<out_filter_list> is the list of reads to be filtered")
else:
sam_bam_file = sys.argv[1]
chr_list = sys.argv[2]
ext = sys.argv[3]
out_list = sys.argv[4]
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Step 1: Get chromosome length")
chr_len_db, chr_order = get_chr_len(chr_list)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Step 2: Calculating and Drawing heatmap")
bin_size = "150k"
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Get read position based on chromosome")
read_on_chr = get_read_pos_with_sam_bam_file(sam_bam_file, chr_len_db, bin_size, out_list)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Calculating")
read_count_per_chr, read_count_whole_genome = calc_read_count_per_bin(chr_len_db, chr_order, read_on_chr, bin_size)
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Drawing heatmap")
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Drawing with bin size "+str(bin_size))
for chrn in read_count_per_chr:
draw_heatmap(read_count_per_chr[chrn], chrn, bin_size, ext)
draw_heatmap(read_count_whole_genome, 'all', bin_size, ext)
del read_count_per_chr, read_count_whole_genome
gc.collect()
del read_on_chr
gc.collect()
print(time.strftime('[%H:%M:%S]',time.localtime(time.time()))+" Success") | en | 0.74856 | #!/usr/bin/env python # Get position of read based on chr with sam or bam file # Get chromosome length # Calc read counts on each bin # Draw heatmap of allhic result with matplotlib # mpl.cm.YlOrRd | 2.315361 | 2 |
netket/custom/fermion_operator.py | yannra/netket | 0 | 6630928 | <gh_stars>0
import numpy as np
from numba import jit
from numba.typed import List
from netket.operator._abstract_operator import AbstractOperator
import numbers
class SparseHermitianFermionOperator(AbstractOperator):
def __init__(self, hilbert, prefactors, sites, spins):
self._hilbert = hilbert
self._prefactors = prefactors
self._sites = sites
self._spins = spins
super().__init__()
@property
def size(self):
return self._hilbert.size
@property
def hilbert(self):
return self._hilbert
def get_conn_flattened(self, x, sections):
x = x.astype(np.uint8)
x_primes, mels = self._get_conn_flattened_kernel(
x,
sections,
self._prefactors,
self._sites,
self._spins,
)
pruning = np.nonzero(mels == 0.)[0]
if len(pruning) > 0:
pruning = np.sort(pruning)
self.prune_elements_sections(pruning, sections)
x_primes = np.delete(x_primes, pruning, axis=0)
mels = np.delete(mels, pruning, axis=0)
return x_primes, mels
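    # Note: sections[b] holds the exclusive end offset into the returned x_primes/mels for
    # input sample b, so sample b's connected configurations live in
    # x_primes[sections[b-1]:sections[b]] (with 0 as the start for b == 0); pruning the zero
    # matrix elements keeps these offsets consistent via prune_elements_sections below.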
@staticmethod
@jit(nopython=True)
def prune_elements_sections(pruning, sections):
count = 0
for b in range(sections.shape[0]):
while count < len(pruning):
if pruning[count] >= sections[b]:
break
count += 1
sections[b] -= count
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(x, sections, prefactors, sites, spins):
x_prime = np.empty((x.shape[0]*prefactors.shape[0], x.shape[1]), dtype=np.uint8)
mels = np.empty(x.shape[0]*prefactors.shape[0], dtype=np.complex128)
c = 0
for b in range(x.shape[0]):
for j in range(prefactors.shape[0]):
x_prime[c + j] = np.copy(x[b])
mels[c + j] = prefactors[j]
for i in range(sites.shape[1]-1, -1, -1):
annihilate_site = sites[j, i, 0]
create_site = sites[j, i, 1]
if annihilate_site != create_site:
left_limit = min(annihilate_site, create_site)
right_limit = max(annihilate_site, create_site)
if left_limit+1 < right_limit:
if spins[j,i] == 0:
mels[c + j] *= (-1)**np.sum((x_prime[c + j][left_limit+1:right_limit])%2)
else:
mels[c + j] *= (-1)**np.sum((x_prime[c + j][left_limit+1:right_limit] > 1))
target_occ = x_prime[c+j, annihilate_site]
if spins[j, i] == 0:
if target_occ%2 == 1:
target_occ -= 1
else:
mels[c + j] = 0
break
else:
if target_occ > 1:
target_occ -= 2
else:
mels[c + j] = 0
break
x_prime[c + j, annihilate_site] = target_occ
target_occ = x_prime[c+j, create_site]
if spins[j, i] == 0:
if target_occ%2 == 0:
target_occ += 1
else:
mels[c + j] = 0
break
else:
if target_occ <= 1:
target_occ += 2
else:
mels[c + j] = 0
break
x_prime[c + j, create_site] = target_occ
else:
target_occ = x_prime[c+j, annihilate_site]
if ((spins[j, i] != 0 or target_occ%2 == 0) and (spins[j, i] != 1 or target_occ <= 1)):
mels[c + j] = 0
break
c += prefactors.shape[0]
sections[b] = c
return x_prime, mels
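    # Note: the (-1)**sum(...) factors above appear to implement the fermionic sign
    # (a Jordan-Wigner-like string) picked up when an operator is moved past the occupied
    # orbitals of the same spin sector lying strictly between the annihilation and creation
    # sites, assuming occupations are encoded as 0/1/2/3 (empty/up/down/doubly occupied).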
class FermionTotalSpinNumberOperator(SparseHermitianFermionOperator):
def __init__(self, hilbert, prefactor):
self._prefactor = prefactor
super().__init__(hilbert, np.empty(0, dtype=np.complex128), np.empty((0,0,0), dtype=int), np.empty((0,0), dtype=int))
def get_conn_flattened(self, x, sections):
np.copyto(sections, np.arange(x.shape[0])+1)
x = x.astype(np.uint8)
x_primes = np.copy(x)
mels = self._prefactor*np.sum(x_primes==3, axis=1).astype(np.complex128)
pruning = np.nonzero(mels == 0.)[0]
if len(pruning) > 0:
pruning = np.sort(pruning)
self.prune_elements_sections(pruning, sections)
x_primes = np.delete(x_primes, pruning, axis=0)
mels = np.delete(mels, pruning, axis=0)
return x_primes, mels
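    # Note: with the 0/1/2/3 occupation encoding assumed above, this operator is diagonal in
    # the occupation basis and its matrix element is prefactor times the number of doubly
    # occupied sites (entries equal to 3) in each configuration.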
class FermionSumOperators(AbstractOperator):
def __init__(self, operators):
assert len(operators) > 0
self._operators = operators
for op in operators:
assert(op._hilbert == operators[0].hilbert)
super().__init__()
@property
def size(self):
return self._operators[0].size
@property
def hilbert(self):
return self._operators[0]._hilbert
def get_conn_flattened(self, x, sections):
x_primes = List()
mels = List()
sections_result = List()
total_configs = 0
for op in self._operators:
secs = sections.copy()
x_primes_local, mels_local = op.get_conn_flattened(x, secs)
x_primes.append(x_primes_local)
mels.append(mels_local)
sections_result.append(secs)
total_configs += len(mels_local)
return self.merge_operators(sections, total_configs, x_primes, mels, sections_result)
@staticmethod
@jit(nopython=True)
def merge_operators(sections, total_configs, x_primes, mels, sections_result):
x_primes_final = np.empty((total_configs, x_primes[0].shape[1]), dtype=np.uint8)
mels_final = np.empty(total_configs, dtype=np.complex128)
lower_lim = 0
for j in range(len(sections)):
for i in range(len(x_primes)):
if j == 0:
local_lower_lim = 0
else:
local_lower_lim = sections_result[i][j-1]
local_upper_lim = sections_result[i][j]
upper_lim = lower_lim + (local_upper_lim-local_lower_lim)
x_primes_final[lower_lim:upper_lim,:] = x_primes[i][local_lower_lim:sections_result[i][j],:]
mels_final[lower_lim:upper_lim] = mels[i][local_lower_lim:sections_result[i][j]]
lower_lim = upper_lim
sections[j] = upper_lim
# TODO: remove duplicates?
return x_primes_final, mels_final
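    # Illustrative, hypothetical helper (not called anywhere in this module).
    # `sections` is cumulative: after get_conn_flattened, the connected
    # configurations of input sample b occupy rows sections[b-1]:sections[b]
    # of the flattened arrays (with an implicit 0 before the first sample).
    @staticmethod
    def _example_sections_slices(sections=np.array([2, 5, 6])):
        starts = np.concatenate(([0], sections[:-1]))
        return [(int(lo), int(hi)) for lo, hi in zip(starts, sections)]  # -> [(0, 2), (2, 5), (5, 6)]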
| import numpy as np
from numba import jit
from numba.typed import List
from netket.operator._abstract_operator import AbstractOperator
import numbers
class SparseHermitianFermionOperator(AbstractOperator):
def __init__(self, hilbert, prefactors, sites, spins):
self._hilbert = hilbert
self._prefactors = prefactors
self._sites = sites
self._spins = spins
super().__init__()
@property
def size(self):
return self._hilbert.size
@property
def hilbert(self):
return self._hilbert
def get_conn_flattened(self, x, sections):
x = x.astype(np.uint8)
x_primes, mels = self._get_conn_flattened_kernel(
x,
sections,
self._prefactors,
self._sites,
self._spins,
)
pruning = np.nonzero(mels == 0.)[0]
if len(pruning) > 0:
pruning = np.sort(pruning)
self.prune_elements_sections(pruning, sections)
x_primes = np.delete(x_primes, pruning, axis=0)
mels = np.delete(mels, pruning, axis=0)
return x_primes, mels
@staticmethod
@jit(nopython=True)
def prune_elements_sections(pruning, sections):
count = 0
for b in range(sections.shape[0]):
while count < len(pruning):
if pruning[count] >= sections[b]:
break
count += 1
sections[b] -= count
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(x, sections, prefactors, sites, spins):
x_prime = np.empty((x.shape[0]*prefactors.shape[0], x.shape[1]), dtype=np.uint8)
mels = np.empty(x.shape[0]*prefactors.shape[0], dtype=np.complex128)
c = 0
for b in range(x.shape[0]):
for j in range(prefactors.shape[0]):
x_prime[c + j] = np.copy(x[b])
mels[c + j] = prefactors[j]
for i in range(sites.shape[1]-1, -1, -1):
annihilate_site = sites[j, i, 0]
create_site = sites[j, i, 1]
if annihilate_site != create_site:
left_limit = min(annihilate_site, create_site)
right_limit = max(annihilate_site, create_site)
if left_limit+1 < right_limit:
if spins[j,i] == 0:
mels[c + j] *= (-1)**np.sum((x_prime[c + j][left_limit+1:right_limit])%2)
else:
mels[c + j] *= (-1)**np.sum((x_prime[c + j][left_limit+1:right_limit] > 1))
target_occ = x_prime[c+j, annihilate_site]
if spins[j, i] == 0:
if target_occ%2 == 1:
target_occ -= 1
else:
mels[c + j] = 0
break
else:
if target_occ > 1:
target_occ -= 2
else:
mels[c + j] = 0
break
x_prime[c + j, annihilate_site] = target_occ
target_occ = x_prime[c+j, create_site]
if spins[j, i] == 0:
if target_occ%2 == 0:
target_occ += 1
else:
mels[c + j] = 0
break
else:
if target_occ <= 1:
target_occ += 2
else:
mels[c + j] = 0
break
x_prime[c + j, create_site] = target_occ
else:
target_occ = x_prime[c+j, annihilate_site]
if ((spins[j, i] != 0 or target_occ%2 == 0) and (spins[j, i] != 1 or target_occ <= 1)):
mels[c + j] = 0
break
c += prefactors.shape[0]
sections[b] = c
return x_prime, mels
class FermionTotalSpinNumberOperator(SparseHermitianFermionOperator):
def __init__(self, hilbert, prefactor):
self._prefactor = prefactor
super().__init__(hilbert, np.empty(0, dtype=np.complex128), np.empty((0,0,0), dtype=int), np.empty((0,0), dtype=int))
def get_conn_flattened(self, x, sections):
np.copyto(sections, np.arange(x.shape[0])+1)
x = x.astype(np.uint8)
x_primes = np.copy(x)
mels = self._prefactor*np.sum(x_primes==3, axis=1).astype(np.complex128)
pruning = np.nonzero(mels == 0.)[0]
if len(pruning) > 0:
pruning = np.sort(pruning)
self.prune_elements_sections(pruning, sections)
x_primes = np.delete(x_primes, pruning, axis=0)
mels = np.delete(mels, pruning, axis=0)
return x_primes, mels
class FermionSumOperators(AbstractOperator):
def __init__(self, operators):
assert len(operators) > 0
self._operators = operators
for op in operators:
            assert op.hilbert == operators[0].hilbert
super().__init__()
@property
def size(self):
return self._operators[0].size
@property
def hilbert(self):
return self._operators[0]._hilbert
def get_conn_flattened(self, x, sections):
x_primes = List()
mels = List()
sections_result = List()
total_configs = 0
for op in self._operators:
secs = sections.copy()
x_primes_local, mels_local = op.get_conn_flattened(x, secs)
x_primes.append(x_primes_local)
mels.append(mels_local)
sections_result.append(secs)
total_configs += len(mels_local)
return self.merge_operators(sections, total_configs, x_primes, mels, sections_result)
@staticmethod
@jit(nopython=True)
def merge_operators(sections, total_configs, x_primes, mels, sections_result):
x_primes_final = np.empty((total_configs, x_primes[0].shape[1]), dtype=np.uint8)
mels_final = np.empty(total_configs, dtype=np.complex128)
lower_lim = 0
for j in range(len(sections)):
for i in range(len(x_primes)):
if j == 0:
local_lower_lim = 0
else:
local_lower_lim = sections_result[i][j-1]
local_upper_lim = sections_result[i][j]
upper_lim = lower_lim + (local_upper_lim-local_lower_lim)
x_primes_final[lower_lim:upper_lim,:] = x_primes[i][local_lower_lim:sections_result[i][j],:]
mels_final[lower_lim:upper_lim] = mels[i][local_lower_lim:sections_result[i][j]]
lower_lim = upper_lim
sections[j] = upper_lim
# TODO: remove duplicates?
return x_primes_final, mels_final | gu | 0.065878 | # TODO: remove duplicates? | 2.100439 | 2 |
core/migrations/0002_auto_20191223_1155.py | majestylink/majestyAccencis | 0 | 6630929 | # Generated by Django 2.2.7 on 2019-12-23 10:55
import ckeditor.fields
import core.helpers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='HomePageSlider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('active', models.BooleanField(default=False)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Home Page Slider',
'verbose_name_plural': 'Home Page Sliders',
'ordering': ['-updated'],
},
),
migrations.CreateModel(
name='MainPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=100, unique=True)),
('sub_title', models.CharField(blank=True, max_length=255, null=True)),
('img', models.ImageField(blank=True, help_text='Image size should be 922x731 px', null=True, upload_to='pages/img', verbose_name='Main Image')),
('body', ckeditor.fields.RichTextField(blank=True, null=True)),
('vid_file', models.FileField(blank=True, help_text='Upload Video File', null=True, upload_to='blog/videos')),
('youtube_video_id', models.CharField(blank=True, help_text='Youtube Video ID e.g L0I7i_lE5zA. Not Complete Url', max_length=20, null=True)),
('extra_info', tinymce.models.HTMLField(blank=True, null=True)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('on_navigation', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Main Page',
'verbose_name_plural': 'Main Pages',
'ordering': ['-title'],
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('logo', models.ImageField(help_text='Image size is 340x145 px', upload_to='partners')),
('website', models.CharField(help_text='Start with http:// or https://', max_length=200)),
],
),
migrations.CreateModel(
name='SiteInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=100, unique=True)),
('info', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Site Information',
'verbose_name_plural': 'Site Informations',
'ordering': ['-updated'],
},
),
migrations.RenameField(
model_name='address',
old_name='zip',
new_name='zip_code',
),
migrations.AddField(
model_name='userprofile',
name='active',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userprofile',
name='address',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='bio',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='dob',
field=models.DateField(blank=True, help_text='Date of Birth', null=True),
),
migrations.AddField(
model_name='userprofile',
name='gender',
field=models.CharField(blank=True, choices=[('Male', 'Male'), ('Female', 'Female'), ('Others', 'Others')], default='Others', help_text='Gender', max_length=6, null=True),
),
migrations.AddField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, help_text='Image size is 270x308 px', null=True, upload_to='profile_pics'),
),
migrations.AddField(
model_name='userprofile',
name='payant_customer_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='paystack_customer_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='phone_number',
field=models.CharField(blank=True, max_length=15, null=True),
),
migrations.AddField(
model_name='userprofile',
name='reg_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='state',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='uid',
field=models.CharField(default=core.helpers.getUniqueId, editable=False, max_length=20),
),
migrations.AddField(
model_name='userprofile',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='core.UserProfile'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='SliderImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.ImageField(help_text='Image size is 1900px width and 1267px height', upload_to='sliders/img')),
('header', models.CharField(max_length=100)),
('sub_title', models.CharField(blank=True, max_length=300, null=True)),
('button_1', models.CharField(blank=True, max_length=50, null=True)),
('button_1_url', models.CharField(blank=True, max_length=100, null=True)),
('button_2', models.CharField(blank=True, max_length=50, null=True)),
('button_2_url', models.CharField(blank=True, max_length=100, null=True)),
('updated', models.DateTimeField(auto_now=True)),
('slider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sliders', to='core.HomePageSlider')),
],
options={
'verbose_name': 'Slider Image',
'verbose_name_plural': 'Slider Images',
'ordering': ['-updated'],
},
),
]
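# Illustrative note (assumption, not generated by Django): this migration
# depends on core.0001_initial as declared above and would typically be
# applied with `python manage.py migrate core 0002_auto_20191223_1155`.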
| # Generated by Django 2.2.7 on 2019-12-23 10:55
import ckeditor.fields
import core.helpers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='HomePageSlider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('active', models.BooleanField(default=False)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Home Page Slider',
'verbose_name_plural': 'Home Page Sliders',
'ordering': ['-updated'],
},
),
migrations.CreateModel(
name='MainPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=100, unique=True)),
('sub_title', models.CharField(blank=True, max_length=255, null=True)),
('img', models.ImageField(blank=True, help_text='Image size should be 922x731 px', null=True, upload_to='pages/img', verbose_name='Main Image')),
('body', ckeditor.fields.RichTextField(blank=True, null=True)),
('vid_file', models.FileField(blank=True, help_text='Upload Video File', null=True, upload_to='blog/videos')),
('youtube_video_id', models.CharField(blank=True, help_text='Youtube Video ID e.g L0I7i_lE5zA. Not Complete Url', max_length=20, null=True)),
('extra_info', tinymce.models.HTMLField(blank=True, null=True)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('on_navigation', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Main Page',
'verbose_name_plural': 'Main Pages',
'ordering': ['-title'],
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('logo', models.ImageField(help_text='Image size is 340x145 px', upload_to='partners')),
('website', models.CharField(help_text='Start with http:// or https://', max_length=200)),
],
),
migrations.CreateModel(
name='SiteInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=100, unique=True)),
('info', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Site Information',
'verbose_name_plural': 'Site Informations',
'ordering': ['-updated'],
},
),
migrations.RenameField(
model_name='address',
old_name='zip',
new_name='zip_code',
),
migrations.AddField(
model_name='userprofile',
name='active',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userprofile',
name='address',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='bio',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='dob',
field=models.DateField(blank=True, help_text='Date of Birth', null=True),
),
migrations.AddField(
model_name='userprofile',
name='gender',
field=models.CharField(blank=True, choices=[('Male', 'Male'), ('Female', 'Female'), ('Others', 'Others')], default='Others', help_text='Gender', max_length=6, null=True),
),
migrations.AddField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, help_text='Image size is 270x308 px', null=True, upload_to='profile_pics'),
),
migrations.AddField(
model_name='userprofile',
name='payant_customer_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='paystack_customer_id',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='phone_number',
field=models.CharField(blank=True, max_length=15, null=True),
),
migrations.AddField(
model_name='userprofile',
name='reg_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='state',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='userprofile',
name='uid',
field=models.CharField(default=core.helpers.getUniqueId, editable=False, max_length=20),
),
migrations.AddField(
model_name='userprofile',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='core.UserProfile'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='SliderImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.ImageField(help_text='Image size is 1900px width and 1267px height', upload_to='sliders/img')),
('header', models.CharField(max_length=100)),
('sub_title', models.CharField(blank=True, max_length=300, null=True)),
('button_1', models.CharField(blank=True, max_length=50, null=True)),
('button_1_url', models.CharField(blank=True, max_length=100, null=True)),
('button_2', models.CharField(blank=True, max_length=50, null=True)),
('button_2_url', models.CharField(blank=True, max_length=100, null=True)),
('updated', models.DateTimeField(auto_now=True)),
('slider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sliders', to='core.HomePageSlider')),
],
options={
'verbose_name': 'Slider Image',
'verbose_name_plural': 'Slider Images',
'ordering': ['-updated'],
},
),
]
| en | 0.821583 | # Generated by Django 2.2.7 on 2019-12-23 10:55 | 1.86156 | 2 |
spider/commands/shell.py | fakegit/boris-spider | 68 | 6630930 | # -*- coding: utf-8 -*-
"""
Created on 2020/5/9 12:37 AM
---------
@summary:
---------
@author: Boris
@email: <EMAIL>
"""
import json
import re
import sys
import IPython
from spider import Request
def request(**kwargs):
kwargs.setdefault("proxies", None)
response = Request(**kwargs).get_response()
print(response)
IPython.embed(header="now you can use response")
def fetch_url(url):
request(url=url)
def fetch_curl(curl_args):
"""
    Parse and execute a curl-style request.
:param curl_args:
[url, '-H', 'xxx', '-H', 'xxx', '--data-binary', '{"xxx":"xxx"}', '--compressed']
:return:
"""
url = curl_args[0]
curl_args.pop(0)
headers = {}
data = {}
for i in range(0, len(curl_args), 2):
if curl_args[i] == "-H":
regex = "([^:\s]*)[:|\s]*(.*)"
result = re.search(regex, curl_args[i + 1], re.S).groups()
if result[0] in headers:
headers[result[0]] = headers[result[0]] + "&" + result[1]
else:
headers[result[0]] = result[1].strip()
elif curl_args[i] == "--data-binary":
data = json.loads(curl_args[i + 1])
request(url=url, data=data, headers=headers)
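def _example_header_parse():
    # Illustrative, hypothetical helper (not called anywhere in this module).
    # Minimal, self-contained check of the header-parsing pattern used in
    # fetch_curl(); it performs no network request.
    groups = re.search(r"([^:\s]*)[:|\s]*(.*)", "User-Agent: demo/1.0", re.S).groups()
    return groups  # -> ('User-Agent', ' demo/1.0') before stripping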
def usage():
"""
    Download debugger.
usage: spider shell [options] [args]
optional arguments:
        -u, --url   fetch the given url
        -c, --curl  fetch a curl-formatted request
"""
print(usage.__doc__)
sys.exit()
def main():
args = sys.argv
if len(args) < 3:
usage()
elif args[1] in ("-h", "--help"):
usage()
elif args[1] in ("-u", "--url"):
fetch_url(args[2])
elif args[1] in ("-c", "--curl"):
fetch_curl(args[2:])
else:
usage()
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""
Created on 2020/5/9 12:37 AM
---------
@summary:
---------
@author: Boris
@email: <EMAIL>
"""
import json
import re
import sys
import IPython
from spider import Request
def request(**kwargs):
kwargs.setdefault("proxies", None)
response = Request(**kwargs).get_response()
print(response)
IPython.embed(header="now you can use response")
def fetch_url(url):
request(url=url)
def fetch_curl(curl_args):
"""
    Parse and execute a curl-style request.
:param curl_args:
[url, '-H', 'xxx', '-H', 'xxx', '--data-binary', '{"xxx":"xxx"}', '--compressed']
:return:
"""
url = curl_args[0]
curl_args.pop(0)
headers = {}
data = {}
for i in range(0, len(curl_args), 2):
if curl_args[i] == "-H":
regex = "([^:\s]*)[:|\s]*(.*)"
result = re.search(regex, curl_args[i + 1], re.S).groups()
if result[0] in headers:
headers[result[0]] = headers[result[0]] + "&" + result[1]
else:
headers[result[0]] = result[1].strip()
elif curl_args[i] == "--data-binary":
data = json.loads(curl_args[i + 1])
request(url=url, data=data, headers=headers)
def usage():
"""
    Download debugger.
usage: spider shell [options] [args]
optional arguments:
        -u, --url   fetch the given url
        -c, --curl  fetch a curl-formatted request
"""
print(usage.__doc__)
sys.exit()
def main():
args = sys.argv
if len(args) < 3:
usage()
elif args[1] in ("-h", "--help"):
usage()
elif args[1] in ("-u", "--url"):
fetch_url(args[2])
elif args[1] in ("-c", "--curl"):
fetch_curl(args[2:])
else:
usage()
if __name__ == "__main__":
main()
| zh | 0.193992 | # -*- coding: utf-8 -*- Created on 2020/5/9 12:37 AM --------- @summary: --------- @author: Boris @email: <EMAIL> 解析及抓取curl请求 :param curl_args: [url, '-H', 'xxx', '-H', 'xxx', '--data-binary', '{"xxx":"xxx"}', '--compressed'] :return: 下载调试器 usage: spider shell [options] [args] optional arguments: -u, --url 抓取指定url -c, --curl 抓取curl格式的请求 | 2.920787 | 3 |
Course I/Алгоритмы Python/Part1/семинары/pract5/task1.py | GeorgiyDemo/FA | 27 | 6630931 | """
Solve a quadratic equation using the discriminant.
"""
import math
class DescrSolver:
def __init__(self, input_str):
self.input_str = input_str
self.parse_exp()
self.get_descriminant()
def is_digital(self, number):
try:
float(number)
return True
except:
return False
def parse_exp(self):
input_str = self.input_str
        # Determine coefficient a
symbols = 1
return_flag = False
while return_flag == False:
index_a = input_str[:symbols]
if self.is_digital(index_a) == False and symbols != 1:
return_flag = True
symbols += 1
index_a = input_str[: symbols - 2]
        # Determine coefficient b
symbols = input_str.rindex("x")
buf_index_b = input_str[:symbols]
symbols = 1
return_flag = False
while return_flag == False:
index_b = buf_index_b[symbols:]
if self.is_digital(index_b) == True:
return_flag = True
symbols += 1
        # Determine coefficient c
symbols = input_str.rindex("=")
buf_index_c = input_str[:symbols]
symbols = 1
return_flag = False
while return_flag == False:
index_c = buf_index_c[symbols:]
if self.is_digital(index_c) == True:
return_flag = True
symbols += 1
        print("Coefficient a =", index_a)
        print("Coefficient b =", index_b)
        print("Coefficient c =", index_c)
self.index_a = float(index_a)
self.index_b = float(index_b)
self.index_c = float(index_c)
def get_descriminant(self):
a = self.index_a
b = self.index_b
c = self.index_c
D = b ** 2 - 4 * a * c
print("D =", D)
if D == 0:
A1 = -b / (2 * a)
A2 = A1
            print("\nAnswer:")
            print("Root A1 =", A1)
            print("Root A2 =", A2)
elif D > 0:
A1 = (-b + math.sqrt(D)) / (2 * a)
A2 = (-b - math.sqrt(D)) / (2 * a)
            print("\nAnswer:")
            print("Root A1 =", A1)
            print("Root A2 =", A2)
else:
            print("No real solution, D < 0")
if __name__ == "__main__":
    input_str = input("Enter a quadratic expression of the form ax^2+bx+c=0 -> ")
DescrSolver(input_str)
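def _example_quadratic(a=1.0, b=5.0, c=6.0):
    # Illustrative, hypothetical helper (not called anywhere in this module).
    # The same discriminant route used above, without the string parsing;
    # returns the two real roots when D >= 0, otherwise None.
    D = b ** 2 - 4 * a * c
    if D < 0:
        return None
    return (-b + math.sqrt(D)) / (2 * a), (-b - math.sqrt(D)) / (2 * a)  # -> (-2.0, -3.0)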
| """
Solve a quadratic equation using the discriminant.
"""
import math
class DescrSolver:
def __init__(self, input_str):
self.input_str = input_str
self.parse_exp()
self.get_descriminant()
def is_digital(self, number):
try:
float(number)
return True
except:
return False
def parse_exp(self):
input_str = self.input_str
        # Determine coefficient a
symbols = 1
return_flag = False
while return_flag == False:
index_a = input_str[:symbols]
if self.is_digital(index_a) == False and symbols != 1:
return_flag = True
symbols += 1
index_a = input_str[: symbols - 2]
        # Determine coefficient b
symbols = input_str.rindex("x")
buf_index_b = input_str[:symbols]
symbols = 1
return_flag = False
while return_flag == False:
index_b = buf_index_b[symbols:]
if self.is_digital(index_b) == True:
return_flag = True
symbols += 1
        # Determine coefficient c
symbols = input_str.rindex("=")
buf_index_c = input_str[:symbols]
symbols = 1
return_flag = False
while return_flag == False:
index_c = buf_index_c[symbols:]
if self.is_digital(index_c) == True:
return_flag = True
symbols += 1
        print("Coefficient a =", index_a)
        print("Coefficient b =", index_b)
        print("Coefficient c =", index_c)
self.index_a = float(index_a)
self.index_b = float(index_b)
self.index_c = float(index_c)
def get_descriminant(self):
a = self.index_a
b = self.index_b
c = self.index_c
D = b ** 2 - 4 * a * c
print("D =", D)
if D == 0:
A1 = -b / (2 * a)
A2 = A1
            print("\nAnswer:")
            print("Root A1 =", A1)
            print("Root A2 =", A2)
elif D > 0:
A1 = (-b + math.sqrt(D)) / (2 * a)
A2 = (-b - math.sqrt(D)) / (2 * a)
            print("\nAnswer:")
            print("Root A1 =", A1)
            print("Root A2 =", A2)
else:
            print("No real solution, D < 0")
if __name__ == "__main__":
    input_str = input("Enter a quadratic expression of the form ax^2+bx+c=0 -> ")
DescrSolver(input_str)
| ru | 0.958644 | Реализовать решение квадратного уравнения через дискриминант # Определяем коэффициент a # Определяем коэффициент b # Определяем коэффициент c | 3.747272 | 4 |
app/app.py | dcwangmit01/stock-check | 0 | 6630932 | <filename>app/app.py<gh_stars>0
import logging
import os
import re
from app import utils
log = logging.getLogger(__name__)
class App(object):
_singleton = dict()
_jinja_dict = None
def __init__(self):
# Return a singleton
self.__dict__ = App._singleton
def get_config_dict(self, ctx, list_of_files=[], initial_dict={}):
# Manually cache, since memoization doesn't work with dict values
if App._jinja_dict is not None:
return App._jinja_dict
d = initial_dict
# Make all environment variables starting with 'STOCKCHECK_'
# accessible from the dict.
for k, v in os.environ.items():
if k.startswith('STOCKCHECK_'):
if 'env' not in d:
d['env'] = {}
d['env'][k] = v
# Add the config files as part of the dict
for filename in list_of_files:
m = re.match(r"^(.*)\.yaml$", filename)
assert m is not None, ("Unable to parse config base name from file {}".format(filename))
key = m.group(1)
d[key] = utils.YamlUtils.yaml_dict_from_file(os.path.join(ctx.home, filename))
# Render values containing nested jinja variables
r = utils.JinjaUtils.dict_self_render(d)
# Set the cache
App._jinja_dict = r
return r
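    # Illustrative, hypothetical helper (not called anywhere in this module).
    # App() is a Borg-style singleton: every instance shares App._singleton as
    # its __dict__, so state set on one instance is visible on all others.
    @staticmethod
    def _example_singleton_behavior():
        a, b = App(), App()
        a.marker = "shared"
        return b.marker  # -> "shared"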
| <filename>app/app.py<gh_stars>0
import logging
import os
import re
from app import utils
log = logging.getLogger(__name__)
class App(object):
_singleton = dict()
_jinja_dict = None
def __init__(self):
# Return a singleton
self.__dict__ = App._singleton
def get_config_dict(self, ctx, list_of_files=[], initial_dict={}):
# Manually cache, since memoization doesn't work with dict values
if App._jinja_dict is not None:
return App._jinja_dict
d = initial_dict
# Make all environment variables starting with 'STOCKCHECK_'
# accessible from the dict.
for k, v in os.environ.items():
if k.startswith('STOCKCHECK_'):
if 'env' not in d:
d['env'] = {}
d['env'][k] = v
# Add the config files as part of the dict
for filename in list_of_files:
m = re.match(r"^(.*)\.yaml$", filename)
assert m is not None, ("Unable to parse config base name from file {}".format(filename))
key = m.group(1)
d[key] = utils.YamlUtils.yaml_dict_from_file(os.path.join(ctx.home, filename))
# Render values containing nested jinja variables
r = utils.JinjaUtils.dict_self_render(d)
# Set the cache
App._jinja_dict = r
return r
| en | 0.789333 | # Return a singleton # Manually cache, since memoization doesn't work with dict values # Make all environment variables starting with 'STOCKCHECK_' # accessible from the dict. # Add the config files as part of the dict # Render values containing nested jinja variables # Set the cache | 2.389971 | 2 |
eoncloud_web/biz/urls.py | eoncloud-dev/eoncloud_web | 10 | 6630933 | <filename>eoncloud_web/biz/urls.py
from django.conf.urls import patterns, include, url
from rest_framework.urlpatterns import format_suffix_patterns
from biz.instance import views as instance_view
from biz.image import views as image_view
from biz.network import views as network_view
from biz.lbaas import views as lb_view
from biz.volume import views as volume_view
from biz.floating import views as floating_view
from biz.firewall import views as firewall_view
from biz.forum import views as forums_view
from biz.account import views as account_view
from biz.idc import views as idc_views
from biz.overview import views as overview_views
from biz.backup import views as backup_view
from biz.workflow import views as workflow_view
# various options and configurations
urlpatterns = [
url(r'^settings/monitor/$', instance_view.monitor_settings),
url(r'^settings/resource_types/$', workflow_view.resource_types),
url(r'^settings/data-centers/switch/$', idc_views.switch_list),
]
# instance&flavor
urlpatterns += [
url(r'^management-summary/$', overview_views.summary),
url(r'^init/data_center/$', overview_views.init_data_center),
url(r'^init/flavors/$', overview_views.init_flavors),
url(r'^init/images/$', overview_views.init_images),
url(r'^instances/$', instance_view.InstanceList.as_view()),
url(r'^instances/(?P<pk>[0-9]+)/$', instance_view.InstanceDetail.as_view()),
url(r'^instances/details/(?P<pk>[0-9]+)/$', instance_view.instance_detail_view),
url(r'^instances/status/$', instance_view.instance_status_view),
url(r'^instances/create/$', instance_view.instance_create_view),
url(r'^instances/search/$', instance_view.instance_search_view),
url(r'^instances/(?P<pk>[0-9]+)/action/$', instance_view.instance_action_view),
url(r'^instances/monitor/(?P<url>.*)$', instance_view.monitor_proxy),
url(r'^flavors/$', instance_view.FlavorList.as_view()),
url(r'^flavors/create/$', instance_view.create_flavor),
url(r'^flavors/update/$', instance_view.update_flavor),
url(r'^flavors/batch-delete/$', instance_view.delete_flavors),
url(r'^flavors/(?P<pk>[0-9]+)/$', instance_view.FlavorDetail.as_view()),
]
# image
urlpatterns += format_suffix_patterns([
url(r'^images/$', image_view.ImageList.as_view()),
url(r'^images/(?P<pk>[0-9]+)/$', image_view.ImageDetail.as_view()),
url(r'^images/create/$', image_view.create_image),
url(r'^images/update/$', image_view.update_image),
url(r'^images/batch-delete/$', image_view.delete_images),
])
# network
urlpatterns += format_suffix_patterns([
url(r'^networks/$', network_view.network_list_view),
url(r'^networks/create/$', network_view.network_create_view),
url(r'^networks/update/$', network_view.network_update),
url(r'^networks/status/$', network_view.network_status_view),
url(r'^networks/subnets/$', network_view.subnet_list_view),
url(r'^networks/delete/$', network_view.delete_network),
url(r'^networks/attach-router/$', network_view.attach_network_to_router),
url(r'^networks/detach-router/$', network_view.detach_network_from_router),
url(r'^networks/topology/$', network_view.network_topology_data_view),
])
# router
urlpatterns += format_suffix_patterns([
url(r'^routers/$', network_view.router_list_view),
url(r'^routers/create/$', network_view.router_create_view),
url(r'^routers/delete/$', network_view.router_delete_view),
url(r'^routers/search/$', network_view.router_search_view),
])
# LB
urlpatterns += format_suffix_patterns([
url(r'^lbs/$', lb_view.pool_list_view),
url(r'^lbs/(?P<pk>[0-9]+)/$', lb_view.pool_get_view),
url(r'^lbs/create/$', lb_view.pool_create_view),
url(r'^lbs/delete/$', lb_view.pool_delete_view),
url(r'^lbs/getavmonitor/(?P<pool_id>[0-9]+)/$', lb_view.get_available_monitor_view),
url(r'^lbs/poolmonitoraction/$', lb_view.pool_monitor_association_option_view),
url(r'^lbs/monitors/$', lb_view.pool_monitor_list_view),
url(r'^lbs/monitors/create/$', lb_view.pool_monitor_create_view),
url(r'^lbs/monitors/delete/$', lb_view.pool_monitor_delete_view),
url(r'^lbs/vip/create/$', lb_view.pool_vip_create_view),
url(r'^lbs/vip/update/$', lb_view.pool_vip_create_view),
url(r'^lbs/vip/delete/$', lb_view.pool_vip_delete_view),
url(r'^lbs/vip/floating/$', lb_view.pool_vip_associate_view),
url(r'^lbs/members/(?P<balancer_id>[0-9]+)/$', lb_view.pool_member_list_view),
url(r'^lbs/members/create/$', lb_view.pool_member_create_view),
url(r'^lbs/members/delete/$', lb_view.pool_member_delete_view),
url(r'^lbs/constant/$', lb_view.get_constant_view),
url(r'^lbs/status/$', lb_view.get_status_view),
])
# volume
urlpatterns += format_suffix_patterns([
url(r'^volumes/$', volume_view.volume_list_view),
url(r'^volumes/search/$', volume_view.volume_list_view_by_instance),
url(r'^volumes/create/$', volume_view.volume_create_view),
url(r'^volumes/update/$', volume_view.volume_update_view),
url(r'^volumes/action/$', volume_view.volume_action_view),
url(r'^volumes/status/$', volume_view.volume_status_view),
])
# floating
urlpatterns += format_suffix_patterns([
url(r'^floatings/$', floating_view.list_view),
#url(r'^floatings/search/$', floating_view.volume_list_view_by_instance),
#url(r'^floatings/update/$', floating_view.volume_update_view),
url(r'^floatings/create/$', floating_view.create_view),
url(r'^floatings/action/$', floating_view.floating_action_view),
url(r'^floatings/status/$', floating_view.floating_status_view),
url(r'^floatings/target_list/$', floating_view.floating_ip_target_list_view),
])
# FIREWALL
urlpatterns += format_suffix_patterns([
url(r'^firewall/$', firewall_view.firewall_list_view),
url(r'^firewall/create/$', firewall_view.firewall_create_view),
url(r'^firewall/delete/$', firewall_view.firewall_delete_view),
url(r'^firewall/firewall_rules/(?P<firewall_id>[0-9]+)/$', firewall_view.firewall_rule_list_view),
url(r'^firewall/firewall_rules/create/$', firewall_view.firewall_rule_create_view),
url(r'^firewall/firewall_rules/delete/$', firewall_view.firewall_rule_delete_view),
url(r'^firewall/default_rules/$', firewall_view.firewall_rule_view),
url(r'^firewall/server_change_firewall/$', firewall_view.instance_change_firewall_view),
])
# account
urlpatterns += format_suffix_patterns([
url(r'^account/contract/$', account_view.contract_view),
url(r'^account/quota/$', account_view.quota_view),
url(r'^account/create/$', account_view.create_user),
url(r'^account/is-name-unique/$', account_view.is_username_unique),
url(r'^account/is-email-unique/$', account_view.is_email_unique),
url(r'^account/is-mobile-unique/$', account_view.is_mobile_unique),
url(r'^operation/$', account_view.OperationList.as_view()),
url(r'^operation/filters$', account_view.operation_filters),
url(r'^users/$', account_view.UserList.as_view()),
url(r'^users/active/$', account_view.active_users),
url(r'^users/(?P<pk>[0-9]+)/$', account_view.UserDetail.as_view()),
url(r'^users/initialize/$', account_view.initialize_user),
url(r'^users/deactivate/$', account_view.deactivate_user),
url(r'^users/activate/$', account_view.activate_user),
url(r'^users/change-password/$', account_view.change_password),
url(r'^users/grant-workflow-approve/$', account_view.grant_workflow_approve),
url(r'^users/revoke-workflow-approve/$', account_view.revoke_workflow_approve),
url(r'^users/workflow-approvers/$', account_view.workflow_approvers),
url(r'^quotas/$', account_view.QuotaList.as_view()),
url(r'^quotas/(?P<pk>[0-9]+)/$', account_view.QuotaDetail.as_view()),
url(r'^quotas/batch-create/$', account_view.create_quotas),
url(r'^quotas/create/$', account_view.create_quota),
url(r'^quotas/delete/$', account_view.delete_quota),
url(r'^quota-resource-options/$', account_view.resource_options),
url(r'^notifications/broadcast/$', account_view.broadcast),
url(r'^notifications/data-center-broadcast/$', account_view.data_center_broadcast),
url(r'^notifications/announce/$', account_view.announce),
url(r'^notifications/$', account_view.NotificationList.as_view()),
url(r'^notifications/options/$', account_view.notification_options),
url(r'^notifications/(?P<pk>[0-9]+)/$', account_view.NotificationDetail.as_view()),
url(r'^feeds/$', account_view.FeedList.as_view()),
url(r'^feeds/(?P<pk>[0-9]+)/$', account_view.FeedDetail.as_view()),
url(r'^feeds/(?P<pk>[0-9]+)/mark-read/$', account_view.mark_read),
url(r'^feeds/status/$', account_view.feed_status),
])
# image
urlpatterns += format_suffix_patterns([
url(r'^contracts/$', account_view.ContractList.as_view()),
url(r'^contracts/create$', account_view.create_contract),
url(r'^contracts/update/$', account_view.update_contract),
url(r'^contracts/batch-delete/$', account_view.delete_contracts),
url(r'^contracts/(?P<pk>[0-9]+)/$', account_view.ContractDetail.as_view()),
])
# forum
urlpatterns += format_suffix_patterns([
url(r'^forums/$', forums_view.forum_list_view),
url(r'^forums/create/$', forums_view.forum_create_view),
url(r'^forums/delete/$', forums_view.forum_create_view),
url(r'^forums/close/$', forums_view.forum_close_forum_view),
url(r'^forums/reply/create/$', forums_view.forum_reply_create_view),
url(r'^forums/reply/$', forums_view.forum_reply_list_view),
url(r'^forum-replies/$', forums_view.forum_reply_list_view),
])
# idc
urlpatterns += format_suffix_patterns([
url(r'^data-centers/$', idc_views.DataCenterList.as_view()),
url(r'^data-centers/is-host-unique/$', idc_views.is_host_unique),
url(r'^data-centers/create/$', idc_views.create_data_center),
url(r'^data-centers/update/$', idc_views.update_data_center),
url(r'^data-centers/batch-delete/$', idc_views.delete_data_centers),
url(r'^user-data-centers/$', idc_views.UserDataCenterList.as_view()),
url(r'^user-data-centers/(?P<pk>[0-9]+)/$', idc_views.UserDataCenterDetail.as_view())
])
# backup
urlpatterns += format_suffix_patterns([
url(r'^backup/$', backup_view.BackupList.as_view()),
url(r'^backup/status/$', backup_view.backup_status_view),
url(r'^backup/create/$', backup_view.backup_create_view),
url(r'^backup/action/$', backup_view.backup_action_view),
])
# workflow
urlpatterns += [
url(r'^workflows/$', workflow_view.workflow_list),
url(r'^workflows/define/$', workflow_view.define_workflow),
url(r'^workflows/delete/$', workflow_view.delete_workflow),
url(r'^workflows/set-default/$', workflow_view.set_default_workflow),
url(r'^workflows/cancel-default/$', workflow_view.cancel_default_workflow),
url(r'^workflow-instances/$', workflow_view.flow_instances),
url(r'^workflow-instances/approve/$', workflow_view.approve),
url(r'^workflow-instances/rejected/$', workflow_view.reject),
url(r'^workflow-instances/status/$', workflow_view.workflow_status),
]
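# Illustrative, hypothetical helper (not used by Django's URL resolver).
# Framework-free sketch of the pk-capturing pattern used throughout this
# module; real resolution goes through the project-level urls.py, which is
# assumed here.
def _example_pk_match(path="instances/42/"):
    import re as _re
    m = _re.match(r'^instances/(?P<pk>[0-9]+)/$', path)
    return m.groupdict() if m else None  # -> {'pk': '42'}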
| <filename>eoncloud_web/biz/urls.py
from django.conf.urls import patterns, include, url
from rest_framework.urlpatterns import format_suffix_patterns
from biz.instance import views as instance_view
from biz.image import views as image_view
from biz.network import views as network_view
from biz.lbaas import views as lb_view
from biz.volume import views as volume_view
from biz.floating import views as floating_view
from biz.firewall import views as firewall_view
from biz.forum import views as forums_view
from biz.account import views as account_view
from biz.idc import views as idc_views
from biz.overview import views as overview_views
from biz.backup import views as backup_view
from biz.workflow import views as workflow_view
# various options and configurations
urlpatterns = [
url(r'^settings/monitor/$', instance_view.monitor_settings),
url(r'^settings/resource_types/$', workflow_view.resource_types),
url(r'^settings/data-centers/switch/$', idc_views.switch_list),
]
# instance&flavor
urlpatterns += [
url(r'^management-summary/$', overview_views.summary),
url(r'^init/data_center/$', overview_views.init_data_center),
url(r'^init/flavors/$', overview_views.init_flavors),
url(r'^init/images/$', overview_views.init_images),
url(r'^instances/$', instance_view.InstanceList.as_view()),
url(r'^instances/(?P<pk>[0-9]+)/$', instance_view.InstanceDetail.as_view()),
url(r'^instances/details/(?P<pk>[0-9]+)/$', instance_view.instance_detail_view),
url(r'^instances/status/$', instance_view.instance_status_view),
url(r'^instances/create/$', instance_view.instance_create_view),
url(r'^instances/search/$', instance_view.instance_search_view),
url(r'^instances/(?P<pk>[0-9]+)/action/$', instance_view.instance_action_view),
url(r'^instances/monitor/(?P<url>.*)$', instance_view.monitor_proxy),
url(r'^flavors/$', instance_view.FlavorList.as_view()),
url(r'^flavors/create/$', instance_view.create_flavor),
url(r'^flavors/update/$', instance_view.update_flavor),
url(r'^flavors/batch-delete/$', instance_view.delete_flavors),
url(r'^flavors/(?P<pk>[0-9]+)/$', instance_view.FlavorDetail.as_view()),
]
# image
urlpatterns += format_suffix_patterns([
url(r'^images/$', image_view.ImageList.as_view()),
url(r'^images/(?P<pk>[0-9]+)/$', image_view.ImageDetail.as_view()),
url(r'^images/create/$', image_view.create_image),
url(r'^images/update/$', image_view.update_image),
url(r'^images/batch-delete/$', image_view.delete_images),
])
# network
urlpatterns += format_suffix_patterns([
url(r'^networks/$', network_view.network_list_view),
url(r'^networks/create/$', network_view.network_create_view),
url(r'^networks/update/$', network_view.network_update),
url(r'^networks/status/$', network_view.network_status_view),
url(r'^networks/subnets/$', network_view.subnet_list_view),
url(r'^networks/delete/$', network_view.delete_network),
url(r'^networks/attach-router/$', network_view.attach_network_to_router),
url(r'^networks/detach-router/$', network_view.detach_network_from_router),
url(r'^networks/topology/$', network_view.network_topology_data_view),
])
# router
urlpatterns += format_suffix_patterns([
url(r'^routers/$', network_view.router_list_view),
url(r'^routers/create/$', network_view.router_create_view),
url(r'^routers/delete/$', network_view.router_delete_view),
url(r'^routers/search/$', network_view.router_search_view),
])
# LB
urlpatterns += format_suffix_patterns([
url(r'^lbs/$', lb_view.pool_list_view),
url(r'^lbs/(?P<pk>[0-9]+)/$', lb_view.pool_get_view),
url(r'^lbs/create/$', lb_view.pool_create_view),
url(r'^lbs/delete/$', lb_view.pool_delete_view),
url(r'^lbs/getavmonitor/(?P<pool_id>[0-9]+)/$', lb_view.get_available_monitor_view),
url(r'^lbs/poolmonitoraction/$', lb_view.pool_monitor_association_option_view),
url(r'^lbs/monitors/$', lb_view.pool_monitor_list_view),
url(r'^lbs/monitors/create/$', lb_view.pool_monitor_create_view),
url(r'^lbs/monitors/delete/$', lb_view.pool_monitor_delete_view),
url(r'^lbs/vip/create/$', lb_view.pool_vip_create_view),
url(r'^lbs/vip/update/$', lb_view.pool_vip_create_view),
url(r'^lbs/vip/delete/$', lb_view.pool_vip_delete_view),
url(r'^lbs/vip/floating/$', lb_view.pool_vip_associate_view),
url(r'^lbs/members/(?P<balancer_id>[0-9]+)/$', lb_view.pool_member_list_view),
url(r'^lbs/members/create/$', lb_view.pool_member_create_view),
url(r'^lbs/members/delete/$', lb_view.pool_member_delete_view),
url(r'^lbs/constant/$', lb_view.get_constant_view),
url(r'^lbs/status/$', lb_view.get_status_view),
])
# volume
urlpatterns += format_suffix_patterns([
url(r'^volumes/$', volume_view.volume_list_view),
url(r'^volumes/search/$', volume_view.volume_list_view_by_instance),
url(r'^volumes/create/$', volume_view.volume_create_view),
url(r'^volumes/update/$', volume_view.volume_update_view),
url(r'^volumes/action/$', volume_view.volume_action_view),
url(r'^volumes/status/$', volume_view.volume_status_view),
])
# floating
urlpatterns += format_suffix_patterns([
url(r'^floatings/$', floating_view.list_view),
#url(r'^floatings/search/$', floating_view.volume_list_view_by_instance),
#url(r'^floatings/update/$', floating_view.volume_update_view),
url(r'^floatings/create/$', floating_view.create_view),
url(r'^floatings/action/$', floating_view.floating_action_view),
url(r'^floatings/status/$', floating_view.floating_status_view),
url(r'^floatings/target_list/$', floating_view.floating_ip_target_list_view),
])
# FIREWALL
urlpatterns += format_suffix_patterns([
url(r'^firewall/$', firewall_view.firewall_list_view),
url(r'^firewall/create/$', firewall_view.firewall_create_view),
url(r'^firewall/delete/$', firewall_view.firewall_delete_view),
url(r'^firewall/firewall_rules/(?P<firewall_id>[0-9]+)/$', firewall_view.firewall_rule_list_view),
url(r'^firewall/firewall_rules/create/$', firewall_view.firewall_rule_create_view),
url(r'^firewall/firewall_rules/delete/$', firewall_view.firewall_rule_delete_view),
url(r'^firewall/default_rules/$', firewall_view.firewall_rule_view),
url(r'^firewall/server_change_firewall/$', firewall_view.instance_change_firewall_view),
])
# account
urlpatterns += format_suffix_patterns([
url(r'^account/contract/$', account_view.contract_view),
url(r'^account/quota/$', account_view.quota_view),
url(r'^account/create/$', account_view.create_user),
url(r'^account/is-name-unique/$', account_view.is_username_unique),
url(r'^account/is-email-unique/$', account_view.is_email_unique),
url(r'^account/is-mobile-unique/$', account_view.is_mobile_unique),
url(r'^operation/$', account_view.OperationList.as_view()),
url(r'^operation/filters$', account_view.operation_filters),
url(r'^users/$', account_view.UserList.as_view()),
url(r'^users/active/$', account_view.active_users),
url(r'^users/(?P<pk>[0-9]+)/$', account_view.UserDetail.as_view()),
url(r'^users/initialize/$', account_view.initialize_user),
url(r'^users/deactivate/$', account_view.deactivate_user),
url(r'^users/activate/$', account_view.activate_user),
url(r'^users/change-password/$', account_view.change_password),
url(r'^users/grant-workflow-approve/$', account_view.grant_workflow_approve),
url(r'^users/revoke-workflow-approve/$', account_view.revoke_workflow_approve),
url(r'^users/workflow-approvers/$', account_view.workflow_approvers),
url(r'^quotas/$', account_view.QuotaList.as_view()),
url(r'^quotas/(?P<pk>[0-9]+)/$', account_view.QuotaDetail.as_view()),
url(r'^quotas/batch-create/$', account_view.create_quotas),
url(r'^quotas/create/$', account_view.create_quota),
url(r'^quotas/delete/$', account_view.delete_quota),
url(r'^quota-resource-options/$', account_view.resource_options),
url(r'^notifications/broadcast/$', account_view.broadcast),
url(r'^notifications/data-center-broadcast/$', account_view.data_center_broadcast),
url(r'^notifications/announce/$', account_view.announce),
url(r'^notifications/$', account_view.NotificationList.as_view()),
url(r'^notifications/options/$', account_view.notification_options),
url(r'^notifications/(?P<pk>[0-9]+)/$', account_view.NotificationDetail.as_view()),
url(r'^feeds/$', account_view.FeedList.as_view()),
url(r'^feeds/(?P<pk>[0-9]+)/$', account_view.FeedDetail.as_view()),
url(r'^feeds/(?P<pk>[0-9]+)/mark-read/$', account_view.mark_read),
url(r'^feeds/status/$', account_view.feed_status),
])
# image
urlpatterns += format_suffix_patterns([
url(r'^contracts/$', account_view.ContractList.as_view()),
url(r'^contracts/create$', account_view.create_contract),
url(r'^contracts/update/$', account_view.update_contract),
url(r'^contracts/batch-delete/$', account_view.delete_contracts),
url(r'^contracts/(?P<pk>[0-9]+)/$', account_view.ContractDetail.as_view()),
])
# forum
urlpatterns += format_suffix_patterns([
url(r'^forums/$', forums_view.forum_list_view),
url(r'^forums/create/$', forums_view.forum_create_view),
url(r'^forums/delete/$', forums_view.forum_create_view),
url(r'^forums/close/$', forums_view.forum_close_forum_view),
url(r'^forums/reply/create/$', forums_view.forum_reply_create_view),
url(r'^forums/reply/$', forums_view.forum_reply_list_view),
url(r'^forum-replies/$', forums_view.forum_reply_list_view),
])
# idc
urlpatterns += format_suffix_patterns([
url(r'^data-centers/$', idc_views.DataCenterList.as_view()),
url(r'^data-centers/is-host-unique/$', idc_views.is_host_unique),
url(r'^data-centers/create/$', idc_views.create_data_center),
url(r'^data-centers/update/$', idc_views.update_data_center),
url(r'^data-centers/batch-delete/$', idc_views.delete_data_centers),
url(r'^user-data-centers/$', idc_views.UserDataCenterList.as_view()),
url(r'^user-data-centers/(?P<pk>[0-9]+)/$', idc_views.UserDataCenterDetail.as_view())
])
# backup
urlpatterns += format_suffix_patterns([
url(r'^backup/$', backup_view.BackupList.as_view()),
url(r'^backup/status/$', backup_view.backup_status_view),
url(r'^backup/create/$', backup_view.backup_create_view),
url(r'^backup/action/$', backup_view.backup_action_view),
])
# workflow
urlpatterns += [
url(r'^workflows/$', workflow_view.workflow_list),
url(r'^workflows/define/$', workflow_view.define_workflow),
url(r'^workflows/delete/$', workflow_view.delete_workflow),
url(r'^workflows/set-default/$', workflow_view.set_default_workflow),
url(r'^workflows/cancel-default/$', workflow_view.cancel_default_workflow),
url(r'^workflow-instances/$', workflow_view.flow_instances),
url(r'^workflow-instances/approve/$', workflow_view.approve),
url(r'^workflow-instances/rejected/$', workflow_view.reject),
url(r'^workflow-instances/status/$', workflow_view.workflow_status),
]
| en | 0.508781 | # various options and configurations # instance&flavor # image # network # router # LB # volume # floating #url(r'^floatings/search/$', floating_view.volume_list_view_by_instance), #url(r'^floatings/update/$', floating_view.volume_update_view), # FIREWALL # account # image # forum # idc # backup # workflow | 1.757774 | 2 |
scripts/perf/timing.py | mhbliao/rocFFT | 0 | 6630934 | #!/usr/bin/env python3
# a timing script for FFTs and convolutions using OpenMP
import sys, getopt
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
usage = '''A timing script for rocfft
Usage:
\ttiming.py
\t\t-w <string> set working directory for rocfft-rider
\t\t-i <string> directory for dloaded libs (appendable)
\t\t-o <string> name of output file (appendable for dload)
\t\t-D <-1,1> default: -1 (forward). Direction of transform
\t\t-I make transform in-place
\t\t-N <int> number of tests per problem size
\t\t-R set transform to be real/complex or complex/real
\t\t-d <1,2,3> dimension of transform (default: 1)
\t\t-x <int> minimum problem size in x direction
\t\t-X <int> maximum problem size in x direction
\t\t-y <int> minimum problem size in y direction
\t\t-Y <int> maximum problem size in Y direction
\t\t-z <int> minimum problem size in z direction
\t\t-Z <int> maximum problem size in Z direction
\t\t-f <string> precision: float(default) or double
\t\t-b <int> batch size
\t\t-r <int> radix used to step the problem size (default: 2)
\t\t-g <int> device number
'''
def runcase(workingdir,
dload, libdir,
length, direction, rcfft, inplace, ntrial,
precision, nbatch, devicenum, logfilename):
progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
prog = os.path.join(workingdir, progname)
cmd = []
cmd.append(prog)
cmd.append("--verbose")
cmd.append("0")
if dload:
cmd.append("--lib")
for val in libdir:
cmd.append(val)
cmd.append("-N")
cmd.append(str(ntrial))
cmd.append("--length")
for val in length:
cmd.append(str(val))
print(precision)
if precision == "double":
cmd.append("--double")
cmd.append("-b")
cmd.append(str(nbatch))
cmd.append("--device")
cmd.append(str(devicenum))
ttype = -1
itype = ""
otype = ""
if rcfft:
if (direction == -1):
ttype = 2
itype = 2
otype = 3
if (direction == 1):
ttype = 3
itype = 3
otype = 2
else:
itype = 0
otype = 0
if (direction == -1):
ttype = 0
if (direction == 1):
ttype = 1
cmd.append("-t")
cmd.append(str(ttype))
cmd.append("--itype")
cmd.append(str(itype))
cmd.append("--otype")
cmd.append(str(otype))
print(cmd)
print(" ".join(cmd))
fout = tempfile.TemporaryFile(mode="w+")
proc = subprocess.Popen(cmd, cwd=os.path.join(workingdir,"..",".."),
stdout=fout, stderr=fout,
env=os.environ.copy())
proc.wait()
rc = proc.returncode
vals = []
fout.seek(0)
cout = fout.read()
logfile = open(logfilename, "a")
logfile.write(" ".join(cmd))
logfile.write(cout)
logfile.close()
if rc == 0:
# ferr.seek(0)
# cerr = ferr.read()
searchstr = "Execution gpu time: "
for line in cout.split("\n"):
#print(line)
if line.startswith(searchstr):
vals.append([])
# Line ends with "ms", so remove that.
ms_string = line[len(searchstr): -2]
#print(ms_string)
for val in ms_string.split():
#print(val)
vals[len(vals) - 1].append(1e-3 * float(val))
print("seconds: ", vals)
else:
print("\twell, that didn't work")
print(rc)
print(" ".join(cmd))
return []
fout.close()
return vals
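# Illustrative, hypothetical helper (not called anywhere in this script).
# It mirrors the transform-type branch logic in runcase() above: complex
# forward/inverse -> ttype 0/1 with itype=otype=0, real-to-complex -> (2, 2, 3),
# complex-to-real -> (3, 3, 2).
def _example_transform_flags(rcfft, direction):
    # returns (ttype, itype, otype) as passed to rocfft-rider
    if rcfft:
        return (2, 2, 3) if direction == -1 else (3, 3, 2)
    return (0 if direction == -1 else 1, 0, 0)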
def main(argv):
# Options to determine which binary is to be run:
workingdir = "."
libdir = []
outfilename = []
logfilename = "timing.log"
# GPU device number:
devicenum = 0
# Experiment parameters:
ntrial = 10
# Problem size parameters:
direction = -1
inplace = False
rcfft = False
precision = "float"
dimension = 1
xmin = 2
xmax = 1024
ymin = 2
ymax = 1024
zmin = 2
zmax = 1024
radix = 2
nbatch = 1
try:
opts, args = getopt.getopt(argv,"hb:d:i:D:IN:o:Rw:x:X:y:Y:z:Z:f:r:g:")
except getopt.GetoptError:
print("error in parsing arguments.")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h"):
print(usage)
exit(0)
elif opt in ("-w"):
workingdir = arg
elif opt in ("-o"):
outfilename.append(arg)
elif opt in ("-i"):
libdir.append(arg)
elif opt in ("-g"):
devicenum = int(arg)
elif opt in ("-N"):
ntrial = int(arg)
elif opt in ("-D"):
if(int(arg) in [-1,1]):
direction = int(arg)
else:
print("invalid direction: " + arg)
print(usage)
sys.exit(1)
elif opt in ("-I"):
inplace = True
elif opt in ("-R"):
rcfft = True
elif opt in ("-f"):
if arg not in ["float", "double"]:
print("precision must be float or double")
print(usage)
sys.exit(1)
precision = arg
elif opt in ("-d"):
dimension = int(arg)
if not dimension in {1,2,3}:
print("invalid dimension")
print(usage)
sys.exit(1)
elif opt in ("-x"):
xmin = int(arg)
elif opt in ("-X"):
xmax = int(arg)
elif opt in ("-y"):
ymin = int(arg)
elif opt in ("-Y"):
ymax = int(arg)
elif opt in ("-z"):
zmin = int(arg)
elif opt in ("-Z"):
zmax = int(arg)
elif opt in ("-b"):
nbatch = int(arg)
elif opt in ("-r"):
radix = int(arg)
dload = len(libdir) > 0
if dload:
print("Using dyna-rider")
else:
print("Using normal rider")
print("workingdir: "+ workingdir)
print("outfilename: "+ ",".join(outfilename))
print("libdir: "+ ",".join(libdir))
print("device number: " + str(devicenum))
print("ntrial: " + str(ntrial))
print("dimension: " + str(dimension))
print("xmin: "+ str(xmin) + " xmax: " + str(xmax))
if dimension > 1:
print("ymin: "+ str(ymin) + " ymax: " + str(ymax))
if dimension > 2:
print("zmin: "+ str(zmin) + " zmax: " + str(zmax))
print("direction: " + str(direction))
print("real/complex FFT? " + str(rcfft))
print("in-place? " + str(inplace))
print("batch-size: " + str(nbatch))
print("radix: " + str(radix))
progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
prog = os.path.join(workingdir, progname)
if not os.path.isfile(prog):
print("**** Error: unable to find " + prog)
sys.exit(1)
metadatastring = "# " + " ".join(sys.argv) + "\n"
metadatastring += "# "
metadatastring += "dimension"
metadatastring += "\txlength"
if(dimension > 1):
metadatastring += "\tylength"
if(dimension > 2):
metadatastring += "\tzlength"
metadatastring += "\tnbatch"
metadatastring += "\tnsample"
metadatastring += "\tsamples ..."
metadatastring += "\n"
# The log file is stored alongside each data output file.
for idx in range(len(outfilename)):
logfilename = outfilename[idx] + ".log"
if not os.path.exists(os.path.dirname(logfilename)):
os.makedirs(os.path.dirname(logfilename))
print("log filename: " + logfilename)
logfile = open(logfilename, "w+")
logfile.write(metadatastring)
logfile.close()
outfile = open(outfilename[idx], "w+")
outfile.write(metadatastring)
outfile.close()
maxtrial = ntrial * xmax * ymax * zmax
xval = xmin
yval = ymin
zval = zmin
while(xval <= xmax and yval <= ymax and zval <= zmax):
print(xval)
length = [xval]
if dimension > 1:
length.append(yval)
if dimension > 2:
length.append(zval)
#N = max(ntrial, min(maxtrial // (xval * yval * zval), 20)) # FIXME: set upper bound to higher
N = ntrial
print(N)
seconds = runcase(workingdir,
dload, libdir,
length, direction, rcfft, inplace, N,
precision, nbatch, devicenum, logfilename)
#print(seconds)
for idx, vals in enumerate(seconds):
with open(outfilename[idx], 'a') as outfile:
outfile.write(str(dimension))
outfile.write("\t")
outfile.write(str(xval))
outfile.write("\t")
if(dimension > 1):
outfile.write(str(yval))
outfile.write("\t")
if(dimension > 2):
outfile.write(str(zval))
outfile.write("\t")
outfile.write(str(nbatch))
outfile.write("\t")
outfile.write(str(len(seconds[idx])))
for second in seconds[idx]:
outfile.write("\t")
outfile.write(str(second))
outfile.write("\n")
xval *= radix
if dimension > 1:
yval *= radix
if dimension > 2:
zval *= radix
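# Note on the sweep above (illustrative numbers, not taken from any particular run):
# each pass through the while loop multiplies the active lengths by `radix`, so the
# defaults xmin=2, xmax=1024, radix=2 give the ten 1D sizes 2, 4, 8, ..., 1024.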
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/env python3
# a timing script for FFTs and convolutions using OpenMP
import sys, getopt
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
usage = '''A timing script for rocfft
Usage:
\ttiming.py
\t\t-w <string> set working directory for rocfft-rider
\t\t-i <string> directory for dloaded libs (appendable)
\t\t-o <string> name of output file (appendable for dload)
\t\t-D <-1,1> default: -1 (forward). Direction of transform
\t\t-I make transform in-place
\t\t-N <int> number of tests per problem size
\t\t-R set transform to be real/complex or complex/real
\t\t-d <1,2,3> default: 1. Dimension of transform
\t\t-x <int> minimum problem size in x direction
\t\t-X <int> maximum problem size in x direction
\t\t-y <int> minimum problem size in y direction
\t\t-Y <int> maximum problem size in Y direction
\t\t-z <int> minimum problem size in z direction
\t\t-Z <int> maximum problem size in Z direction
\t\t-f <string> precision: float(default) or double
\t\t-b <int> batch size
\t\t-r <int> radix: multiplicative step between successive problem sizes
\t\t-g <int> device number
'''
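# Example invocation (illustrative only; the build path, sizes and output name are assumptions):
#   ./timing.py -w /path/to/rocfft/build/clients/staging -o rocfft_1d.dat \
#       -d 1 -x 2 -X 1024 -r 2 -b 1 -N 10 -f float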
def runcase(workingdir,
dload, libdir,
length, direction, rcfft, inplace, ntrial,
precision, nbatch, devicenum, logfilename):
progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
prog = os.path.join(workingdir, progname)
cmd = []
cmd.append(prog)
cmd.append("--verbose")
cmd.append("0")
if dload:
cmd.append("--lib")
for val in libdir:
cmd.append(val)
cmd.append("-N")
cmd.append(str(ntrial))
cmd.append("--length")
for val in length:
cmd.append(str(val))
print(precision)
if precision == "double":
cmd.append("--double")
cmd.append("-b")
cmd.append(str(nbatch))
cmd.append("--device")
cmd.append(str(devicenum))
ttype = -1
itype = ""
otype = ""
if rcfft:
if (direction == -1):
ttype = 2
itype = 2
otype = 3
if (direction == 1):
ttype = 3
itype = 3
otype = 2
else:
itype = 0
otype = 0
if (direction == -1):
ttype = 0
if (direction == 1):
ttype = 1
cmd.append("-t")
cmd.append(str(ttype))
cmd.append("--itype")
cmd.append(str(itype))
cmd.append("--otype")
cmd.append(str(otype))
print(cmd)
print(" ".join(cmd))
fout = tempfile.TemporaryFile(mode="w+")
proc = subprocess.Popen(cmd, cwd=os.path.join(workingdir,"..",".."),
stdout=fout, stderr=fout,
env=os.environ.copy())
proc.wait()
rc = proc.returncode
vals = []
fout.seek(0)
cout = fout.read()
logfile = open(logfilename, "a")
logfile.write(" ".join(cmd))
logfile.write(cout)
logfile.close()
if rc == 0:
# ferr.seek(0)
# cerr = ferr.read()
searchstr = "Execution gpu time: "
for line in cout.split("\n"):
#print(line)
if line.startswith(searchstr):
vals.append([])
# Line ends with "ms", so remove that.
ms_string = line[len(searchstr): -2]
#print(ms_string)
for val in ms_string.split():
#print(val)
vals[len(vals) - 1].append(1e-3 * float(val))
print("seconds: ", vals)
else:
print("\twell, that didn't work")
print(rc)
print(" ".join(cmd))
return []
fout.close()
return vals
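# For reference, the parsing loop above expects rider output lines shaped like
# (example only, not captured from a real run):
#   Execution gpu time: 0.42 0.41 0.43 ms
# which it converts to seconds, e.g. [0.00042, 0.00041, 0.00043].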
def main(argv):
# Options to determine which binary is to be run:
workingdir = "."
libdir = []
outfilename = []
logfilename = "timing.log"
# GPU device number:
devicenum = 0
# Experiment parameters:
ntrial = 10
# Problem size parameters:
direction = -1
inplace = False
rcfft = False
precision = "float"
dimension = 1
xmin = 2
xmax = 1024
ymin = 2
ymax = 1024
zmin = 2
zmax = 1024
radix = 2
nbatch = 1
try:
opts, args = getopt.getopt(argv,"hb:d:i:D:IN:o:Rw:x:X:y:Y:z:Z:f:r:g:")
except getopt.GetoptError:
print("error in parsing arguments.")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h"):
print(usage)
exit(0)
elif opt in ("-w"):
workingdir = arg
elif opt in ("-o"):
outfilename.append(arg)
elif opt in ("-i"):
libdir.append(arg)
elif opt in ("-g"):
devicenum = int(arg)
elif opt in ("-N"):
ntrial = int(arg)
elif opt in ("-D"):
if(int(arg) in [-1,1]):
direction = int(arg)
else:
print("invalid direction: " + arg)
print(usage)
sys.exit(1)
elif opt in ("-I"):
inplace = True
elif opt in ("-R"):
rcfft = True
elif opt in ("-f"):
if arg not in ["float", "double"]:
print("precision must be float or double")
print(usage)
sys.exit(1)
precision = arg
elif opt in ("-d"):
dimension = int(arg)
if not dimension in {1,2,3}:
print("invalid dimension")
print(usage)
sys.exit(1)
elif opt in ("-x"):
xmin = int(arg)
elif opt in ("-X"):
xmax = int(arg)
elif opt in ("-y"):
ymin = int(arg)
elif opt in ("-Y"):
ymax = int(arg)
elif opt in ("-z"):
zmin = int(arg)
elif opt in ("-Z"):
zmax = int(arg)
elif opt in ("-b"):
nbatch = int(arg)
elif opt in ("-r"):
radix = int(arg)
dload = len(libdir) > 0
if dload:
print("Using dyna-rider")
else:
print("Using normal rider")
print("workingdir: "+ workingdir)
print("outfilename: "+ ",".join(outfilename))
print("libdir: "+ ",".join(libdir))
print("device number: " + str(devicenum))
print("ntrial: " + str(ntrial))
print("dimension: " + str(dimension))
print("xmin: "+ str(xmin) + " xmax: " + str(xmax))
if dimension > 1:
print("ymin: "+ str(ymin) + " ymax: " + str(ymax))
if dimension > 2:
print("zmin: "+ str(zmin) + " zmax: " + str(zmax))
print("direction: " + str(direction))
print("real/complex FFT? " + str(rcfft))
print("in-place? " + str(inplace))
print("batch-size: " + str(nbatch))
print("radix: " + str(radix))
progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
prog = os.path.join(workingdir, progname)
if not os.path.isfile(prog):
print("**** Error: unable to find " + prog)
sys.exit(1)
metadatastring = "# " + " ".join(sys.argv) + "\n"
metadatastring += "# "
metadatastring += "dimension"
metadatastring += "\txlength"
if(dimension > 1):
metadatastring += "\tylength"
if(dimension > 2):
metadatastring += "\tzlength"
metadatastring += "\tnbatch"
metadatastring += "\tnsample"
metadatastring += "\tsamples ..."
metadatastring += "\n"
# The log file is stored alongside each data output file.
for idx in range(len(outfilename)):
logfilename = outfilename[idx] + ".log"
if not os.path.exists(os.path.dirname(logfilename)):
os.makedirs(os.path.dirname(logfilename))
print("log filename: " + logfilename)
logfile = open(logfilename, "w+")
logfile.write(metadatastring)
logfile.close()
outfile = open(outfilename[idx], "w+")
outfile.write(metadatastring)
outfile.close()
maxtrial = ntrial * xmax * ymax * zmax
xval = xmin
yval = ymin
zval = zmin
while(xval <= xmax and yval <= ymax and zval <= zmax):
print(xval)
length = [xval]
if dimension > 1:
length.append(yval)
if dimension > 2:
length.append(zval)
#N = max(ntrial, min(maxtrial // (xval * yval * zval), 20)) # FIXME: set upper bound to higher
N = ntrial
print(N)
seconds = runcase(workingdir,
dload, libdir,
length, direction, rcfft, inplace, N,
precision, nbatch, devicenum, logfilename)
#print(seconds)
for idx, vals in enumerate(seconds):
with open(outfilename[idx], 'a') as outfile:
outfile.write(str(dimension))
outfile.write("\t")
outfile.write(str(xval))
outfile.write("\t")
if(dimension > 1):
outfile.write(str(yval))
outfile.write("\t")
if(dimension > 2):
outfile.write(str(zval))
outfile.write("\t")
outfile.write(str(nbatch))
outfile.write("\t")
outfile.write(str(len(seconds[idx])))
for second in seconds[idx]:
outfile.write("\t")
outfile.write(str(second))
outfile.write("\n")
xval *= radix
if dimension > 1:
yval *= radix
if dimension > 2:
zval *= radix
if __name__ == "__main__":
main(sys.argv[1:])
| en | 0.600546 | #!/usr/bin/env python3 # a timing script for FFTs and convolutions using OpenMP # regexp package A timing script for rocfft Usage: \ttiming.py \t\t-w <string> set working directory for rocfft-rider \t\t-i <string> directory for dloaded libs (appendable) \t\t-o <string> name of output file (appendable for dload) \t\t-D <-1,1> default: -1 (forward). Direction of transform \t\t-I make transform in-place \t\t-N <int> number of tests per problem size \t\t-R set transform to be real/complex or complex/real \t\t-d <1,2,3> default: dimension of transform \t\t-x <int> minimum problem size in x direction \t\t-X <int> maximum problem size in x direction \t\t-y <int> minimum problem size in y direction \t\t-Y <int> maximum problem size in Y direction \t\t-z <int> minimum problem size in z direction \t\t-Z <int> maximum problem size in Z direction \t\t-f <string> precision: float(default) or double \t\t-b <int> batch size \t\t-g <int> device number # ferr.seek(0) # cerr = ferr.read() #print(line) # Line ends with "ms", so remove that. #print(ms_string) #print(val) # Options to determine which binary is to be run: # GPU device number: # Experiment parameters: # Problem size parameters: # The log file is stored alongside each data output file. #N = max(ntrial, min(maxtrial // (xval * yval * zval), 20)) # FIXME: set upper bound to higher #print(seconds) | 2.318631 | 2 |
abcpy/graphtools.py | vishalbelsare/abcpy | 89 | 6630935 | <gh_stars>10-100
import numpy as np
from abcpy.probabilisticmodels import Hyperparameter, ModelResultingFromOperation
class GraphTools:
"""This class implements all methods that will be called recursively on the graph structure."""
def sample_from_prior(self, model=None, rng=np.random.RandomState()):
"""
Samples values for all random variables of the model.
Commonly used to sample new parameter values on the whole graph.
Parameters
----------
model: abcpy.ProbabilisticModel object
The root model for which sample_from_prior should be called.
rng: Random number generator
Defines the random number generator to be used
"""
if model is None:
model = self.model
# If it was at some point not possible to sample (due to incompatible parameter values provided by the parents), we start from scratch
while not (self._sample_from_prior(model, rng=rng)):
self._reset_flags(model)
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
self._reset_flags(model)
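    # Illustrative usage sketch (hypothetical sampler object; any inference scheme that
    # mixes in GraphTools and has `self.model` set could be used the same way):
    #   rng = np.random.RandomState(42)
    #   sampler.sample_from_prior(rng=rng)   # draw fresh values for every free parameter
    #   theta = sampler.get_parameters()     # collect the sampled values
    #   prior_pdf = sampler.pdf_of_prior(sampler.model, theta)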
def _sample_from_prior(self, models, is_not_root=False, was_accepted=True, rng=np.random.RandomState()):
"""
Recursive version of sample_from_prior. Commonly called from within sample_from_prior.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which, together with their parents, new parameters will be sampled
        is_not_root: boolean
            Whether the probabilistic models provided in models are not root models.
was_accepted: boolean
Whether the sampled values for all previous/parent models were accepted.
rng: Random number generator
Defines the random number generator to be used
Returns
-------
boolean:
Whether it was possible to sample new values for all nodes of the graph.
"""
# If it was so far possible to sample parameters for all nodes, the current node as well as its parents are sampled, using depth-first search
if was_accepted:
for model in models:
for parent in model.get_input_models():
if not parent.visited:
parent.visited = True
was_accepted = self._sample_from_prior([parent], is_not_root=True, was_accepted=was_accepted,
rng=rng)
if not was_accepted:
return False
if is_not_root and not (model._forward_simulate_and_store_output(rng=rng)):
return False
model.visited = True
return was_accepted
def _reset_flags(self, models=None):
"""
Resets all flags that say that a probabilistic model has been updated. Commonly used after actions on the whole
graph, to ensure that new actions can take place.
Parameters
----------
models: list of abcpy.ProbabilisticModel
The models for which, together with their parents, the flags should be reset. If no value is provided, the
root models are assumed to be the model of the inference method.
"""
if not models:
models = self.model
# For each model, the flags of the parents get reset recursively.
for model in models:
for parent in model.get_input_models():
self._reset_flags([parent])
model.visited = False
model.calculated_pdf = None
def pdf_of_prior(self, models, parameters, mapping=None, is_root=True):
"""
Calculates the joint probability density function of the prior of the specified models at the given parameter values.
Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which the pdf of their prior should be evaluated
parameters: python list
The parameters at which the pdf should be evaluated
mapping: list of tuples
Defines the mapping of probabilistic models and index in a parameter list.
is_root: boolean
A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly.
Returns
-------
        float
            The joint pdf of the prior evaluated at the given parameter values.
"""
self.set_parameters(parameters)
result = self._recursion_pdf_of_prior(models, parameters, mapping, is_root)
return result
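    # Sketch of a typical use (hypothetical names): in an accept/reject step one would
    # compare the prior at the proposed and current parameter values, e.g.
    #   ratio = self.pdf_of_prior(self.model, theta_proposed) / self.pdf_of_prior(self.model, theta_current)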
def _recursion_pdf_of_prior(self, models, parameters, mapping=None, is_root=True):
"""
Calculates the joint probability density function of the prior of the specified models at the given parameter values.
Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which the pdf of their prior should be evaluated
parameters: python list
The parameters at which the pdf should be evaluated
mapping: list of tuples
Defines the mapping of probabilistic models and index in a parameter list.
is_root: boolean
A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly.
Returns
-------
        float
            The joint pdf of the prior evaluated at the given parameter values.
"""
# At the beginning of calculation, obtain the mapping
if is_root:
mapping, garbage_index = self._get_mapping()
# The pdf of each root model is first calculated separately
result = [1.] * len(models)
for i, model in enumerate(models):
# If the model is not a root model, the pdf of this model, given the prior, should be calculated
if not is_root and not (isinstance(model, ModelResultingFromOperation)):
# Define a helper list which will contain the parameters relevant to the current model for pdf calculation
relevant_parameters = []
for mapped_model, model_index in mapping:
if mapped_model == model:
parameter_index = model_index
# for j in range(model.get_output_dimension()):
relevant_parameters.append(parameters[parameter_index])
# parameter_index+=1
break
if len(relevant_parameters) == 1:
relevant_parameters = relevant_parameters[0]
else:
relevant_parameters = np.array(relevant_parameters)
else:
relevant_parameters = []
# Mark whether the parents of each model have been visited before for this model to avoid repeated calculation.
visited_parents = [False for j in range(len(model.get_input_models()))]
# For each parent, the pdf of this parent has to be calculated as well.
for parent_index, parent in enumerate(model.get_input_models()):
# Only calculate the pdf if the parent has never been visited for this model
if not (visited_parents[parent_index]):
pdf = self._recursion_pdf_of_prior([parent], parameters, mapping=mapping, is_root=False)
input_models = model.get_input_models()
for j in range(len(input_models)):
if input_models[j][0] == parent:
visited_parents[j] = True
result[i] *= pdf
if not is_root:
if model.calculated_pdf is None:
result[i] *= model.pdf(model.get_input_values(), relevant_parameters)
else:
result[i] *= 1
# Multiply the pdfs of all roots together to give an overall pdf.
temporary_result = result
result = 1.
for individual_result in temporary_result:
result *= individual_result
return result
def _get_mapping(self, models=None, index=0, is_not_root=False):
"""Returns a mapping of model and first index corresponding to the outputs in this model in parameter lists.
Parameters
----------
models: list
List of abcpy.ProbabilisticModel objects
index: integer
Next index to be mapped in a parameter list
is_not_root: boolean
            Specifies whether the models provided are not root models.
Returns
-------
list
A list containing two entries. The first entry corresponds to the mapping of the root models, including their parents. The second entry corresponds to the next index to be considered in a parameter list.
"""
if models is None:
models = self.model
mapping = []
for model in models:
# If this model corresponds to an unvisited free parameter, add it to the mapping
if is_not_root and not model.visited and not (isinstance(model, Hyperparameter)) and not (
isinstance(model, ModelResultingFromOperation)):
mapping.append((model, index))
index += 1 # model.get_output_dimension()
# Add all parents to the mapping, if applicable
for parent in model.get_input_models():
parent_mapping, index = self._get_mapping([parent], index=index, is_not_root=True)
parent.visited = True
for mappings in parent_mapping:
mapping.append(mappings)
model.visited = True
# At the end of the algorithm, reset all flags such that another method can act on the graph freely.
if not is_not_root:
self._reset_flags()
return [mapping, index]
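    # For intuition (hypothetical two-parameter graph): with free variables `mu` and
    # `sigma` feeding an observed model, _get_mapping() returns something like
    #   [[(mu, 0), (sigma, 1)], 2]
    # i.e. each free variable paired with its position in a flat parameter list, plus
    # the next unused index.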
def _get_names_and_parameters(self):
"""
A function returning the name of each model and the corresponding parameters to this model
Returns
-------
list:
Each entry is a tuple, the first entry of which is the name of the model and the second entry is the parameter values associated with it
"""
mapping = self._get_mapping()[0]
return_value = []
for model, index in mapping:
return_value.append(
(model.name, self.accepted_parameters_manager.get_accepted_parameters_bds_values([model])))
return return_value
def get_parameters(self, models=None, is_root=True):
"""
Returns the current values of all free parameters in the model. Commonly used before perturbing the parameters
of the model.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
The models for which, together with their parents, the parameter values should be returned. If no value is
provided, the root models are assumed to be the model of the inference method.
is_root: boolean
Specifies whether the current models are at the root. This ensures that the values corresponding to
simulated observations will not be returned.
Returns
-------
list
A list containing all currently sampled values of the free parameters.
"""
parameters = []
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
# If we are not at the root, the sampled values for the current node should be returned
if is_root == False and not isinstance(model, (ModelResultingFromOperation, Hyperparameter)):
parameters.append(model.get_stored_output_values())
model.visited = True
# Implement a depth-first search to return also the sampled values associated with each parent of the model
for parent in model.get_input_models():
if not parent.visited:
parameters += self.get_parameters(models=[parent], is_root=False)
parent.visited = True
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return parameters
def set_parameters(self, parameters, models=None, index=0, is_root=True):
"""
Sets new values for the currently used values of each random variable.
Commonly used after perturbing the parameter values using a kernel.
Parameters
----------
parameters: list
Defines the values to which the respective parameter values of the models should be set
        models: list of abcpy.ProbabilisticModel objects
Defines all models for which, together with their parents, new values should be set. If no value is provided, the root models are assumed to be the model of the inference method.
index: integer
The current index to be considered in the parameters list
is_root: boolean
Defines whether the current models are at the root. This ensures that only values corresponding to random variables will be set.
Returns
-------
list: [boolean, integer]
Returns whether it was possible to set all parameters and the next index to be considered in the parameters list.
"""
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
# New parameters should only be set in case we are not at the root
if not is_root and not isinstance(model, ModelResultingFromOperation):
# new_output_values = np.array(parameters[index:index + model.get_output_dimension()])
new_output_values = np.array(parameters[index]).reshape(-1, )
if not model.set_output_values(new_output_values):
return [False, index]
index += 1 # model.get_output_dimension()
model.visited = True
# New parameters for all parents are set using a depth-first search
for parent in model.get_input_models():
if not parent.visited and not isinstance(parent, Hyperparameter):
is_set, index = self.set_parameters(parameters, models=[parent], index=index, is_root=False)
if not is_set:
                        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return [False, index]
model.visited = True
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return [True, index]
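    # Round-trip sketch (hypothetical perturbation step): kernels typically read the
    # current values, perturb them and write them back, e.g.
    #   theta = self.get_parameters()
    #   theta_new = my_kernel.update(theta)        # hypothetical kernel call
    #   ok, _ = self.set_parameters(theta_new)     # ok is False if a value is rejected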
def get_correct_ordering(self, parameters_and_models, models=None, is_root=True):
"""
Orders the parameters returned by a kernel in the order required by the graph.
Commonly used when perturbing the parameters.
Parameters
----------
parameters_and_models: list of tuples
Contains tuples containing as the first entry the probabilistic model to be considered and as the second entry the parameter values associated with this model
models: list
Contains the root probabilistic models that make up the graph. If no value is provided, the root models are assumed to be the model of the inference method.
Returns
-------
list
The ordering which can be used by recursive functions on the graph.
"""
ordered_parameters = []
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
if not model.visited:
model.visited = True
# Check all entries in parameters_and_models to determine whether the current model is contained within it
for corresponding_model, parameter in parameters_and_models:
if corresponding_model == model:
for param in parameter:
ordered_parameters.append(param)
break
# Recursively order all the parents of the current model
for parent in model.get_input_models():
if not parent.visited:
parent_ordering = self.get_correct_ordering(parameters_and_models, models=[parent],
is_root=False)
for parent_parameters in parent_ordering:
ordered_parameters.append(parent_parameters)
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return ordered_parameters
def simulate(self, n_samples_per_param, rng=np.random.RandomState(), npc=None):
"""Simulates data of each model using the currently sampled or perturbed parameters.
Parameters
----------
rng: random number generator
The random number generator to be used.
Returns
-------
list
Each entry corresponds to the simulated data of one model.
"""
result = []
for model in self.model:
parameters_compatible = model._check_input(model.get_input_values())
if parameters_compatible:
if npc is not None and npc.communicator().Get_size() > 1:
simulation_result = npc.run_nested(model.forward_simulate, model.get_input_values(),
n_samples_per_param, rng=rng)
else:
simulation_result = model.forward_simulate(model.get_input_values(), n_samples_per_param, rng=rng)
result.append(simulation_result)
else:
return None
return result
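    # Illustrative call (assumes parameter values have already been sampled or set):
    #   sims = self.simulate(n_samples_per_param=10, rng=np.random.RandomState(1))
    # returns one list of simulated datasets per root model, or None if the current
    # parameter values are invalid for some model.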
| import numpy as np
from abcpy.probabilisticmodels import Hyperparameter, ModelResultingFromOperation
class GraphTools:
"""This class implements all methods that will be called recursively on the graph structure."""
def sample_from_prior(self, model=None, rng=np.random.RandomState()):
"""
Samples values for all random variables of the model.
Commonly used to sample new parameter values on the whole graph.
Parameters
----------
model: abcpy.ProbabilisticModel object
The root model for which sample_from_prior should be called.
rng: Random number generator
Defines the random number generator to be used
"""
if model is None:
model = self.model
# If it was at some point not possible to sample (due to incompatible parameter values provided by the parents), we start from scratch
while not (self._sample_from_prior(model, rng=rng)):
self._reset_flags(model)
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
self._reset_flags(model)
def _sample_from_prior(self, models, is_not_root=False, was_accepted=True, rng=np.random.RandomState()):
"""
Recursive version of sample_from_prior. Commonly called from within sample_from_prior.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which, together with their parents, new parameters will be sampled
        is_not_root: boolean
            Whether the probabilistic models provided in models are not root models.
was_accepted: boolean
Whether the sampled values for all previous/parent models were accepted.
rng: Random number generator
Defines the random number generator to be used
Returns
-------
boolean:
Whether it was possible to sample new values for all nodes of the graph.
"""
# If it was so far possible to sample parameters for all nodes, the current node as well as its parents are sampled, using depth-first search
if was_accepted:
for model in models:
for parent in model.get_input_models():
if not parent.visited:
parent.visited = True
was_accepted = self._sample_from_prior([parent], is_not_root=True, was_accepted=was_accepted,
rng=rng)
if not was_accepted:
return False
if is_not_root and not (model._forward_simulate_and_store_output(rng=rng)):
return False
model.visited = True
return was_accepted
def _reset_flags(self, models=None):
"""
Resets all flags that say that a probabilistic model has been updated. Commonly used after actions on the whole
graph, to ensure that new actions can take place.
Parameters
----------
models: list of abcpy.ProbabilisticModel
The models for which, together with their parents, the flags should be reset. If no value is provided, the
root models are assumed to be the model of the inference method.
"""
if not models:
models = self.model
# For each model, the flags of the parents get reset recursively.
for model in models:
for parent in model.get_input_models():
self._reset_flags([parent])
model.visited = False
model.calculated_pdf = None
def pdf_of_prior(self, models, parameters, mapping=None, is_root=True):
"""
Calculates the joint probability density function of the prior of the specified models at the given parameter values.
Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which the pdf of their prior should be evaluated
parameters: python list
The parameters at which the pdf should be evaluated
mapping: list of tuples
Defines the mapping of probabilistic models and index in a parameter list.
is_root: boolean
A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly.
Returns
-------
        float
            The joint pdf of the prior evaluated at the given parameter values.
"""
self.set_parameters(parameters)
result = self._recursion_pdf_of_prior(models, parameters, mapping, is_root)
return result
def _recursion_pdf_of_prior(self, models, parameters, mapping=None, is_root=True):
"""
Calculates the joint probability density function of the prior of the specified models at the given parameter values.
Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
Defines the models for which the pdf of their prior should be evaluated
parameters: python list
The parameters at which the pdf should be evaluated
mapping: list of tuples
Defines the mapping of probabilistic models and index in a parameter list.
is_root: boolean
A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly.
Returns
-------
        float
            The joint pdf of the prior evaluated at the given parameter values.
"""
# At the beginning of calculation, obtain the mapping
if is_root:
mapping, garbage_index = self._get_mapping()
# The pdf of each root model is first calculated separately
result = [1.] * len(models)
for i, model in enumerate(models):
# If the model is not a root model, the pdf of this model, given the prior, should be calculated
if not is_root and not (isinstance(model, ModelResultingFromOperation)):
# Define a helper list which will contain the parameters relevant to the current model for pdf calculation
relevant_parameters = []
for mapped_model, model_index in mapping:
if mapped_model == model:
parameter_index = model_index
# for j in range(model.get_output_dimension()):
relevant_parameters.append(parameters[parameter_index])
# parameter_index+=1
break
if len(relevant_parameters) == 1:
relevant_parameters = relevant_parameters[0]
else:
relevant_parameters = np.array(relevant_parameters)
else:
relevant_parameters = []
# Mark whether the parents of each model have been visited before for this model to avoid repeated calculation.
visited_parents = [False for j in range(len(model.get_input_models()))]
# For each parent, the pdf of this parent has to be calculated as well.
for parent_index, parent in enumerate(model.get_input_models()):
# Only calculate the pdf if the parent has never been visited for this model
if not (visited_parents[parent_index]):
pdf = self._recursion_pdf_of_prior([parent], parameters, mapping=mapping, is_root=False)
input_models = model.get_input_models()
for j in range(len(input_models)):
if input_models[j][0] == parent:
visited_parents[j] = True
result[i] *= pdf
if not is_root:
if model.calculated_pdf is None:
result[i] *= model.pdf(model.get_input_values(), relevant_parameters)
else:
result[i] *= 1
# Multiply the pdfs of all roots together to give an overall pdf.
temporary_result = result
result = 1.
for individual_result in temporary_result:
result *= individual_result
return result
def _get_mapping(self, models=None, index=0, is_not_root=False):
"""Returns a mapping of model and first index corresponding to the outputs in this model in parameter lists.
Parameters
----------
models: list
List of abcpy.ProbabilisticModel objects
index: integer
Next index to be mapped in a parameter list
is_not_root: boolean
            Specifies whether the models provided are not root models.
Returns
-------
list
A list containing two entries. The first entry corresponds to the mapping of the root models, including their parents. The second entry corresponds to the next index to be considered in a parameter list.
"""
if models is None:
models = self.model
mapping = []
for model in models:
# If this model corresponds to an unvisited free parameter, add it to the mapping
if is_not_root and not model.visited and not (isinstance(model, Hyperparameter)) and not (
isinstance(model, ModelResultingFromOperation)):
mapping.append((model, index))
index += 1 # model.get_output_dimension()
# Add all parents to the mapping, if applicable
for parent in model.get_input_models():
parent_mapping, index = self._get_mapping([parent], index=index, is_not_root=True)
parent.visited = True
for mappings in parent_mapping:
mapping.append(mappings)
model.visited = True
# At the end of the algorithm, reset all flags such that another method can act on the graph freely.
if not is_not_root:
self._reset_flags()
return [mapping, index]
def _get_names_and_parameters(self):
"""
A function returning the name of each model and the corresponding parameters to this model
Returns
-------
list:
Each entry is a tuple, the first entry of which is the name of the model and the second entry is the parameter values associated with it
"""
mapping = self._get_mapping()[0]
return_value = []
for model, index in mapping:
return_value.append(
(model.name, self.accepted_parameters_manager.get_accepted_parameters_bds_values([model])))
return return_value
def get_parameters(self, models=None, is_root=True):
"""
Returns the current values of all free parameters in the model. Commonly used before perturbing the parameters
of the model.
Parameters
----------
models: list of abcpy.ProbabilisticModel objects
The models for which, together with their parents, the parameter values should be returned. If no value is
provided, the root models are assumed to be the model of the inference method.
is_root: boolean
Specifies whether the current models are at the root. This ensures that the values corresponding to
simulated observations will not be returned.
Returns
-------
list
A list containing all currently sampled values of the free parameters.
"""
parameters = []
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
# If we are not at the root, the sampled values for the current node should be returned
if is_root == False and not isinstance(model, (ModelResultingFromOperation, Hyperparameter)):
parameters.append(model.get_stored_output_values())
model.visited = True
# Implement a depth-first search to return also the sampled values associated with each parent of the model
for parent in model.get_input_models():
if not parent.visited:
parameters += self.get_parameters(models=[parent], is_root=False)
parent.visited = True
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return parameters
def set_parameters(self, parameters, models=None, index=0, is_root=True):
"""
Sets new values for the currently used values of each random variable.
Commonly used after perturbing the parameter values using a kernel.
Parameters
----------
parameters: list
Defines the values to which the respective parameter values of the models should be set
        models: list of abcpy.ProbabilisticModel objects
Defines all models for which, together with their parents, new values should be set. If no value is provided, the root models are assumed to be the model of the inference method.
index: integer
The current index to be considered in the parameters list
is_root: boolean
Defines whether the current models are at the root. This ensures that only values corresponding to random variables will be set.
Returns
-------
list: [boolean, integer]
Returns whether it was possible to set all parameters and the next index to be considered in the parameters list.
"""
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
# New parameters should only be set in case we are not at the root
if not is_root and not isinstance(model, ModelResultingFromOperation):
# new_output_values = np.array(parameters[index:index + model.get_output_dimension()])
new_output_values = np.array(parameters[index]).reshape(-1, )
if not model.set_output_values(new_output_values):
return [False, index]
index += 1 # model.get_output_dimension()
model.visited = True
# New parameters for all parents are set using a depth-first search
for parent in model.get_input_models():
if not parent.visited and not isinstance(parent, Hyperparameter):
is_set, index = self.set_parameters(parameters, models=[parent], index=index, is_root=False)
if not is_set:
                        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return [False, index]
model.visited = True
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return [True, index]
def get_correct_ordering(self, parameters_and_models, models=None, is_root=True):
"""
Orders the parameters returned by a kernel in the order required by the graph.
Commonly used when perturbing the parameters.
Parameters
----------
parameters_and_models: list of tuples
Contains tuples containing as the first entry the probabilistic model to be considered and as the second entry the parameter values associated with this model
models: list
Contains the root probabilistic models that make up the graph. If no value is provided, the root models are assumed to be the model of the inference method.
Returns
-------
list
The ordering which can be used by recursive functions on the graph.
"""
ordered_parameters = []
# If we are at the root, we set models to the model attribute of the inference method
if is_root:
models = self.model
for model in models:
if not model.visited:
model.visited = True
# Check all entries in parameters_and_models to determine whether the current model is contained within it
for corresponding_model, parameter in parameters_and_models:
if corresponding_model == model:
for param in parameter:
ordered_parameters.append(param)
break
# Recursively order all the parents of the current model
for parent in model.get_input_models():
if not parent.visited:
parent_ordering = self.get_correct_ordering(parameters_and_models, models=[parent],
is_root=False)
for parent_parameters in parent_ordering:
ordered_parameters.append(parent_parameters)
        # At the end of the algorithm, all flags are reset such that new methods can act on the graph freely
if is_root:
self._reset_flags()
return ordered_parameters
def simulate(self, n_samples_per_param, rng=np.random.RandomState(), npc=None):
"""Simulates data of each model using the currently sampled or perturbed parameters.
Parameters
----------
rng: random number generator
The random number generator to be used.
Returns
-------
list
Each entry corresponds to the simulated data of one model.
"""
result = []
for model in self.model:
parameters_compatible = model._check_input(model.get_input_values())
if parameters_compatible:
if npc is not None and npc.communicator().Get_size() > 1:
simulation_result = npc.run_nested(model.forward_simulate, model.get_input_values(),
n_samples_per_param, rng=rng)
else:
simulation_result = model.forward_simulate(model.get_input_values(), n_samples_per_param, rng=rng)
result.append(simulation_result)
else:
return None
return result | en | 0.788383 | This class implements all methods that will be called recursively on the graph structure. Samples values for all random variables of the model. Commonly used to sample new parameter values on the whole graph. Parameters ---------- model: abcpy.ProbabilisticModel object The root model for which sample_from_prior should be called. rng: Random number generator Defines the random number generator to be used # If it was at some point not possible to sample (due to incompatible parameter values provided by the parents), we start from scratch # At the end of the algorithm, are flags are reset such that new methods can act on the graph freely Recursive version of sample_from_prior. Commonly called from within sample_from_prior. Parameters ---------- models: list of abcpy.ProbabilisticModel objects Defines the models for which, together with their parents, new parameters will be sampled is_root: boolean Whether the probabilistic models provided in models are root models. was_accepted: boolean Whether the sampled values for all previous/parent models were accepted. rng: Random number generator Defines the random number generator to be used Returns ------- boolean: Whether it was possible to sample new values for all nodes of the graph. # If it was so far possible to sample parameters for all nodes, the current node as well as its parents are sampled, using depth-first search Resets all flags that say that a probabilistic model has been updated. Commonly used after actions on the whole graph, to ensure that new actions can take place. Parameters ---------- models: list of abcpy.ProbabilisticModel The models for which, together with their parents, the flags should be reset. If no value is provided, the root models are assumed to be the model of the inference method. # For each model, the flags of the parents get reset recursively. Calculates the joint probability density function of the prior of the specified models at the given parameter values. Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities. Parameters ---------- models: list of abcpy.ProbabilisticModel objects Defines the models for which the pdf of their prior should be evaluated parameters: python list The parameters at which the pdf should be evaluated mapping: list of tuples Defines the mapping of probabilistic models and index in a parameter list. is_root: boolean A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly. Returns ------- list The resulting pdf,as well as the next index to be considered in the parameters list. Calculates the joint probability density function of the prior of the specified models at the given parameter values. Commonly used to check whether new parameters are valid given the prior, as well as to calculate acceptance probabilities. Parameters ---------- models: list of abcpy.ProbabilisticModel objects Defines the models for which the pdf of their prior should be evaluated parameters: python list The parameters at which the pdf should be evaluated mapping: list of tuples Defines the mapping of probabilistic models and index in a parameter list. is_root: boolean A flag specifying whether the provided models are the root models. This is to ensure that the pdf is calculated correctly. Returns ------- list The resulting pdf,as well as the next index to be considered in the parameters list. 
# At the beginning of calculation, obtain the mapping # The pdf of each root model is first calculated separately # If the model is not a root model, the pdf of this model, given the prior, should be calculated # Define a helper list which will contain the parameters relevant to the current model for pdf calculation # for j in range(model.get_output_dimension()): # parameter_index+=1 # Mark whether the parents of each model have been visited before for this model to avoid repeated calculation. # For each parent, the pdf of this parent has to be calculated as well. # Only calculate the pdf if the parent has never been visited for this model # Multiply the pdfs of all roots together to give an overall pdf. Returns a mapping of model and first index corresponding to the outputs in this model in parameter lists. Parameters ---------- models: list List of abcpy.ProbabilisticModel objects index: integer Next index to be mapped in a parameter list is_not_root: boolean Specifies whether the models specified are root models. Returns ------- list A list containing two entries. The first entry corresponds to the mapping of the root models, including their parents. The second entry corresponds to the next index to be considered in a parameter list. # If this model corresponds to an unvisited free parameter, add it to the mapping # model.get_output_dimension() # Add all parents to the mapping, if applicable # At the end of the algorithm, reset all flags such that another method can act on the graph freely. A function returning the name of each model and the corresponding parameters to this model Returns ------- list: Each entry is a tuple, the first entry of which is the name of the model and the second entry is the parameter values associated with it Returns the current values of all free parameters in the model. Commonly used before perturbing the parameters of the model. Parameters ---------- models: list of abcpy.ProbabilisticModel objects The models for which, together with their parents, the parameter values should be returned. If no value is provided, the root models are assumed to be the model of the inference method. is_root: boolean Specifies whether the current models are at the root. This ensures that the values corresponding to simulated observations will not be returned. Returns ------- list A list containing all currently sampled values of the free parameters. # If we are at the root, we set models to the model attribute of the inference method # If we are not at the root, the sampled values for the current node should be returned # Implement a depth-first search to return also the sampled values associated with each parent of the model # At the end of the algorithm, are flags are reset such that new methods can act on the graph freely Sets new values for the currently used values of each random variable. Commonly used after perturbing the parameter values using a kernel. Parameters ---------- parameters: list Defines the values to which the respective parameter values of the models should be set model: list of abcpy.ProbabilisticModel objects Defines all models for which, together with their parents, new values should be set. If no value is provided, the root models are assumed to be the model of the inference method. index: integer The current index to be considered in the parameters list is_root: boolean Defines whether the current models are at the root. This ensures that only values corresponding to random variables will be set. 
Returns ------- list: [boolean, integer] Returns whether it was possible to set all parameters and the next index to be considered in the parameters list. # If we are at the root, we set models to the model attribute of the inference method # New parameters should only be set in case we are not at the root # new_output_values = np.array(parameters[index:index + model.get_output_dimension()]) # model.get_output_dimension() # New parameters for all parents are set using a depth-first search # At the end of the algorithm, are flags are reset such that new methods can act on the graph freely # At the end of the algorithm, are flags are reset such that new methods can act on the graph freely Orders the parameters returned by a kernel in the order required by the graph. Commonly used when perturbing the parameters. Parameters ---------- parameters_and_models: list of tuples Contains tuples containing as the first entry the probabilistic model to be considered and as the second entry the parameter values associated with this model models: list Contains the root probabilistic models that make up the graph. If no value is provided, the root models are assumed to be the model of the inference method. Returns ------- list The ordering which can be used by recursive functions on the graph. # If we are at the root, we set models to the model attribute of the inference method # Check all entries in parameters_and_models to determine whether the current model is contained within it # Recursively order all the parents of the current model # At the end of the algorithm, are flags are reset such that new methods can act on the graph freely Simulates data of each model using the currently sampled or perturbed parameters. Parameters ---------- rng: random number generator The random number generator to be used. Returns ------- list Each entry corresponds to the simulated data of one model. | 2.790988 | 3 |
PyWidget3/widget.py | galaxyjim/PyWidget3 | 0 | 6630936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2009 <NAME>
# Copyright (c) 2015 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import collections
import pyglet
from pyglet.gl import *
from pyglet.event import EventDispatcher
from .shape import Rectangle, Ellipse, Cross, Star
# ------------------------------------------------------------------------------
class Widget(EventDispatcher):
''' Abstract widget.
This is the common abstract class for all widgets.
'''
# _________________________________________________________________ __init__
def __init__(self, x=0, y=0, z=0, width=100, height=100,
anchor_x='left', anchor_y='bottom', *args, **kwargs):
''' Create a displayable widget.
:Parameters:
`x` : float
X coordinate of the widget relative to anchor_x.
`y` : float
Y coordinate of the widget relative to anchor_y.
`z` : float
Z coordinate of the widget plane.
`width` : int
Width of the widget.
`height` : int
Height of the widget.
`anchor_x` : str
Horizontal alignment of the widget.
See `Widget.anchor_x` for details.
`anchor_y` : str
Vertical alignment of the widget.
See `Widget.anchor_y` for details.
'''
EventDispatcher.__init__(self)
self._x, self._y, self._z = x, y, z
self._root_x, self._root_y, self._root_z = 0,0,0
self._width = width
self._height = height
self.anchor_x = anchor_x
self.anchor_y = anchor_y
self._elements = collections.OrderedDict() # we need to preserve draw order
self._moveable = True
self._focusable = True
self._sizeable = True
self._hidden = False
self._parent = None
self.focus = None
# ________________________________________________________________________ x
def _get_x(self):
return self._x
def _set_x(self, x):
self._root_x += (x-self._x)
self._x = x
self.update_x()
x = property(_get_x, _set_x,
        doc='''X coordinate of the widget.
:type: int
''')
def update_x(self):
pass
# ________________________________________________________________________ y
def _get_y(self):
return self._y
def _set_y(self, y):
self._root_y += (y-self._y)
self._y = y
self.update_y()
y = property(_get_y, _set_y,
doc='''Y coordinate of the widget.
:type: int
''')
def update_y(self):
pass
# ________________________________________________________________________ z
def _get_z(self):
return self._z
def _set_z(self, z):
self._z = z
self.update_z()
z = property(_get_z, _set_z,
doc='''Z coordinate of the widget.
:type: int
''')
def update_z(self):
pass
# ____________________________________________________________________ width
def _get_width(self):
return self._width
def _set_width(self, width):
self._width = width
self.update_width()
width = property(_get_width, _set_width,
doc='''Width of the widget.
:type: int
''')
def update_width(self):
pass
# ___________________________________________________________________ height
def _get_height(self):
return self._height
def _set_height(self, height):
self._height = height
self.update_height()
height = property(_get_height, _set_height,
doc='''Height of the widget.
:type: int
''')
def update_height(self):
pass
# _________________________________________________________________ anchor_x
def _get_anchor_x(self):
return self._anchor_x
def _set_anchor_x(self, anchor_x):
self._anchor_x = anchor_x
if self.anchor_x == 'center':
self._root_x = self.x-self.width/2
elif self.anchor_x == 'right':
self._root_x = self.x-self.width
else:
self._root_x = self.x
anchor_x = property(_get_anchor_x, _set_anchor_x,
doc='''Horizontal alignment of the widget.
The shape is positioned relative to `x` and `width` according to this
property, which must be one of the alignment constants `left`,
`center` or `right`.
:type: str
''')
# _________________________________________________________________ anchor_y
def _get_anchor_y(self):
return self._anchor_y
def _set_anchor_y(self, anchor_y):
self._anchor_y = anchor_y
if self.anchor_y == 'center':
self._root_y = self.y-self.height/2
elif self.anchor_y == 'top':
self._root_y = self.y-self.height
else:
self._root_y = self.y
anchor_y = property(_get_anchor_y, _set_anchor_y,
doc='''Vertical alignment of the widget.
The shape is positioned relative to `y` according to this property,
which must be one of the alignment constants `bottom`, `center`
or `top`.
:type: str
''')
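    # Worked example of the anchor arithmetic above (hypothetical values): a widget with
    # width=100, height=50 created at x=200, y=10, anchor_x='center', anchor_y='bottom'
    # gets its lower-left corner at _root_x = 200 - 100/2 = 150 and _root_y = 10.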
# ________________________________________________________________ SetParent
def set_parent(self, parent):
self._parent = parent
# ________________________________________________________________ SetTopmost
def set_topmost(self):
if self._parent != None:
key = None
for e in self._parent._elements:
if self._parent._elements[e] == self:
key = e
break
if key == None:
raise(Exception('Parent incorrectly set'))
self._parent._elements.move_to_end(key)
# pass message up chain, so the entire branch is moved up
self._parent.set_topmost()
# ________________________________________________________________ outer_box
def outer_box(self):
''' Returns the outer bounding box of the widget
The outer bounding box may be larger than given dimensions because some
labels or ornaments of the widget may extend actual dimensions.
'''
return self._root_x, self._root_y, self._width, self._height
# ________________________________________________________________ inner_box
def inner_box(self):
''' Returns the inner bounding box of the widget
        The inner bounding box delimits the available space for children.
'''
return self._root_x, self._root_y, self._width, self._height
# ___________________________________________________________ on_mouse_press
def on_mouse_press(self, x, y, button, modifiers):
''' Handles on_mouse_press events
:Parameters:
`x` : float
X coordinate.
`y` : float
Y coordinate.
`button` : int
Button identifier.
`modifiers` : int
Button modifiers.
'''
for i in reversed(self._elements):
if hasattr(self._elements[i], 'on_mouse_press'):
status = self._elements[i].on_mouse_press(x - self.x, y - self.y, button, modifiers)
if status == pyglet.event.EVENT_HANDLED:
return(status)
# ____________________________________________________________ on_mouse_drag
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
''' Handles on_mouse_drag events
:Parameters:
`x` : float
X coordinate.
`y` : float
Y coordinate.
`dx` : float
            X displacement.
`dy` : float
            Y displacement.
`button` : int
Button identifier.
`modifiers` : int
Button modifiers.
'''
for i in reversed(self._elements):
if hasattr(self._elements[i], 'on_mouse_drag'):
status = self._elements[i].on_mouse_drag(x - self.x, y - self.y, dx, dy, button, modifiers)
if status == pyglet.event.EVENT_HANDLED:
return(status)
# __________________________________________________________ on_mouse_motion
def on_mouse_motion(self, x, y, dx, dy):
''' Handles on_mouse_motion events
:Parameters:
`x` : float
X coordinate.
`y` : float
Y coordinate.
`dx` : float
            X displacement.
`dy` : float
            Y displacement.
'''
for i in reversed(self._elements):
if hasattr(self._elements[i], 'on_mouse_motion'):
status = self._elements[i].on_mouse_motion(x - self.x, y - self.y, dx, dy)
if status == pyglet.event.EVENT_HANDLED:
return(status)
# _________________________________________________________ on_mouse_release
def on_mouse_release(self, x, y, button, modifiers):
''' Handles on_mouse_release events
:Parameters:
`x` : float
X coordinate.
`y` : float
Y coordinate.
`button` : int
Button identifier.
`modifiers` : int
Button modifiers.
'''
for i in reversed(self._elements):
if hasattr(self._elements[i], 'on_mouse_release'):
status = self._elements[i].on_mouse_release(x - self.x, y - self.y, button, modifiers)
if status == pyglet.event.EVENT_HANDLED:
return(status)
# _________________________________________________________________ hit_test
def hit_test(self,x,y):
''' Return True if x and y are inside the Widget
:Parameters:
`x` : float
X coordinate.
`y` : float
Y coordinate.
'''
return ((self._root_x <= x <= (self._root_x+self._width)) and
(self._root_y <= y <= (self._root_y+self._height)))
# _____________________________________________________________________ on_draw
def on_draw(self):
''' Handles on_draw events
'''
if not self._hidden:
glTranslatef(self._root_x, self._root_y, self._root_z)
for key in self._elements.keys():
self._elements[key].draw()
glTranslatef(-self._root_x, -self._root_y, -self._root_z)
# _____________________________________________________________________ on_draw
def draw(self):
self.on_draw()
# _____________________________________________________________________ on_text
def on_text(self, text):
if self.focus:
self.focus.caret.on_text(text)
# _____________________________________________________________________ on_text_motion
def on_text_motion(self, motion):
if self.focus:
self.focus.caret.on_text_motion(motion)
# _____________________________________________________________________ on_text_motion_select
def on_text_motion_select(self, motion):
if self.focus:
self.focus.caret.on_text_motion_select(motion)
# _____________________________________________________________________ set_focus
# set text input window.
# requires a root container window to route text messages.
def set_focus(self, focus):
# find root window
w = self
runaway = 10
        while w._parent is not None and runaway > 0:
w = w._parent
runaway -= 1
w._apply_focus(focus)
# _____________________________________________________________________ _apply_focus
# internal: remove old focus. set new focus
def _apply_focus(self, focus):
if self.focus:
self.focus.on_unset_focus()
self.focus = focus
if self.focus:
self.focus.on_set_focus()
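A minimal usage sketch for the Widget class defined in this file. It is illustrative only: the import path is assumed, it requires pyglet and the sibling shape module to be importable, and it registers the child directly in the _elements OrderedDict because no public add() helper appears in this file.

# Illustrative sketch, not part of the original module.
from widget import Widget   # hypothetical import path for the class above

root = Widget(x=0, y=0, width=400, height=300)
child = Widget(x=10, y=10, width=100, height=50)
child.set_parent(root)
root._elements['child'] = child   # _elements is an OrderedDict that preserves draw order
child.set_topmost()               # moves 'child' to the end of root's draw order
print(root.hit_test(50, 40))      # True: the point lies inside root's bounding box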
venv/lib/python2.7/site-packages/ansible/modules/windows/win_stat.py | haind27/test01 | 37 | 6630937 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_stat
version_added: "1.7"
short_description: Get information about Windows files
description:
- Returns information about a Windows file.
- For non-Windows targets, use the M(stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
required: yes
get_md5:
description:
    - Whether to return the checksum of the file. Between Ansible 1.9
and 2.2 this is no longer an MD5, but a SHA1 instead. As of Ansible
2.3 this is back to an MD5. Will return None if host is unable to
use specified algorithm.
- The default of this option changed from C(yes) to C(no) in Ansible 2.5
and will be removed altogether in Ansible 2.9.
- Use C(get_checksum=true) with C(checksum_algorithm=md5) to return an
md5 hash under the C(checksum) return value.
type: bool
default: 'no'
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
type: bool
default: 'yes'
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if
the host is unable to use specified algorithm.
default: sha1
choices: [ md5, sha1, sha256, sha384, sha512 ]
version_added: "2.3"
notes:
- For non-Windows targets, use the M(stat) module instead.
author:
- <NAME> (@cchurch)
'''
EXAMPLES = r'''
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
- name: Obtain information about a folder
win_stat:
path: C:\bar
register: folder_info
- name: Get MD5 checksum of a file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
- name: Get SHA1 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
- name: Get SHA256 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
'''
RETURN = r'''
changed:
description: Whether anything was changed
returned: always
type: boolean
sample: True
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: Attributes of the file at path in raw form
returned: success, path exists
type: string
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified
            returned: success, path exists, path is a file, get_checksum == True and the specified checksum_algorithm is supported
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: The create time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
exists:
description: If the path exists or not
returned: success
type: boolean
sample: True
extension:
description: The extension of the file at path
returned: success, path exists, path is a file
type: string
sample: ".ps1"
filename:
description: The name of the file (without path)
returned: success, path exists, path is a file
type: string
            sample: foo.ini
hlnk_targets:
description: List of other files pointing to the same file (hard links), excludes the current file
returned: success, path exists
type: list
sample:
- C:\temp\file.txt
- C:\Windows\update.log
isarchive:
description: If the path is ready for archiving or not
returned: success, path exists
type: boolean
sample: True
isdir:
description: If the path is a directory or not
returned: success, path exists
type: boolean
sample: True
ishidden:
description: If the path is hidden or not
returned: success, path exists
type: boolean
sample: True
isjunction:
description: If the path is a junction point or not
returned: success, path exists
type: boolean
sample: True
islnk:
description: If the path is a symbolic link or not
returned: success, path exists
type: boolean
sample: True
isreadonly:
description: If the path is read only or not
returned: success, path exists
type: boolean
sample: True
isreg:
description: If the path is a regular file
returned: success, path exists
type: boolean
sample: True
isshared:
description: If the path is shared or not
returned: success, path exists
type: boolean
sample: True
lastaccesstime:
description: The last access time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: The last modification time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: Target of the symlink normalized for the remote filesystem
returned: success, path exists and the path is a symbolic link or junction point
type: string
sample: C:\temp\link
lnk_target:
description: Target of the symlink. Note that relative paths remain relative
returned: success, path exists and the path is a symbolic link or junction point
type: string
sample: ..\link
md5:
description: The MD5 checksum of a file (Between Ansible 1.9 and 2.2 this was returned as a SHA1 hash), will be removed in 2.9
            returned: success, path exists, path is a file, get_md5 == True
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
nlink:
description: Number of links to the file (hard links)
returned: success, path exists
type: int
sample: 1
owner:
description: The owner of the file
returned: success, path exists
type: string
sample: BUILTIN\Administrators
path:
description: The full absolute path to the file
returned: success, path exists, file exists
type: string
sample: C:\foo.ini
sharename:
description: The name of share if folder is shared
returned: success, path exists, file is a directory and isshared == True
type: string
sample: file-share
size:
description: The size in bytes of a file or folder
returned: success, path exists, file is not a link
type: int
sample: 1024
'''
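The checksum values documented in RETURN above can be reproduced for local files with Python's standard hashlib; the sketch below is illustrative only and is not the module's implementation (the real logic lives in the PowerShell file of the same name).

# Illustrative only: compute the kind of digest win_stat reports for a file.
import hashlib

def file_checksum(path, algorithm='sha1', chunk_size=65536):
    digest = hashlib.new(algorithm)   # md5, sha1, sha256, sha384 or sha512
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# print(file_checksum('C:/foo.ini', 'md5'))  # path reused from the examples above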
tests/algorithm/common/test_model.py | zbzhu99/malib | 0 | 6630938 | import pytest
from malib.algorithm.common.model import Model, get_model, mlp, RNN, MLP
from gym import spaces
import numpy as np
@pytest.fixture
def layers_config():
return [
{"units": 32, "activation": "ReLU"},
{"units": 32, "activation": "Tanh"},
{"units": 32, "activation": "LeakyReLU"},
]
def test_mlp_func(layers_config):
mlp(layers_config)
@pytest.mark.parametrize(
"model_fn, model_config",
[
(
MLP,
{
"layers": [
{"units": 32, "activation": "ReLU"},
{"units": 32, "activation": "Tanh"},
{"units": 32, "activation": "LeakyReLU"},
],
"output": {"activation": "Identity"},
},
),
(RNN, {"rnn_hidden_dim": 32}),
],
scope="class",
)
@pytest.mark.parametrize(
"obs_space",
[
spaces.Box(0.0, 1.0, (8,)),
spaces.Dict({"1": spaces.Box(0.0, 1.0, (8,)), "2": spaces.Box(0.0, 1.0, (6,))}),
],
scope="class",
)
@pytest.mark.parametrize(
"act_space",
[
spaces.Box(0.0, 1.0, (8,)),
spaces.Dict({"1": spaces.Box(0.0, 1.0, (8,)), "2": spaces.Box(0.0, 1.0, (6,))}),
],
scope="class",
)
class TestModel:
@pytest.fixture(autouse=True)
def setUp(self, model_fn, model_config, obs_space, act_space):
self.model = model_fn(obs_space, act_space, model_config)
assert isinstance(self.model, Model)
def test_initial_state(self):
init_state = self.model.get_initial_state()
assert isinstance(init_state, list)
def test_forward(self):
batch_size = 32
if isinstance(self.model, MLP):
inputs = np.zeros((batch_size, self.model.input_dim))
outputs = self.model(inputs)
assert outputs.shape[0] == batch_size
elif isinstance(self.model, RNN):
inputs = (
np.zeros((batch_size, self.model.input_dim)),
self.model.get_initial_state(batch_size)[0],
)
outputs = self.model(*inputs)
assert outputs[0].shape[0] == outputs[1].shape[0] == batch_size
else:
raise ValueError("Please add tests for {}".format(type(self.model)))
@pytest.mark.parametrize("model_type", ["mlp", "rnn", "cnn", "rcnn"])
def test_get_model(model_type, layers_config):
model_config = {"network": model_type, "layers": layers_config}
if model_type in ["rnn", "rcnn"]:
model_config.update({"rnn_hidden_dim": 32})
try:
get_model(model_config)
except NotImplementedError:
print("model_type {} is not implement yet.".format(model_type))
phigros/view/__init__.py | BETTEY-developers/pygros | 1 | 6630939 | import copy
from os import path, stat
import cocos
import cocos.actions
import cocos.sprite
import cocos.layer
import pyglet
from . import data, settings
from ..chart import Line, BaseNote, Drag, Flick, Hold, chart
__all__ = ['preview']
combo_label = None
score_label = None
def update_labels():
global combo_label, score_label
parent = None
if combo_label is not None:
parent = combo_label.parent
combo_label.kill()
if data.combo > 2:
text = str(data.combo)
else:
text = ''
combo_label = cocos.text.Label(text, (settings.width / 2, settings.height),
font_name=settings.font_name, font_size=20,
anchor_x='center', anchor_y='top')
combo_label.add(cocos.text.Label('AUTOPLAY' if data.combo > 2 else '', (0, -30),
font_name=settings.font_name, font_size=10,
anchor_x='center', anchor_y='top'))
if parent is not None:
parent.add(combo_label)
parent = None
if score_label is not None:
parent = score_label.parent
score_label.kill()
text = str(round(data.score)).zfill(7)
score_label = cocos.text.Label(text,
(min(settings.width, (settings.width + settings.size) / 2), settings.height),
font_name=settings.font_name, font_size=15,
anchor_x='right', anchor_y='top')
if parent is not None:
parent.add(score_label)
def score():
data.combo += 1
data.score += 1000000 / data.all_combo
update_labels()
class NoteExplode(cocos.sprite.Sprite):
def __init__(self, pos):
        # Load the 29 explosion animation frames in order.
        images = [pyglet.resource.image('Explode-1_{}.png'.format(i)) for i in range(1, 30)]
ani = pyglet.image.Animation.from_image_sequence(images, duration=0.01, loop=False)
super().__init__(ani, pos)
self.do(cocos.actions.ScaleBy(1.3, 0.1) + cocos.actions.Delay(0.05) + cocos.actions.FadeOut(0.05) + cocos.actions.CallFuncS(lambda e: e.kill()))
class NoteSprite(cocos.sprite.Sprite):
def __init__(self, note: BaseNote):
def play_sound():
sound = 'click.wav'
if isinstance(note, Drag):
sound = 'drag.wav'
elif isinstance(note, Flick):
sound = 'flick.wav'
return pyglet.resource.media(sound).play()
def p(state: BaseNote.NoteState):
res = copy.copy(state)
res.pos *= settings.size
res.speed *= settings.size
return res
        note.states.sort(key=lambda e: e.sec)
        for state in note.states:
            state.pos *= settings.size
            state.speed *= settings.size
states = note.states
dis = 0
img = 'tap.png'
if isinstance(note, Drag):
img = 'drag.png'
elif isinstance(note, Flick):
img = 'flick.png'
elif isinstance(note, Hold):
img = 'hold.png'
sec = note.tap_sec
length = 0
for i in states:
if i.sec <= note.tap_sec:
continue
length += i.speed * (i.sec - sec)
sec = i.sec
length += states[-1].speed * (note.end_sec - sec)
dis += length // 2
sec = note.tap_sec
for i in states[::-1]:
if i.sec > note.tap_sec:
break
note.show_sec = min(
note.show_sec,
sec - (settings.size * 2 - abs(dis) + (length if isinstance(note, Hold) else 0)) / abs(i.speed)
)
dis += (sec - i.sec) * i.speed
sec = i.sec
note.show_sec = min(
note.show_sec,
sec - (settings.size * 2 - abs(dis) + (length if isinstance(note, Hold) else 0)) / abs(states[0].speed)
)
dis += sec * states[0].speed
super().__init__(img, (states[0].pos, dis))
if isinstance(note, Hold):
self.scale_y = length / self.image.height
action = cocos.actions.Hide()
sec = 0
speed = states[0].speed
for i in states:
if i.sec > note.tap_sec:
break
dis -= (i.sec - sec) * speed
act = cocos.actions.MoveTo((i.pos, dis), i.sec - sec)
if sec <= note.show_sec < i.sec:
act |= cocos.actions.Delay(note.show_sec - sec) + cocos.actions.Show()
action += act
sec = i.sec
speed = i.speed
act = cocos.actions.MoveTo((states[-1].pos, length // 2 if isinstance(note, Hold) else 0), note.tap_sec - sec)
if sec <= note.show_sec < note.tap_sec:
act |= cocos.actions.Delay(note.show_sec - sec) + cocos.actions.Show()
action += act
action += cocos.actions.CallFunc(play_sound)
if isinstance(note, Hold):
class Qwq(cocos.actions.IntervalAction):
def init(self, length, duration):
self._length = length
self.duration = duration
def start(self):
self._cur = self.target.scale_y
def update(self, t):
from random import randint
if randint(0, 6) < 3:
self.target.parent.add(NoteExplode((self.target.x, 0)))
self.target.scale_y = (self._cur - self._length) * (1 - t) + self._length
nowlen = length // 2
sec = note.tap_sec
for i in states:
if i.sec <= note.tap_sec:
continue
nowlen -= (i.sec - sec) * i.speed
action += cocos.actions.MoveTo((states[-1].pos, nowlen // 2), i.sec - sec) | \
Qwq(nowlen / self.image.height, i.sec - sec)
sec = i.sec
action += cocos.actions.MoveTo((states[-1].pos, 0), note.end_sec - sec) | \
Qwq(0, note.end_sec - sec)
def explode(e: NoteSprite):
e.kill()
e.parent.add(NoteExplode((e.x, e.y)))
score()
action += cocos.actions.CallFuncS(explode)
self.do(action)
class LineSprite(cocos.sprite.Sprite):
def __init__(self, line: Line):
class width_adjustment(cocos.actions.IntervalAction):
def init(self, width, duration):
self._width = width
self.duration = duration
def start(self):
self._cur = self.target.scale_y
def update(self, t):
self.target.scale_y = (self._cur - self._width) * (1 - t) + self._width
        line.states.sort(key=lambda e: e.sec)
        for state in line.states:
            state.x *= settings.size
state.x += (settings.width - settings.size) / 2
state.y *= settings.size
state.y += (settings.height - settings.size) / 2
states = line.states
super().__init__('line_empty.png', (states[0].x, states[0].y), states[0].angle)
line_sprite = cocos.sprite.Sprite('line.png')
self.add(line_sprite)
for note in line.notes:
self.add(NoteSprite(note))
action = None
pre = states[0]
for i in states[1:]:
act = cocos.actions.MoveTo((i.x, i.y), i.sec - pre.sec) | cocos.actions.RotateBy(i.angle - pre.angle, i.sec - pre.sec)
if i.rev != pre.rev:
act |= cocos.actions.Delay(i.sec - pre.sec) + cocos.actions.FlipY(duration=0.01)
if not action:
action = act
else:
action += act
pre = i
if action:
self.do(action)
action = None
sec = 0
for i in states:
act = width_adjustment(i.width, i.sec - sec)
if not action:
action = act
else:
action += act
sec = i.sec
if action:
line_sprite.do(action)
class Player(cocos.layer.Layer):
def __init__(self):
super().__init__()
if settings.background is not None:
back = cocos.sprite.Sprite(settings.background, (settings.width / 2, settings.height / 2))
back.opacity = settings.opacity
back.scale = settings.height / back.image.height
self.add(back)
for line in chart.lines:
self.add(LineSprite(line))
update_labels()
self.add(combo_label)
self.add(score_label)
self.add(cocos.text.Label(settings.name,
(max(0, (settings.width - settings.size) / 2), 0),
font_name=settings.font_name, font_size=15,
anchor_x='left', anchor_y='bottom'))
self.add(cocos.text.Label(settings.diff,
(min(settings.width, (settings.width + settings.size) / 2), 0),
font_name=settings.font_name, font_size=15,
anchor_x='right', anchor_y='bottom'))
if settings.music is not None:
pyglet.resource.media(settings.music).play()
def preview(
name='test',
diff='SP Lv ?',
music=None,
background=None,
*,
height=600,
width=800,
size=600,
opacity=127,
font_name=['Electrolize'],
):
settings.name = name
settings.diff = diff
settings.music = music
settings.background = background
settings.height = height
settings.width = width
settings.size = size
settings.opacity = opacity
settings.font_name = font_name
data.all_combo = len(chart.notes)
pyglet.resource.path.extend([path.join(path.dirname(__file__), 'resources')])
pyglet.resource.reindex()
pyglet.resource.add_font('Electrolize.ttf')
cocos.director.director.init(width=width, height=height)
main_scene = cocos.scene.Scene(Player())
cocos.director.director.run(main_scene)
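A hedged usage sketch for preview(). A chart must already have been built through the phigros.chart module (its API is not shown in this file), the import path is assumed from the package layout, and the media file names are placeholders.

# Illustrative only: resource names are placeholders.
from phigros.view import preview

preview(
    name='Demo Song',
    diff='IN Lv 13',
    music='demo.ogg',           # hypothetical audio resource
    background='demo_bg.png',   # hypothetical background image
    width=1280,
    height=720,
    size=700,
)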
| import copy
from os import path, stat
import cocos
import cocos.actions
import cocos.sprite
import cocos.layer
import pyglet
from . import data, settings
from ..chart import Line, BaseNote, Drag, Flick, Hold, chart
__all__ = ['preview']
combo_label = None
score_label = None
def update_labels():
global combo_label, score_label
parent = None
if combo_label is not None:
parent = combo_label.parent
combo_label.kill()
if data.combo > 2:
text = str(data.combo)
else:
text = ''
combo_label = cocos.text.Label(text, (settings.width / 2, settings.height),
font_name=settings.font_name, font_size=20,
anchor_x='center', anchor_y='top')
combo_label.add(cocos.text.Label('AUTOPLAY' if data.combo > 2 else '', (0, -30),
font_name=settings.font_name, font_size=10,
anchor_x='center', anchor_y='top'))
if parent is not None:
parent.add(combo_label)
parent = None
if score_label is not None:
parent = score_label.parent
score_label.kill()
text = str(round(data.score)).zfill(7)
score_label = cocos.text.Label(text,
(min(settings.width, (settings.width + settings.size) / 2), settings.height),
font_name=settings.font_name, font_size=15,
anchor_x='right', anchor_y='top')
if parent is not None:
parent.add(score_label)
def score():
data.combo += 1
data.score += 1000000 / data.all_combo
update_labels()
class NoteExplode(cocos.sprite.Sprite):
def __init__(self, pos):
images=[pyglet.resource.image('Explode-1_1.png'),
pyglet.resource.image('Explode-1_2.png'),
pyglet.resource.image('Explode-1_3.png'),
pyglet.resource.image('Explode-1_4.png'),
pyglet.resource.image('Explode-1_5.png'),
pyglet.resource.image('Explode-1_6.png'),
pyglet.resource.image('Explode-1_7.png'),
pyglet.resource.image('Explode-1_8.png'),
pyglet.resource.image('Explode-1_9.png'),
pyglet.resource.image('Explode-1_10.png'),
pyglet.resource.image('Explode-1_11.png'),
pyglet.resource.image('Explode-1_12.png'),
pyglet.resource.image('Explode-1_13.png'),
pyglet.resource.image('Explode-1_14.png'),
pyglet.resource.image('Explode-1_15.png'),
pyglet.resource.image('Explode-1_16.png'),
pyglet.resource.image('Explode-1_17.png'),
pyglet.resource.image('Explode-1_18.png'),
pyglet.resource.image('Explode-1_19.png'),
pyglet.resource.image('Explode-1_20.png'),
pyglet.resource.image('Explode-1_21.png'),
pyglet.resource.image('Explode-1_22.png'),
pyglet.resource.image('Explode-1_23.png'),
pyglet.resource.image('Explode-1_24.png'),
pyglet.resource.image('Explode-1_25.png'),
pyglet.resource.image('Explode-1_26.png'),
pyglet.resource.image('Explode-1_27.png'),
pyglet.resource.image('Explode-1_28.png'),
pyglet.resource.image('Explode-1_29.png')]
ani = pyglet.image.Animation.from_image_sequence(images, duration=0.01, loop=False)
super().__init__(ani, pos)
self.do(cocos.actions.ScaleBy(1.3, 0.1) + cocos.actions.Delay(0.05) + cocos.actions.FadeOut(0.05) + cocos.actions.CallFuncS(lambda e: e.kill()))
class NoteSprite(cocos.sprite.Sprite):
def __init__(self, note: BaseNote):
def play_sound():
sound = 'click.wav'
if isinstance(note, Drag):
sound = 'drag.wav'
elif isinstance(note, Flick):
sound = 'flick.wav'
return pyglet.resource.media(sound).play()
def p(state: BaseNote.NoteState):
res = copy.copy(state)
res.pos *= settings.size
res.speed *= settings.size
return res
note.states.sort(key=lambda e:e.sec)
for state in note.states:
state.pos *=settings.size
state.speed *=settings.size
states = note.states
dis = 0
img = 'tap.png'
if isinstance(note, Drag):
img = 'drag.png'
elif isinstance(note, Flick):
img = 'flick.png'
elif isinstance(note, Hold):
img = 'hold.png'
sec = note.tap_sec
length = 0
for i in states:
if i.sec <= note.tap_sec:
continue
length += i.speed * (i.sec - sec)
sec = i.sec
length += states[-1].speed * (note.end_sec - sec)
dis += length // 2
sec = note.tap_sec
for i in states[::-1]:
if i.sec > note.tap_sec:
break
note.show_sec = min(
note.show_sec,
sec - (settings.size * 2 - abs(dis) + (length if isinstance(note, Hold) else 0)) / abs(i.speed)
)
dis += (sec - i.sec) * i.speed
sec = i.sec
note.show_sec = min(
note.show_sec,
sec - (settings.size * 2 - abs(dis) + (length if isinstance(note, Hold) else 0)) / abs(states[0].speed)
)
dis += sec * states[0].speed
super().__init__(img, (states[0].pos, dis))
if isinstance(note, Hold):
self.scale_y = length / self.image.height
action = cocos.actions.Hide()
sec = 0
speed = states[0].speed
for i in states:
if i.sec > note.tap_sec:
break
dis -= (i.sec - sec) * speed
act = cocos.actions.MoveTo((i.pos, dis), i.sec - sec)
if sec <= note.show_sec < i.sec:
act |= cocos.actions.Delay(note.show_sec - sec) + cocos.actions.Show()
action += act
sec = i.sec
speed = i.speed
act = cocos.actions.MoveTo((states[-1].pos, length // 2 if isinstance(note, Hold) else 0), note.tap_sec - sec)
if sec <= note.show_sec < note.tap_sec:
act |= cocos.actions.Delay(note.show_sec - sec) + cocos.actions.Show()
action += act
action += cocos.actions.CallFunc(play_sound)
if isinstance(note, Hold):
class Qwq(cocos.actions.IntervalAction):
def init(self, length, duration):
self._length = length
self.duration = duration
def start(self):
self._cur = self.target.scale_y
def update(self, t):
from random import randint
if randint(0, 6) < 3:
self.target.parent.add(NoteExplode((self.target.x, 0)))
self.target.scale_y = (self._cur - self._length) * (1 - t) + self._length
nowlen = length // 2
sec = note.tap_sec
for i in states:
if i.sec <= note.tap_sec:
continue
nowlen -= (i.sec - sec) * i.speed
action += cocos.actions.MoveTo((states[-1].pos, nowlen // 2), i.sec - sec) | \
Qwq(nowlen / self.image.height, i.sec - sec)
sec = i.sec
action += cocos.actions.MoveTo((states[-1].pos, 0), note.end_sec - sec) | \
Qwq(0, note.end_sec - sec)
def explode(e: NoteSprite):
e.kill()
e.parent.add(NoteExplode((e.x, e.y)))
score()
action += cocos.actions.CallFuncS(explode)
self.do(action)
class LineSprite(cocos.sprite.Sprite):
def __init__(self, line: Line):
class width_adjustment(cocos.actions.IntervalAction):
def init(self, width, duration):
self._width = width
self.duration = duration
def start(self):
self._cur = self.target.scale_y
def update(self, t):
self.target.scale_y = (self._cur - self._width) * (1 - t) + self._width
line.states.sort(key=lambda e:e.sec)
for state in line.states:
state.x*=settings.size
state.x += (settings.width - settings.size) / 2
state.y *= settings.size
state.y += (settings.height - settings.size) / 2
states = line.states
super().__init__('line_empty.png', (states[0].x, states[0].y), states[0].angle)
line_sprite = cocos.sprite.Sprite('line.png')
self.add(line_sprite)
for note in line.notes:
self.add(NoteSprite(note))
action = None
pre = states[0]
for i in states[1:]:
act = cocos.actions.MoveTo((i.x, i.y), i.sec - pre.sec) | cocos.actions.RotateBy(i.angle - pre.angle, i.sec - pre.sec)
if i.rev != pre.rev:
act |= cocos.actions.Delay(i.sec - pre.sec) + cocos.actions.FlipY(duration=0.01)
if not action:
action = act
else:
action += act
pre = i
if action:
self.do(action)
action = None
sec = 0
for i in states:
act = width_adjustment(i.width, i.sec - sec)
if not action:
action = act
else:
action += act
sec = i.sec
if action:
line_sprite.do(action)
class Player(cocos.layer.Layer):
def __init__(self):
super().__init__()
if settings.background is not None:
back = cocos.sprite.Sprite(settings.background, (settings.width / 2, settings.height / 2))
back.opacity = settings.opacity
back.scale = settings.height / back.image.height
self.add(back)
for line in chart.lines:
self.add(LineSprite(line))
update_labels()
self.add(combo_label)
self.add(score_label)
self.add(cocos.text.Label(settings.name,
(max(0, (settings.width - settings.size) / 2), 0),
font_name=settings.font_name, font_size=15,
anchor_x='left', anchor_y='bottom'))
self.add(cocos.text.Label(settings.diff,
(min(settings.width, (settings.width + settings.size) / 2), 0),
font_name=settings.font_name, font_size=15,
anchor_x='right', anchor_y='bottom'))
if settings.music is not None:
pyglet.resource.media(settings.music).play()
def preview(
name='test',
diff='SP Lv ?',
music=None,
background=None,
*,
height=600,
width=800,
size=600,
opacity=127,
font_name=['Electrolize'],
):
settings.name = name
settings.diff = diff
settings.music = music
settings.background = background
settings.height = height
settings.width = width
settings.size = size
settings.opacity = opacity
settings.font_name = font_name
data.all_combo = len(chart.notes)
pyglet.resource.path.extend([path.join(path.dirname(__file__), 'resources')])
pyglet.resource.reindex()
pyglet.resource.add_font('Electrolize.ttf')
cocos.director.director.init(width=width, height=height)
main_scene = cocos.scene.Scene(Player())
cocos.director.director.run(main_scene)
| none | 1 | 2.141232 | 2 |
|
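The chart-preview module above is driven through its preview() entry point, which fills in the module-level settings, registers the bundled pyglet resources, and runs a Player scene under the cocos director. A minimal caller could look like the sketch below; the module name preview_player and the asset names are assumptions for illustration, not part of the original project.

# Hypothetical usage sketch for the preview() entry point shown above.
# `preview_player` is an assumed module name; music/background are placeholders.
import preview_player

preview_player.preview(
    name='Demo Chart',
    diff='SP Lv ?',
    music=None,          # e.g. a pyglet-loadable audio file bundled with the chart
    background=None,     # e.g. 'cover.png' for a dimmed backdrop behind the lines
    height=600,
    width=800,
    size=600,
    opacity=127,
)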
milvus/client/types.py | ireneontheway5/pymilvus | 0 | 6630940 | <reponame>ireneontheway5/pymilvus
from enum import IntEnum
class Status:
"""
:attribute code: int (optional) default as ok
:attribute message: str (optional) current status message
"""
SUCCESS = 0
def __init__(self, code=SUCCESS, message="Success"):
self.code = code
self.message = message
def __repr__(self):
attr_list = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(attr_list))
def __eq__(self, other):
"""
Make Status comparable with self by code
"""
if isinstance(other, int):
return self.code == other
return isinstance(other, self.__class__) and self.code == other.code
def __ne__(self, other):
        return not self.__eq__(other)
def OK(self):
return self.code == Status.SUCCESS
class DataType(IntEnum):
NULL = 0
BOOL = 1
# INT8 = 2
# INT16 = 3
INT32 = 4
INT64 = 5
FLOAT = 10
DOUBLE = 11
# STRING = 20
BINARY_VECTOR = 100
FLOAT_VECTOR = 101
# VECTOR = 200
UNKNOWN = 999
| from enum import IntEnum
class Status:
"""
:attribute code: int (optional) default as ok
:attribute message: str (optional) current status message
"""
SUCCESS = 0
def __init__(self, code=SUCCESS, message="Success"):
self.code = code
self.message = message
def __repr__(self):
attr_list = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(attr_list))
def __eq__(self, other):
"""
Make Status comparable with self by code
"""
if isinstance(other, int):
return self.code == other
return isinstance(other, self.__class__) and self.code == other.code
def __ne__(self, other):
        return not self.__eq__(other)
def OK(self):
return self.code == Status.SUCCESS
class DataType(IntEnum):
NULL = 0
BOOL = 1
# INT8 = 2
# INT16 = 3
INT32 = 4
INT64 = 5
FLOAT = 10
DOUBLE = 11
# STRING = 20
BINARY_VECTOR = 100
FLOAT_VECTOR = 101
# VECTOR = 200
UNKNOWN = 999 | en | 0.695689 | :attribute code: int (optional) default as ok :attribute message: str (optional) current status message Make Status comparable with self by code # INT8 = 2 # INT16 = 3 # STRING = 20 # VECTOR = 200 | 3.305893 | 3 |
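Because Status.__eq__ accepts either another Status or a plain integer code, callers can compare return values directly against Status.SUCCESS. A short usage sketch of the two helpers defined in this file (the surrounding pymilvus client is assumed and not shown):

# Usage sketch for the Status and DataType helpers above.
ok = Status()                          # defaults to code=SUCCESS, message="Success"
assert ok.OK() and ok == 0             # integer comparison goes through __eq__

err = Status(code=1, message="collection not found")
if not err.OK():
    print(repr(err))                   # Status(code=1, message='collection not found')

vec_type = DataType.FLOAT_VECTOR
print(int(vec_type), vec_type.name)    # 101 FLOAT_VECTOR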
parser/fase2/team05/proyecto/analizadorFase2/Abstractas/Expresion.py | LopDlMa/tytus | 0 | 6630941 | from enum import Enum
class Tipos(Enum):
Numero = 1
Cadena = 2
Booleano = 3
Decimal = 4
Id = 5
ISQL = 6
class Expresion():
def __init__(self):
self.trueLabel = self.falseLabel = ''
| from enum import Enum
class Tipos(Enum):
Numero = 1
Cadena = 2
Booleano = 3
Decimal = 4
Id = 5
ISQL = 6
class Expresion():
def __init__(self):
self.trueLabel = self.falseLabel = ''
| none | 1 | 3.404045 | 3 |
|
agreements/app/database/metadata.py | metagov/agreements-prototype | 5 | 6630942 | import json
import logging
from tinydb.database import Document
from tinydb import where
# container for metadata in the tinydb database
class Metadata:
def __init__(self, db):
self.db = db
self.table = db.table('metadata')
self.logger = logging.getLogger(__name__)
# initializes metadata table in database if it hasn't been created yet
if not self.table.contains(doc_id=1):
self.initialize_database()
def initialize_database(self):
self.logger.info('Database is empty, loading default configuration')
config = json.load(open('app/database/default_config.json', 'r'))
config['last_status_parsed'] = config['genesis_status']
self.table.insert(Document(config, doc_id=1))
# retrieves a value from the metadata dictionary
def retrieve(self, tag):
text = self.table.get(doc_id=1)[tag]
# converts to integer or float as needed
try:
val = int(text)
except ValueError:
try:
val = float(text)
except ValueError:
return
return val
# updates a value in the metadata dictionary
def update(self, tag, value):
self.table.update(
{tag: str(value)},
doc_ids=[1]
) | import json
import logging
from tinydb.database import Document
from tinydb import where
# container for metadata in the tinydb database
class Metadata:
def __init__(self, db):
self.db = db
self.table = db.table('metadata')
self.logger = logging.getLogger(__name__)
# initializes metadata table in database if it hasn't been created yet
if not self.table.contains(doc_id=1):
self.initialize_database()
def initialize_database(self):
self.logger.info('Database is empty, loading default configuration')
config = json.load(open('app/database/default_config.json', 'r'))
config['last_status_parsed'] = config['genesis_status']
self.table.insert(Document(config, doc_id=1))
# retrieves a value from the metadata dictionary
def retrieve(self, tag):
text = self.table.get(doc_id=1)[tag]
# converts to integer or float as needed
try:
val = int(text)
except ValueError:
try:
val = float(text)
except ValueError:
return
return val
# updates a value in the metadata dictionary
def update(self, tag, value):
self.table.update(
{tag: str(value)},
doc_ids=[1]
) | en | 0.64779 | # container for metadata in the tinydb database # initializes metadata table in database if it hasn't been created yet # retrieves a value from the metadata dictionary # converts to integer or float as needed # updates a value in the metadata dictionary | 2.686791 | 3 |
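A brief usage sketch for the Metadata container above; the database path is a placeholder, and app/database/default_config.json is assumed to define genesis_status and the other keys the bot reads:

# Usage sketch (placeholder path); Metadata keeps a single document at doc_id=1
# in the 'metadata' table and stores every value as a string.
from tinydb import TinyDB

db = TinyDB('agreements_db.json')
meta = Metadata(db)                            # loads default_config.json on first run

last_id = meta.retrieve('last_status_parsed')  # converted back to an int on read
meta.update('last_status_parsed', last_id + 1)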
devel/toolkitEditor/mainMenu/menuCallbacks.py | t3kt/raytk | 108 | 6630943 | # noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from .mainMenu import MainMenu
ext.mainMenu = MainMenu(COMP())
"""
TopMenu callbacks
Callbacks always take a single argument, which is a dictionary
of values relevant to the callback. Print this dictionary to see what is
being passed. The keys explain what each item is.
TopMenu info keys:
'widget': the TopMenu widget
'item': the item label in the menu list
'index': either menu index or -1 for none
'indexPath': list of parent menu indexes leading to this item
'define': TopMenu define DAT definition info for this menu item
'menu': the popMenu component inside topMenu
"""
def getMenuItems(info):
items = ext.mainMenu.getMenuItems(**info)
print('getMenuItems produced', items)
return items
def onItemTrigger(info):
ext.mainMenu.onMenuTrigger(**info)
# standard menu callbacks
def onSelect(info):
"""
User selects a menu option
"""
# debug(info)
def onRollover(info):
"""
Mouse rolled over an item
"""
def onOpen(info):
"""
Menu opened
"""
def onClose(info):
"""
Menu closed
"""
def onMouseDown(info):
"""
Item pressed
"""
def onMouseUp(info):
"""
Item released
"""
def onClick(info):
"""
Item pressed and released
"""
def onLostFocus(info):
"""
Menu lost focus
""" | # noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from .mainMenu import MainMenu
ext.mainMenu = MainMenu(COMP())
"""
TopMenu callbacks
Callbacks always take a single argument, which is a dictionary
of values relevant to the callback. Print this dictionary to see what is
being passed. The keys explain what each item is.
TopMenu info keys:
'widget': the TopMenu widget
'item': the item label in the menu list
'index': either menu index or -1 for none
'indexPath': list of parent menu indexes leading to this item
'define': TopMenu define DAT definition info for this menu item
'menu': the popMenu component inside topMenu
"""
def getMenuItems(info):
items = ext.mainMenu.getMenuItems(**info)
print('getMenuItems produced', items)
return items
def onItemTrigger(info):
ext.mainMenu.onMenuTrigger(**info)
# standard menu callbacks
def onSelect(info):
"""
User selects a menu option
"""
# debug(info)
def onRollover(info):
"""
Mouse rolled over an item
"""
def onOpen(info):
"""
Menu opened
"""
def onClose(info):
"""
Menu closed
"""
def onMouseDown(info):
"""
Item pressed
"""
def onMouseUp(info):
"""
Item released
"""
def onClick(info):
"""
Item pressed and released
"""
def onLostFocus(info):
"""
Menu lost focus
""" | en | 0.624582 | # noinspection PyUnreachableCode # noinspection PyUnresolvedReferences TopMenu callbacks Callbacks always take a single argument, which is a dictionary of values relevant to the callback. Print this dictionary to see what is being passed. The keys explain what each item is. TopMenu info keys: 'widget': the TopMenu widget 'item': the item label in the menu list 'index': either menu index or -1 for none 'indexPath': list of parent menu indexes leading to this item 'define': TopMenu define DAT definition info for this menu item 'menu': the popMenu component inside topMenu # standard menu callbacks User selects a menu option # debug(info) Mouse rolled over an item Menu opened Menu closed Item pressed Item released Item pressed and released Menu lost focus | 2.797218 | 3 |
test/test_binance.py | Artek199/przyjazn-pami-semkow- | 13 | 6630944 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# <NAME>
# <EMAIL>
# MIT license
import os
import logging
import ema2
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
try:
interv = '1w'
symb='BTCUSDT'
client = Client(os.getenv('BINANCE_APIKEY', 'NOTDEF_APIKEY'), os.getenv('BINANCE_SEKKEY', 'NOTDEF_APIKEY'), {"verify": True, "timeout": 20})
closedPrices = client.get_klines(symbol=symb, interval=interv, limit=1)
time_res = client.get_server_time()
print(closedPrices[0][0])
print(closedPrices)
print(closedPrices[0][6])
print(time_res)
except BinanceAPIException as e:
logging.info(f'Binance API exception: {e.status_code} - {e.message}')
except BinanceRequestException as e:
logging.info(f'Binance request exception: {e.status_code} - {e.message}')
except BinanceWithdrawException as e:
logging.info(f'Binance withdraw exception: {e.status_code} - {e.message}')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# <NAME>
# <EMAIL>
# MIT license
import os
import logging
import ema2
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
try:
interv = '1w'
symb='BTCUSDT'
client = Client(os.getenv('BINANCE_APIKEY', 'NOTDEF_APIKEY'), os.getenv('BINANCE_SEKKEY', 'NOTDEF_APIKEY'), {"verify": True, "timeout": 20})
closedPrices = client.get_klines(symbol=symb, interval=interv, limit=1)
time_res = client.get_server_time()
print(closedPrices[0][0])
print(closedPrices)
print(closedPrices[0][6])
print(time_res)
except BinanceAPIException as e:
logging.info(f'Binance API exception: {e.status_code} - {e.message}')
except BinanceRequestException as e:
logging.info(f'Binance request exception: {e.status_code} - {e.message}')
except BinanceWithdrawException as e:
logging.info(f'Binance withdraw exception: {e.status_code} - {e.message}')
| en | 0.195717 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # <NAME> # <EMAIL> # MIT license | 2.377811 | 2 |
Estudos/Python_Exercicios_Curso_Em_Video/ex043.py | wiltonjr4/Python_Language | 0 | 6630945 | <reponame>wiltonjr4/Python_Language<filename>Estudos/Python_Exercicios_Curso_Em_Video/ex043.py
peso = float(input('Qual é o seu peso? (Kg) '))
altura = float(input('Qual é a sua altura? (m)' ))
imc = peso / (altura ** 2)
print('O seu IMC atual é de: {:.1f}'.format(imc))
if imc < 18.5:
print('Você está ABAIXO do peso normal!')
elif imc <= 25:
print('Você está com o peso IDEAL!')
elif imc <= 30:
print('Você está com SOBREPESO!')
elif imc <= 40:
print('Você esta em OBESIDADE!')
else:
print('Você está em OBESIDADE MÓRBIDA! Cuidado!')
| peso = float(input('Qual é o seu peso? (Kg) '))
altura = float(input('Qual é a sua altura? (m)' ))
imc = peso / (altura ** 2)
print('O seu IMC atual é de: {:.1f}'.format(imc))
if imc < 18.5:
print('Você está ABAIXO do peso normal!')
elif imc <= 25:
print('Você está com o peso IDEAL!')
elif imc <= 30:
print('Você está com SOBREPESO!')
elif imc <= 40:
print('Você esta em OBESIDADE!')
else:
print('Você está em OBESIDADE MÓRBIDA! Cuidado!') | none | 1 | 3.803404 | 4 |
|
scripts/build_distributed_model.py | sunlightlabs/fcc-net-neutrality-comments | 18 | 6630946 | <filename>scripts/build_distributed_model.py
# IPython log file
import sys
import os
import logging
from gensim import models, corpora
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
os.path.pardir))
import settings
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
filename='log/build_distributed_model.log', filemode='a',
level=logging.INFO)
logger = logging.getLogger(__name__)
def do_lsi(num_topics, fname_suffix):
logger.info('reading source corpus and id2word')
tfidf_corpus = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR,
'tfidf_corpus{}.mm'.format(
fname_suffix)))
my_dict = corpora.Dictionary.load(os.path.join(settings.PERSIST_DIR,
'my_dict'))
logger.info('building LSI model')
lsi_model = models.LsiModel(tfidf_corpus, id2word=my_dict,
num_topics=num_topics, distributed=True)
persist_model = os.path.join(settings.PERSIST_DIR,
'lsi_model{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting model in '+persist_model)
lsi_model.save(persist_model)
logger.info('transforming tfidf corpus')
tfidf_corpus_lsi = lsi_model[tfidf_corpus]
persist_corpus = os.path.join(settings.PERSIST_DIR,
'tfidf_corpus_lsi{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting transformed corpus in '+persist_corpus)
corpora.MmCorpus.serialize(persist_corpus, tfidf_corpus_lsi)
logger.info('finished LSI')
def do_lda(num_topics, fname_suffix):
logger.info('reading source corpus and id2word')
corpus = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR,
'corpus{}.mm'.format(
fname_suffix)))
my_dict = corpora.Dictionary.load(os.path.join(settings.PERSIST_DIR,
'my_dict'))
logger.info('building LDA model')
lda_model = models.LdaModel(corpus, id2word=my_dict,
num_topics=num_topics, distributed=True)
persist_model = os.path.join(settings.PERSIST_DIR,
'lda_model{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting model in '+persist_model)
lda_model.save(persist_model)
logger.info('transforming corpus')
corpus_lda = lda_model[corpus]
persist_corpus = os.path.join(settings.PERSIST_DIR,
'corpus_lda{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting transformed corpus in '+persist_corpus)
corpora.MmCorpus.serialize(persist_corpus, corpus_lda)
logger.info('finished LDA')
if __name__ == "__main__":
modeltype = sys.argv[1].lower()
num_topics = int(sys.argv[2])
fname_suffix = sys.argv[3] if len(sys.argv) > 3 else ''
if modeltype == 'lda':
do_lda(num_topics, fname_suffix)
elif modeltype == 'lsi':
do_lsi(num_topics, fname_suffix)
| <filename>scripts/build_distributed_model.py
# IPython log file
import sys
import os
import logging
from gensim import models, corpora
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
os.path.pardir))
import settings
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
filename='log/build_distributed_model.log', filemode='a',
level=logging.INFO)
logger = logging.getLogger(__name__)
def do_lsi(num_topics, fname_suffix):
logger.info('reading source corpus and id2word')
tfidf_corpus = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR,
'tfidf_corpus{}.mm'.format(
fname_suffix)))
my_dict = corpora.Dictionary.load(os.path.join(settings.PERSIST_DIR,
'my_dict'))
logger.info('building LSI model')
lsi_model = models.LsiModel(tfidf_corpus, id2word=my_dict,
num_topics=num_topics, distributed=True)
persist_model = os.path.join(settings.PERSIST_DIR,
'lsi_model{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting model in '+persist_model)
lsi_model.save(persist_model)
logger.info('transforming tfidf corpus')
tfidf_corpus_lsi = lsi_model[tfidf_corpus]
persist_corpus = os.path.join(settings.PERSIST_DIR,
'tfidf_corpus_lsi{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting transformed corpus in '+persist_corpus)
corpora.MmCorpus.serialize(persist_corpus, tfidf_corpus_lsi)
logger.info('finished LSI')
def do_lda(num_topics, fname_suffix):
logger.info('reading source corpus and id2word')
corpus = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR,
'corpus{}.mm'.format(
fname_suffix)))
my_dict = corpora.Dictionary.load(os.path.join(settings.PERSIST_DIR,
'my_dict'))
logger.info('building LDA model')
lda_model = models.LdaModel(corpus, id2word=my_dict,
num_topics=num_topics, distributed=True)
persist_model = os.path.join(settings.PERSIST_DIR,
'lda_model{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting model in '+persist_model)
lda_model.save(persist_model)
logger.info('transforming corpus')
corpus_lda = lda_model[corpus]
persist_corpus = os.path.join(settings.PERSIST_DIR,
'corpus_lda{s}-{t}'.format(
s=fname_suffix, t=num_topics))
logger.info('persisting transformed corpus in '+persist_corpus)
corpora.MmCorpus.serialize(persist_corpus, corpus_lda)
logger.info('finished LDA')
if __name__ == "__main__":
modeltype = sys.argv[1].lower()
num_topics = int(sys.argv[2])
fname_suffix = sys.argv[3] if len(sys.argv) > 3 else ''
if modeltype == 'lda':
do_lda(num_topics, fname_suffix)
elif modeltype == 'lsi':
do_lsi(num_topics, fname_suffix)
| en | 0.787727 | # IPython log file | 2.404596 | 2 |
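Because the script above persists both the fitted model and the transformed corpus under settings.PERSIST_DIR, downstream analysis can reload them without repeating the distributed training. A sketch of that step, assuming the script was invoked as `lsi 100` with no filename suffix (so the artifacts are named lsi_model-100 and tfidf_corpus_lsi-100):

# Sketch: reloading the artifacts written by do_lsi() above (names assume
# num_topics=100 and an empty fname_suffix).
import os
from gensim import corpora, models
import settings

lsi_model = models.LsiModel.load(os.path.join(settings.PERSIST_DIR, 'lsi_model-100'))
corpus_lsi = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR, 'tfidf_corpus_lsi-100'))

for doc_no, topics in enumerate(corpus_lsi):
    print(topics[:5])            # leading (topic_id, weight) pairs for this document
    if doc_no == 2:
        break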
Source/fastICA_jit.py | Liwen-ZHANG/fastica_lz | 1 | 6630947 |
import scipy.linalg
import numpy as np
from numba import jit
@jit
def sym_decorrelation_jit(W):
""" Symmetric decorrelation """
K = np.dot(W, W.T)
s, u = np.linalg.eigh(K)
W = (u @ np.diag(1.0/np.sqrt(s)) @ u.T) @ W
return W
def g_logcosh_jit(wx,alpha):
"""derivatives of logcosh"""
return np.tanh(alpha * wx)
def gprime_logcosh_jit(wx,alpha):
"""second derivatives of logcosh"""
return alpha * (1-np.square(np.tanh(alpha*wx)))
# exp
def g_exp_jit(wx,alpha):
"""derivatives of exp"""
return wx * np.exp(-np.square(wx)/2)
def gprime_exp_jit(wx,alpha):
"""second derivatives of exp"""
return (1-np.square(wx)) * np.exp(-np.square(wx)/2)
def fastICA_jit(X, f,alpha=None,n_comp=None,maxit=200, tol=1e-04):
"""FastICA algorithm for several units"""
n,p = X.shape
#check if n_comp is valid
if n_comp is None:
n_comp = min(n,p)
elif n_comp > min(n,p):
print("n_comp is too large")
n_comp = min(n,p)
#centering
#by subtracting the mean of each column of X (array).
X = X - X.mean(axis=0)[None,:]
X = X.T
#whitening
s = np.linalg.svd(X @ (X.T) / n)
D = np.diag(1/np.sqrt(s[1]))
k = D @ (s[0].T)
k = k[:n_comp,:]
X1 = k @ X
# initial random weght vector
w_init = np.random.normal(size=(n_comp, n_comp))
W = sym_decorrelation_jit(w_init)
lim = 1
it = 0
# The FastICA algorithm
while lim > tol and it < maxit :
wx = W @ X1
if f =="logcosh":
gwx = g_logcosh_jit(wx,alpha)
g_wx = gprime_logcosh_jit(wx,alpha)
elif f =="exp":
gwx = g_exp_jit(wx,alpha)
            g_wx = gprime_exp_jit(wx,alpha)
else:
print("doesn't support this approximation negentropy function")
W1 = np.dot(gwx,X1.T)/X1.shape[1] - np.dot(np.diag(g_wx.mean(axis=1)),W)
W1 = sym_decorrelation_jit(W1)
it = it +1
lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T))) - 1.0)
W = W1
S = W @ X1
A = scipy.linalg.pinv2(W @ k)
return{'X':X1.T,'A':A.T,'S':S.T} |
import scipy.linalg
import numpy as np
from numba import jit
@jit
def sym_decorrelation_jit(W):
""" Symmetric decorrelation """
K = np.dot(W, W.T)
s, u = np.linalg.eigh(K)
W = (u @ np.diag(1.0/np.sqrt(s)) @ u.T) @ W
return W
def g_logcosh_jit(wx,alpha):
"""derivatives of logcosh"""
return np.tanh(alpha * wx)
def gprime_logcosh_jit(wx,alpha):
"""second derivatives of logcosh"""
return alpha * (1-np.square(np.tanh(alpha*wx)))
# exp
def g_exp_jit(wx,alpha):
"""derivatives of exp"""
return wx * np.exp(-np.square(wx)/2)
def gprime_exp_jit(wx,alpha):
"""second derivatives of exp"""
return (1-np.square(wx)) * np.exp(-np.square(wx)/2)
def fastICA_jit(X, f,alpha=None,n_comp=None,maxit=200, tol=1e-04):
"""FastICA algorithm for several units"""
n,p = X.shape
#check if n_comp is valid
if n_comp is None:
n_comp = min(n,p)
elif n_comp > min(n,p):
print("n_comp is too large")
n_comp = min(n,p)
#centering
#by subtracting the mean of each column of X (array).
X = X - X.mean(axis=0)[None,:]
X = X.T
#whitening
s = np.linalg.svd(X @ (X.T) / n)
D = np.diag(1/np.sqrt(s[1]))
k = D @ (s[0].T)
k = k[:n_comp,:]
X1 = k @ X
# initial random weght vector
w_init = np.random.normal(size=(n_comp, n_comp))
W = sym_decorrelation_jit(w_init)
lim = 1
it = 0
# The FastICA algorithm
while lim > tol and it < maxit :
wx = W @ X1
if f =="logcosh":
gwx = g_logcosh_jit(wx,alpha)
g_wx = gprime_logcosh_jit(wx,alpha)
elif f =="exp":
gwx = g_exp_jit(wx,alpha)
            g_wx = gprime_exp_jit(wx,alpha)
else:
print("doesn't support this approximation negentropy function")
W1 = np.dot(gwx,X1.T)/X1.shape[1] - np.dot(np.diag(g_wx.mean(axis=1)),W)
W1 = sym_decorrelation_jit(W1)
it = it +1
lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T))) - 1.0)
W = W1
S = W @ X1
A = scipy.linalg.pinv2(W @ k)
return{'X':X1.T,'A':A.T,'S':S.T} | en | 0.757875 | Symmetric decorrelation derivatives of logcosh second derivatives of logcosh # exp derivatives of exp second derivatives of exp FastICA algorithm for several units #check if n_comp is valid #centering #by subtracting the mean of each column of X (array). #whitening # initial random weght vector # The FastICA algorithm | 2.396611 | 2 |
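To make the interface of fastICA_jit concrete, here is a small synthetic-mixture sketch; the two-source blind-separation setup is a standard demo chosen for illustration, not taken from the original repository:

# Sketch: separating two synthetic sources with fastICA_jit defined above.
import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 8, 2000)
s1 = np.sin(2 * t)                          # sinusoidal source
s2 = np.sign(np.cos(3 * t))                 # square-wave source
S_true = np.c_[s1, s2] + 0.05 * rng.standard_normal((2000, 2))

A_mix = np.array([[1.0, 0.5], [0.5, 2.0]])  # mixing matrix
X = S_true @ A_mix.T                        # observed mixtures, shape (n, p)

result = fastICA_jit(X, f="logcosh", alpha=1.0, n_comp=2)
print(result['S'].shape, result['A'].shape)  # estimated sources and mixing matrix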
h2o-py/tests/testdir_jira/pyunit_pubdev_5397_r2_early_stop_NOFEATURE.py | ahmedengu/h2o-3 | 6,098 | 6630948 | <reponame>ahmedengu/h2o-3<filename>h2o-py/tests/testdir_jira/pyunit_pubdev_5397_r2_early_stop_NOFEATURE.py
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
model_runtime = [] # store actual model runtime in seconds
model_maxRuntime = [] # store given maximum runtime restrictions placed on building models for different algos
algo_names =[]
actual_model_runtime = [] # in seconds
model_runtime_overrun = [] # % by which the model runtime exceeds the maximum runtime.
model_within_max_runtime = []
err_bound = 0.5 # factor by which we allow the model runtime over-run to be
def test_r2_early_stop():
'''
This pyunit test is written to ensure that the max_runtime_secs can restrict the model training time for all
h2o algos. See PUBDEV-4702.
'''
global model_within_max_runtime
global err_bound
seed = 12345
# GBM run
training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/multinomial_training1_set.csv"))
y_index = training1_data.ncol-1
x_indices = list(range(y_index))
training1_data[y_index] = training1_data[y_index].round().asfactor()
modelNoEarlyStop = H2OGradientBoostingEstimator(distribution="multinomial", seed=seed)
modelNoEarlyStop.train(x=x_indices, y=y_index, training_frame=training1_data)
numTrees = pyunit_utils.extract_from_twoDimTable(modelNoEarlyStop._model_json["output"]["model_summary"],
"number_of_trees", takeFirst=True)
model = H2OGradientBoostingEstimator(distribution="multinomial", seed=seed, stopping_metric="r2",
stopping_tolerance=0.01, stopping_rounds=5)
model.train(x=x_indices, y=y_index, training_frame=training1_data)
numTreesEarlyStop = pyunit_utils.extract_from_twoDimTable(model._model_json["output"]["model_summary"],
"number_of_trees", takeFirst=True)
print("Number of tress built with early stopping: {0}. Number of trees built without early stopping: {1}".format(numTreesEarlyStop[0], numTrees[0]))
assert numTreesEarlyStop[0] <= numTrees[0], "Early stopping criteria r2 is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(test_r2_early_stop)
else:
test_r2_early_stop() | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
model_runtime = [] # store actual model runtime in seconds
model_maxRuntime = [] # store given maximum runtime restrictions placed on building models for different algos
algo_names =[]
actual_model_runtime = [] # in seconds
model_runtime_overrun = [] # % by which the model runtime exceeds the maximum runtime.
model_within_max_runtime = []
err_bound = 0.5 # factor by which we allow the model runtime over-run to be
def test_r2_early_stop():
'''
This pyunit test is written to ensure that the max_runtime_secs can restrict the model training time for all
h2o algos. See PUBDEV-4702.
'''
global model_within_max_runtime
global err_bound
seed = 12345
# GBM run
training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/multinomial_training1_set.csv"))
y_index = training1_data.ncol-1
x_indices = list(range(y_index))
training1_data[y_index] = training1_data[y_index].round().asfactor()
modelNoEarlyStop = H2OGradientBoostingEstimator(distribution="multinomial", seed=seed)
modelNoEarlyStop.train(x=x_indices, y=y_index, training_frame=training1_data)
numTrees = pyunit_utils.extract_from_twoDimTable(modelNoEarlyStop._model_json["output"]["model_summary"],
"number_of_trees", takeFirst=True)
model = H2OGradientBoostingEstimator(distribution="multinomial", seed=seed, stopping_metric="r2",
stopping_tolerance=0.01, stopping_rounds=5)
model.train(x=x_indices, y=y_index, training_frame=training1_data)
numTreesEarlyStop = pyunit_utils.extract_from_twoDimTable(model._model_json["output"]["model_summary"],
"number_of_trees", takeFirst=True)
print("Number of tress built with early stopping: {0}. Number of trees built without early stopping: {1}".format(numTreesEarlyStop[0], numTrees[0]))
assert numTreesEarlyStop[0] <= numTrees[0], "Early stopping criteria r2 is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(test_r2_early_stop)
else:
test_r2_early_stop() | en | 0.883868 | # store actual model runtime in seconds # store given maximum runtime restrictions placed on building models for different algos # in seconds # % by which the model runtime exceeds the maximum runtime. # fractor by which we allow the model runtime over-run to be This pyunit test is written to ensure that the max_runtime_secs can restrict the model training time for all h2o algos. See PUBDEV-4702. # GBM run | 2.267311 | 2 |
tests/test_cli.py | HenriqueLin/CityWok-ManagementSystem | 0 | 6630949 | <filename>tests/test_cli.py
import os
import pytest
from citywok_ms import db
from citywok_ms.cli import compile, create, drop, init, load_example, update
from flask.cli import shell_command
from flask.testing import FlaskCliRunner
import shutil
def test_dev_load_without_create(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Please create database first.\n"
def test_dev_load_duplicate(app_without_db):
runner = FlaskCliRunner(app_without_db)
db.create_all()
runner.invoke(load_example)
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Database already loaded.\n"
def test_dev_load(app_without_db):
runner = FlaskCliRunner(app_without_db)
db.create_all()
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Loaded example entities.\n"
def test_dev_create(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(create)
assert not result.exception
assert "Created all tables." in result.output
assert "Created admin user." in result.output
@pytest.mark.parametrize("input, output", [("y", "Dropped"), ("n", "Abort")])
def test_dev_drop_yes(app_without_db, input, output):
runner = FlaskCliRunner(app_without_db)
db.create_all()
runner.invoke(load_example)
result = runner.invoke(drop, input=input)
assert output in result.output
def test_shell(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(shell_command)
assert result.exit_code == 0
assert app_without_db.name in result.output
def test_i18n(app_without_db):
path = os.path.join(app_without_db.root_path, "translations/es")
# i18n init
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(init, "es -q")
assert not result.exception
assert os.path.isdir(path)
assert os.path.isfile(os.path.join(path, "LC_MESSAGES/messages.po"))
# i18n update
result = runner.invoke(update, "-q")
assert not result.exception
# i18n compile
result = runner.invoke(compile, "-q")
assert not result.exception
assert os.path.isfile(os.path.join(path, "LC_MESSAGES/messages.mo"))
# clean up
shutil.rmtree(path)
| <filename>tests/test_cli.py
import os
import pytest
from citywok_ms import db
from citywok_ms.cli import compile, create, drop, init, load_example, update
from flask.cli import shell_command
from flask.testing import FlaskCliRunner
import shutil
def test_dev_load_without_create(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Please create database first.\n"
def test_dev_load_duplicate(app_without_db):
runner = FlaskCliRunner(app_without_db)
db.create_all()
runner.invoke(load_example)
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Database already loaded.\n"
def test_dev_load(app_without_db):
runner = FlaskCliRunner(app_without_db)
db.create_all()
result = runner.invoke(load_example)
assert not result.exception
assert result.output == "Loaded example entities.\n"
def test_dev_create(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(create)
assert not result.exception
assert "Created all tables." in result.output
assert "Created admin user." in result.output
@pytest.mark.parametrize("input, output", [("y", "Dropped"), ("n", "Abort")])
def test_dev_drop_yes(app_without_db, input, output):
runner = FlaskCliRunner(app_without_db)
db.create_all()
runner.invoke(load_example)
result = runner.invoke(drop, input=input)
assert output in result.output
def test_shell(app_without_db):
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(shell_command)
assert result.exit_code == 0
assert app_without_db.name in result.output
def test_i18n(app_without_db):
path = os.path.join(app_without_db.root_path, "translations/es")
# i18n init
runner = FlaskCliRunner(app_without_db)
result = runner.invoke(init, "es -q")
assert not result.exception
assert os.path.isdir(path)
assert os.path.isfile(os.path.join(path, "LC_MESSAGES/messages.po"))
# i18n update
result = runner.invoke(update, "-q")
assert not result.exception
# i18n compile
result = runner.invoke(compile, "-q")
assert not result.exception
assert os.path.isfile(os.path.join(path, "LC_MESSAGES/messages.mo"))
# clean up
shutil.rmtree(path)
| es | 0.118445 | # i18n init # i18n update # i18n compile # clean up | 2.295224 | 2 |
netcat.py | wwwins/TreeAdmin | 0 | 6630950 | import socket
def netcat(hostname, port, content):
buf = ''
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(5)
s.connect((hostname, port))
s.sendall(content)
s.shutdown(socket.SHUT_WR)
while 1:
data = s.recv(1024)
if data == "":
break
buf += data
print "Received:",repr(data)
print "connection closed"
s.close()
return buf;
| import socket
def netcat(hostname, port, content):
buf = ''
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(5)
s.connect((hostname, port))
s.sendall(content)
s.shutdown(socket.SHUT_WR)
while 1:
data = s.recv(1024)
if data == "":
break
buf += data
print "Received:",repr(data)
print "connection closed"
s.close()
return buf;
| none | 1 | 2.917754 | 3 |
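A usage sketch for the netcat helper above; host, port, and payload are placeholders. The function sends the payload, half-closes the socket with SHUT_WR, and accumulates whatever the server sends back until the connection closes:

# Hypothetical call (placeholder host/port); returns the server's full reply.
reply = netcat('192.168.0.10', 9100, 'STATUS\n')
print(reply)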