Dataset columns (name: type, observed range; "nullable" marks columns containing nulls, shown as ⌀ in the viewer):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: sequence, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: sequence, length 1
- author_id: string, length 1 to 132

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
a14567cc685925ddb6144b5bcefab69fcbc2dd61 | 7cf8cc1f944946f0378da2e6af4ba1c89466dfb4 | /dbselectprgrm.py | 36776142444cd630a74ce82002009a999d6df7f8 | [] | no_license | ashilz/pythonnew | 8abd164f757efaefa2216d663db2082c241cf4f5 | 5b57e0f1211a67671999bd3a1cae064318ab1e2f | refs/heads/master | 2022-12-10T21:22:02.597080 | 2020-09-16T06:01:53 | 2020-09-16T06:01:53 | 292,829,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import mysql.connector
db=mysql.connector.connect(
host="localhost",
user="root",
password="Ashil333!",
auth_plugin="mysql_native_password",
database="luminarpython"
)
print(db)
cursor=db.cursor()
try:
query="SELECT * FROM EMPLOYEE"
cursor.execute(query)
result=cursor.fetchall()
for x in result:
print(x)
except Exception as e:
print(e.args)
finally:
    db.close()  # call close() so the connection is actually released
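# The SELECT above uses a constant string, which is safe as written; if a
# query ever needs user-supplied values, parameter binding is the usual
# mysql.connector route. A hedged sketch (the emp_id value is illustrative,
# not from this repo):
#   emp_id = 7
#   cursor.execute("SELECT * FROM EMPLOYEE WHERE ID = %s", (emp_id,))
#   print(cursor.fetchone())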
| [
"[email protected]"
] | |
c2676e96a5fa3fb235dce65d0cf781e52fa97b0c | 5ecaded45e28c1041c1986c13db446806a28b3ee | /files/learn-python-files/writing-a-file.py | 809ddbc02cfa52f72acd5c1c5abf38e464e07cac | [] | no_license | 109658067/Python3_Codecademy | 12206ec74e8dc95cc1200491b4ed75b856bfb25e | 8480912c6dd15649b3c51f4c205afdd253ea462b | refs/heads/master | 2022-09-15T18:21:26.742741 | 2020-06-04T05:48:58 | 2020-06-04T05:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | with open('bad_bands.txt', 'w') as bad_bands_doc:
bad_bands_doc.write("Maroon 5")
| [
"[email protected]"
] | |
acfe4f324a502158f5c16d5b7d61048a3e4eac8c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_jimmied.py | ab4838861245bf47f9fd504bf6da78a0bd7b2e15 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._jimmy import _JIMMY
# class header
class _JIMMIED(_JIMMY):
def __init__(self,):
_JIMMY.__init__(self)
self.name = "JIMMIED"
self.specie = 'verbs'
self.basic = "jimmy"
self.jsondata = {}
| [
"[email protected]"
] | |
9d55651d7155c6be5e3d04e0bb02204342ea9cd5 | 49afe5ff0a10354e0c2c91f805f57dd02a24537d | /cashe/s4day118/s4.py | 8c42aac83a3c6c7986e6689b0b09cb758182bd9f | [] | no_license | shaoqianliang/scrapy | d155f103fdda0553981649aa7fa9aa9c9457b9a6 | 9aba8835640ddddd9ab4e1e54b83f6cafaeb8b9e | refs/heads/master | 2020-03-07T05:12:49.933792 | 2018-04-25T07:59:28 | 2018-04-25T07:59:28 | 127,289,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import redis
pool = redis.ConnectionPool(host='192.168.11.81', port=6379)
conn = redis.Redis(connection_pool=pool)
# r.set('foo', '友情并阿斯顿发生地方')
# print(r.get('foo'))
# v = r.getrange('foo',0,3)
# print(v)
# v = r.strlen('foo')
# print(v)
#
# r.lpush('names','alex')
# r.lpush('names','eric')
# r.lpush('names','egon')
# v = r.lindex('names',1)
# print(v)
# aa 0 ba 0 ca 0 da 0 ea 0 fa 0 ga 0
#
conn.zadd('zz', '友情并', -1, '阮国栋', -2,'成汤',-3)
# v = conn.zrange('zz',0,0)
# print(v[0].decode('utf-8'))
# print(v[0])
v = conn.zrank('zz',"成汤")
print(v)
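# Expected behavior note (an inference, not output captured from a run, and
# assuming the old redis-py 2.x zadd argument order of member, score):
# zadd above stores 友情并 -> -1, 阮国栋 -> -2, 成汤 -> -3, and zrank ranks
# members by ascending score, so conn.zrank('zz', '成汤') should print 0.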
| [
"[email protected]"
] | |
e079a60a0203f845eb514f4beba68b66be5303fa | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/309/103346/submittedfiles/jogoDaVelha_BIB.py | e2a9991dee25881eef4f01f86be054f83d14d35f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,485 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from random import randint
# validates the symbol the human player will use
def solicitaSimboloDoHumano():
# nome=input('Qual seu nome(ou apelido)? ')
simbH= (input("Qual o simbolo que você deseja utilizar no jogo? "))
while simbH!="X" and simbH!="O" and simbH!="o" and simbH!="x" :
print ("Ops! Simbolo inválido")
simbH= input("Informe um simbolo válido que deseja utilizar para a partida: X ou O : ")
if simbH=="X" or simbH=="x":
simbH="X"
else:
        simbH="O"
return simbH
# random draw to decide who starts
def sorteioPrimeiraJogada (simbM, simbH, tabuleiro, nome):
now= datetime.now()
a=now.second
    # this variable helps decide whose move comes next
    prop=0
if a%2==0:
print("Vencedor do sorteio para inicio do jogo: Computador")
prop=1
        # call mostraTabuleiro with the computer's move
tabuleiro=jogada1computer(tabuleiro, simbM)
mostraTabuleiro(tabuleiro)
else:
print("Vencedor do sorteio para inicio do jogo: Jogador")
prop=2
        # call mostraTabuleiro with the player's move
#tabuleiro=jogadaHumana(nome, simbH, tabuleiro)
return prop
# Function that prints the board:
def mostraTabuleiro(tabuleiro):
print (tabuleiro[0][0] +'|'+ tabuleiro[0][1] + '|'+ tabuleiro[0][2])
print (tabuleiro[1][0] +'|'+ tabuleiro[1][1] + '|'+ tabuleiro[1][2])
print (tabuleiro[2][0] +'|'+ tabuleiro[2][1] + '|'+ tabuleiro[2][2])
# Human player's move function
def jogadaHumana(nome, simbH, tabuleiro):
casa=[]
casa=input(" Qual a sua jogada, %s ?" %nome)
#tabuleiro[casa//10][casa%10]=simbH
i=int(casa[0])
j=int(casa[2])
    while i>2 or j>2 or i<0 or j<0:
print('Ops! Jogada invalida... ')
        casa=input(" Qual a sua jogada, %s ?" %nome)
i=int(casa[0])
j=int(casa[2])
validarJogada(nome, simbH, tabuleiro, i, j)
return tabuleiro
# Validate a move
def validarJogada(nome, simbH, tabuleiro, i, j):
if tabuleiro[i][j]!="X" and tabuleiro[i][j]!="O" :
tabuleiro[i][j]=simbH
else:
print ("OPS!!! Essa jogada não está disponível. Tente novamente!")
jogadaHumana(nome, simbH, tabuleiro)
# Computer move function (left unimplemented):
#def jogadaComputador(tabuleiro, simbM):
# if tabuleiro
# Function used when the computer starts the game
def jogada1computer(tabuleiro, simbM):
sortL=randint(0, 2)
sortC=randint(0, 2)
while tabuleiro[sortL][sortC] !=" " :
sortL=randint(0, 2)
sortC=randint(0, 2)
tabuleiro[sortL][sortC]=simbM
return tabuleiro
# Check whether there is a winner
def VerificaVencedor(tab, simbH, nome):
if tab[0][0]==tab[0][2] and tab[0][0]==tab[0][1] and tab[0][1]==tab[0][2]:
if tab[0][0]==simbH:
x=2
else:
print("Vencedor: Máquina")
x=4
elif tab[1][0]==tab[1][1] and tab[1][1]==tab[1][2] and tab[1][0]==tab[1][2]:
if tab[1][0]==simbH:
x=2
else:
x=4
elif tab[2][0]==tab[2][1] and tab[2][1]==tab[2][2] and tab[2][0]==tab[2][2]:
if tab[2][0]==simbH:
x=2
else:
x=4
elif tab[0][0]==tab[1][0] and tab[2][0]==tab[0][0] and tab[2][0]==tab[1][0]:
if tab[1][0]==simbH:
x=2
else:
x=4
elif tab[0][1]==tab[1][1] and tab[1][1]==tab[2][1] and tab[0][1]==tab[2][1]:
if tab[1][1]==simbH:
x=2
else:
x=4
elif tab[0][2]==tab[1][2] and tab[1][2]==tab[2][2] and tab[0][2]==tab[2][2]:
if tab[2][2]==simbH:
x=2
else:
x=4
elif tab[0][0]==tab[1][1] and tab[1][1]==tab[2][2] and tab[0][0]==tab[2][2]:
if tab[0][0]==simbH:
x=2
else:
x=4
elif tab[0][2]==tab[1][1] and tab[1][1]==tab[2][0] and tab[2][0]==tab[0][2]:
if tab[2][0]==simbH:
x=2
else:
x=4
elif tab[0][0]!=" " and tab[0][1]!=" " and tab[0][2]!=" " and tab[1][0]!=" " and tab[1][1]!=" " and tab[1][2]!=" " and tab[2][0]!=" " and tab[2][1]!=" " and tab[2][2]!=" ":
print ('Deu velha')
x=6
else:
x=1
return x
| [
"[email protected]"
] | |
ffd2dbbfe0e5759bc19b443279803a036bc898b0 | dbdf5d6e9e1e04066bcf13b3e81d00578c6dc25d | /Trakttv.bundle/Contents/Libraries/Shared/shove/caches/memcached.py | f3c2064a367f862001df0ba1a9826389eee781be | [] | no_license | cnstudios/Plex-Trakt-Scrobbler | 59dfd0b1361d5b1d0f638b1a2009cffe0d5da421 | 73557f52bdba172c0b7261454536641d9c65edb8 | refs/heads/master | 2021-01-22T11:04:06.080191 | 2015-03-29T23:58:35 | 2015-03-29T23:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | # -*- coding: utf-8 -*-
'''
"memcached" cache.
The shove URI for a memcache cache is:
memcache://<memcache_server>
'''
try:
import memcache
except ImportError:
raise ImportError("requires 'python-memcached' library")
from shove.base import Base
__all__ = ['MemCache']
class MemCache(Base):
'''Memcached-based cache frontend.'''
def __init__(self, engine, **kw):
super(MemCache, self).__init__(engine, **kw)
if engine.startswith('memcache://'):
engine = engine.split('://')[1]
self._store = memcache.Client(engine.split(';'))
# set timeout
self.timeout = kw.get('timeout', 300)
def __getitem__(self, key):
value = self._store.get(key)
if value is None:
raise KeyError(key)
return self.loads(value)
def __setitem__(self, key, value):
self._store.set(key, self.dumps(value), self.timeout)
def __delitem__(self, key):
self._store.delete(key)
| [
"[email protected]"
] | |
6fa2dd1b962d93710df683eaac29099f951a25c2 | 786232b3c9eac87728cbf2b5c5636d7b6f10f807 | /Leetcode/medium/162.py | b6de70d172ced69a49525fc35b79a96011180de2 | [] | no_license | luoyanhan/Algorithm-and-data-structure | c9ada2e123fae33826975665be37ca625940ddd4 | fb42c3a193f58360f6b6f3b7d5d755cd6e80ad5b | refs/heads/master | 2021-12-22T15:45:28.260386 | 2021-12-02T03:08:35 | 2021-12-02T03:08:35 | 251,007,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | class Solution:
def findPeakElement(self, nums: List[int]) -> int:
length = len(nums)
if length == 1:
return 0
for i in range(length):
if i == 0 and nums[i] > nums[i+1]:
return i
elif i == length-1 and nums[i] > nums[i-1]:
return i
else:
left = nums[i-1]
right = nums[i+1]
if nums[i] > left and nums[i] > right:
return i
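    # The scan above is O(n); LeetCode 162 actually asks for O(log n). A
    # standard binary-search sketch (an alternative, not the submission above):
    def findPeakElementLog(self, nums: List[int]) -> int:
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < nums[mid + 1]:
                lo = mid + 1  # rising slope: a peak lies to the right
            else:
                hi = mid      # falling slope: peak is at mid or to the left
        return lo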
| [
"[email protected]"
] | |
36738b0db03c6d09b59fe47a634737de972a1946 | ea622960f82fbc374ff3ac300ef670b56820af4e | /f2b_venv/bin/gunicorn | 6e02cafa859287fd91e63e48b1999cf57e125ead | [] | no_license | Zacharilius/Food2Beer-Django | d1b22e58d5d4c8fab914915428063d66d23958cd | 691f30822cc80b47cb1bf58eb8521bcf19720b98 | refs/heads/master | 2021-01-19T00:44:44.093599 | 2015-04-10T13:59:08 | 2015-04-10T13:59:08 | 32,464,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | #!/home/zacharilius/Documents/GitHub/Food2Beer-Django/f2b_venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
932d9be49faba3e76429f78de93191bcabbaa964 | 6fab071f4b3f3852a3f7fb7f87e7d033d5ea9425 | /4_Demo_Django/2_Django_Test/1_Django_login/APPS/4_Django项目实战.py | ebdf6404cd02fc9bc3b921d1de0831d2ff6784fd | [] | no_license | pythonzhangfeilong/Python_WorkSpace | 5d76026d0553bb85346264fc6375b1fc0a388729 | 646b460c79bedc80010185a240c8cd23342093bc | refs/heads/master | 2020-08-26T09:51:43.763751 | 2020-07-07T07:23:20 | 2020-07-07T07:23:20 | 216,998,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,520 | py | # 1、Django安装:直接采用的是pip install Django
'''
1、如果之前安装的Django版本较低,可以采用pip uninstall Django卸载,再重新安装
2、安装过程中出现问题是,要记得更新pip,更新命令python -m pip install --upgrade pip
3、pip安装时要使用系统的最高管理员权限,否则会出错
'''
# 2、使用pycharm创建项目时,注意在More Settings中加上app的名字,这样的项目直接就有app,不用再次命令行创建
'''
1、创建好Diango文件后,个文件夹的名字含义:
__init__ 空文件
settings 主配置文件
urls 主路由文件
wsgi 网关接口
templates HTML文件编辑目录
manage 项目个管理脚本(也就是使用命令行的时候会用到)
注意:如果在写前端时,一些导入性的文件要自己新建一个static的文件夹,把他们放进去
2、创建APP时在More Settings编写后就不用管了,但是没有编写就亚奥采用下面命令行的模式去创建
创建app的命令行python manage.py startapp login (这样就是创建了一个叫login的文件夹,文件夹中有各个相关联的文件)
'''
# 3、路由编写urls
'''
1、路由是浏览器输入url,在Django服务器响应url的转发中心。
路由都写在urls文件里,它将浏览器输入的url映射到相应的业务处理逻辑也就是视图
2、要在urls中导入APP中的views文件,from APPS import views
3、接下来就是在urlpatterns中写path('index/',views.index) 最重要的是视图后面的函数
'''
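# A minimal urls.py matching the description in section 3 (a sketch; APPS and
# index come from this tutorial, the rest is standard Django 2.x+):
#   from django.contrib import admin
#   from django.urls import path
#   from APPS import views
#
#   urlpatterns = [
#       path('admin/', admin.site.urls),
#       path('index/', views.index),
#   ]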
# 4. Writing view functions in views: routes forward user requests to view functions, which handle them; the business logic is generally written in views.py
'''
1. First import an HTTP helper, i.e. from django.shortcuts import HttpResponse
    Note: the function name must match the name used in urls, and the first parameter is best named request
    def index(request):
        A bare string cannot be returned directly; it must be wrapped in HttpResponse before HTTP can recognize it
        return HttpResponse('Hello world')
    ### The steps above point the index url at the index() view function in views; it receives the user request and returns a "hello world" string.
'''
# 5. Running the web service
'''
1. From the command line: python manage.py runserver 127.0.0.1:8000
2. In PyCharm, just click the green play arrow at the top right
3. Or click the down arrow next to it and edit the run settings under Edit Configurations
4. If running gives a 404 error, append the target route to the url and refresh, e.g. https://127.0.0.1:8000/index
'''
# 6. Returning an HTML file
'''
1. First create an index.html file under templates
2. Then import render in the app's views file: from django.shortcuts import render
    def index(request):
        render takes the request metadata and a data dictionary and renders the given HTML template; of its parameters the first must be request, the second the HTML file
        return render(request,'func.html')
3. So that Django knows where the HTML files live, configure settings:
    in settings find TEMPLATES=[{
        'DIRS':[os.path.join(BASE_DIR,'templates')]
    }]
'''
# 7. Using static files
'''
1. Returning the HTML file to the user is not enough: the front end has three big pieces, HTML, CSS and JavaScript, plus assorted plugins, and only with all of them is a page complete.
   In Django, create a static directory and place these static files inside it.
2. So that Django can find the static directory, configure settings:
    below STATIC_URL='/static/' add the lines that follow; STATIC_URL='/static/' is the prefix the browser uses when loading static files, e.g. https://127.0.0.1:8000/static/login.jpg
    STATICFILES_DIRS=[
        os.path.join(BASE_DIR,'static')
    ]
3. With that in place, static files can be referenced from index.html in the templates folder:
    <script src='/static/js/jquery-3.2.1.min.js'></script>
'''
# 8. Receiving data sent by the user
'''
A fully equipped HTML file has been returned to the user's browser, but that is still not enough, because there is no dynamic interaction between the web server and the user.
Below, a form lets the user enter a username and password and submit them to the index url, where the server receives the data
1. First edit index.html; note that action is the route name, without a file suffix
    <h1>兄弟,你好</h1>
    <form action="/index/" method="post">
        {% csrf_token %}
        <div>
            <p>用户名:<input type="text" name="username" title="请输入用户名"></p>
            <p>密码:<input type="text" name="password" title="请输入密码"></p>
            <p><button type="submit">提交</button></p>
        </div>
    </form>
2. Editing the html alone is not enough to take input; views must be changed as well
    if request.method=='POST':
        username=request.POST.get('username')
        password=request.POST.get('password')
        print(username,password)
    return render(request,'func.html')
3. Refreshing the page at this point raises a 403 error, because Django has a cross-site request forgery protection mechanism; add {% csrf_token %} inside the html form
'''
# 9. Returning a dynamic page: the user's data has been received, but what is returned is still a static page; normally the data is processed and then returned to the user
'''
1. First edit views
    # create an empty list
    user_list=[]
    def index(request):
        if request.method=='POST':
            username=request.POST.get('username')
            password=request.POST.get('password')
            print(username,password)
            # build a dictionary from the data the user sent
            temp={'user':username,'pwd':password}
            # append the dictionary's contents to the list
            user_list.append(temp)
        # hand the user data back to the html
        return render(request,'func.html',{'date':user_list})
Then edit index.html
    <div class="bk">
        <h1>用户输入</h1>
        <form action="/index/" method="post">
            {% csrf_token %}
            <div>
                <p>用户名:<input type="text" name="username" title="请输入用户名"></p>
                <p>密码:<input type="text" name="password" title="请输入密码"></p>
                <p><button type="submit">提交</button></p>
            </div>
        </form>
    </div>
    <div class="bk">
        <h1>用户展示</h1>
        <table>
            <thead>
                <tr>用户名</tr>
                <tr>密码</tr>
            </thead>
            <tbody>
                {% for item in date %}
                <tr>
                    <td>{{ item.user }}</td>
                    <td>{{ item.pwd }}</td>
                </tr>
                {% endfor %}
            </tbody>
        </table>
    </div>
'''
# 10. Using a database: the need for one goes without saying; Django operates the database through its built-in ORM framework and natively supports the lightweight sqlite3 database.
'''
1. To use a database, first add your app's name to INSTALLED_APPS in settings; without registering it, the database layer does not know which app to create tables for
    INSTALLED_APPS = [
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        'APPS.apps.AppConfig',
    ]
2. Configure DATABASES in settings; the default is sqlite3
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'django_database',
            'USER':'root',
            'PASSWORD':'',
            'HOST':'127.0.0.1',
            'PORT':'3306',
        }
    }
3. Then edit models
    # inheriting from this class is the fixed pattern
    class UserInfo(models.Model):
        user=models.CharField(max_length=32)
        pwd=models.CharField(max_length=32)
        # the two fields above store the username and password
    1. In PyCharm's Terminal enter python manage.py makemigrations,
       which generates a migration record file 0001_initial.py in the migrations directory of the login app.
    2. Then enter python manage.py migrate; once it succeeds the database tables are created
4. Then edit views
    # save the submitted username and password into the database
    from APPS import models
    def index(request):
        if request.method=='POST':
            username=request.POST.get('username')
            password=request.POST.get('password')
            # save the data into the database
            models.UserInfo.objects.create(user=username,pwd=password)
        # read all the data from the database
        user_list=models.UserInfo.objects.all()
        return render(request,'func.html',{'date':user_list})
'''
| [
"[email protected]"
] | |
d06cf95418eb5d2b103212483966c9976382668e | 70f684acbafbcc009a618ffd1dda78ebecca5d28 | /appendixA/countwordsinfeatures/parsefeaturejsons.py | 4ca481a0bf02eac54f578a1fd1935aea1cacb109 | [
"MIT"
] | permissive | ericayhayes/horizon | d026ee90f356445486a148c7c5798d18b7494163 | 15558246778efd2e5ed915635f3c9e113229bf20 | refs/heads/master | 2020-07-09T04:17:50.540176 | 2019-02-28T21:08:23 | 2019-02-28T21:08:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,956 | py | #!/usr/bin/env python3
# parsefeaturejsons.py
# classes and functions that can unpack the extracted feature files
# created by HTRC, and convert them into a .csv that is easier to
# manipulate
import csv, os, sys, bz2, random, json
from collections import Counter
import numpy as np
import pandas as pd
# import utils
currentdir = os.path.dirname(__file__)
libpath = os.path.join(currentdir, '../lib')
sys.path.append(libpath)
import SonicScrewdriver as utils
abspath = os.path.abspath(__file__)
thisdirectory = os.path.dirname(abspath)
namepath = os.path.join(thisdirectory, 'PersonalNames.txt')
placepath = os.path.join(thisdirectory, 'PlaceNames.txt')
romanpath = os.path.join(thisdirectory, 'RomanNumerals.txt')
with open(namepath, encoding = 'utf-8') as f:
personalnames = set([x.strip().lower() for x in f.readlines()])
with open(placepath, encoding = 'utf-8') as f:
placenames = set([x.strip().lower() for x in f.readlines()])
with open(romanpath, encoding = 'utf-8') as f:
romannumerals = set([x.strip().lower() for x in f.readlines()])
daysoftheweek = {'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'}
monthsoftheyear = {'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'}
# This is a little bit of a cheat, because it means we're not inferring everything
# empirically from evidence, but this is the product of a lot of previous experience,
# and hard-coding it here makes it possible to do a nice normalization at the page
# level.
ficwords = {'me', 'my', 'i', 'you', 'your', 'she', 'her', 'hers', 'he', 'him', 'his', 'the', 'said'}
def normalize_token(token):
''' Normalizes a token by lowercasing it and by bundling
certain categories together. The lists of personal and place names
are never going to be all-inclusive; you have to be aware of that,
and deactivate this in corpora where it could pose a problem.
'''
global personalnames, placenames, daysoftheweek, monthsoftheyear
token = token.lower()
if len(token) < 1:
return token
elif token[0].isdigit() and token[-1].isdigit():
return "#arabicnumeral"
elif token in daysoftheweek:
return "#dayoftheweek"
elif token in monthsoftheyear:
return "#monthoftheyear"
elif token in personalnames:
return "#personalname"
elif token in placenames:
return "#placename"
else:
return token
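# Illustrative behavior of normalize_token (hedged: outputs for names depend
# on the contents of PersonalNames.txt and PlaceNames.txt loaded above):
#   normalize_token("Monday") -> "#dayoftheweek"
#   normalize_token("1848")   -> "#arabicnumeral"
#   normalize_token("Whale")  -> "whale"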
def normalize_token_for_page(token):
''' Normalizes a token by lowercasing it and by bundling
certain categories together. Differs from the previous
function in adding roman numerals.
'''
global personalnames, placenames, daysoftheweek, monthsoftheyear, romannumerals
if token == "I":
return token.lower()
# uppercase I is not usually a roman numeral!
token = token.lower()
if len(token) < 1:
return token
elif token[0].isdigit() and token[-1].isdigit():
return "#arabicnumeral"
elif token in daysoftheweek:
return "#dayoftheweek"
elif token in monthsoftheyear:
return "#monthoftheyear"
elif token in personalnames:
return "#personalname"
elif token in placenames:
return "#placename"
elif token in romannumerals:
return "#romannumeral"
else:
return token
class VolumeFromJson:
# Mainly a data object that contains page-level wordcounts
# for a volume.
# Has been expanded in Jan 2017 by adding the default argument
# pagestoinclude
def __init__(self, volumepath, volumeid, pagestoinclude = set()):
'''Initializes a LoadedVolume by reading wordcounts from
a json file. By default it reads all the pages. But if
a set of pagestoinclude is passed in, it will read only page numbers
belonging to that set.'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
self.volumeid = thejson['id']
pagedata = thejson['features']['pages']
self.numpages = len(pagedata)
self.pagecounts = []
self.totalcounts = Counter()
self.totaltokens = 0
self.bodytokens = 0
self.sentencecount = 0
self.linecount = 0
typetokenratios = []
chunktokens = 0
typesinthischunk = set()
# a set of types in the current 10k-word chunk; progress
# toward which is tracked by chunktokens
self.integerless_pages = 0
self.out_of_order_pages = 0
self.skipped_pages = 0
compromise_pg = 0
if len(pagestoinclude) < 1:
pagestoinclude = set([x+1 for x in range(self.numpages)])
# If an empty set was passed in, or no set was provided,
# include all pages. the x+1 is because pages start counting
# at one, not zero.
for i in range(self.numpages):
thispagecounts = Counter()
thisbodytokens = 0
thisheadertokens = 0
thispage = pagedata[i]
# There are really two ways of numbering pages. They come in an order,
# which gives them an inherent ordinality (this is the *first* page). But
# they also have cardinal *labels* attached, in the "seq" field. These labels
# are usually, but not necessarily, convertible to integers. (Usually "00000001",
# but could be "notes.") *Usually* they are == to the ordinal number,
# but again, not necessarily.
# In this loop, i is the ordinal page number, and cardinal_page is the cardinal
# label; its value will be -1 if it can't be converted to an integer.
# compromise_pg skips pages that have no integer seq, but otherwise
# proceeds ordinally
try:
cardinal_page = int(thispage['seq'])
except:
cardinal_page = -1
if cardinal_page > 0:
compromise_pg += 1
elif cardinal_page < 0:
self.integerless_pages += 1
if compromise_pg != cardinal_page:
self.out_of_order_pages += 1
if cardinal_page >= 0 and compromise_pg in pagestoinclude:
linesonpage = int(thispage['lineCount'])
sentencesonpage = int(thispage['body']['sentenceCount'])
self.sentencecount += sentencesonpage
self.linecount += linesonpage
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
for token, partsofspeech in bodywords.items():
lowertoken = token.lower()
typesinthischunk.add(lowertoken)
                    # we do that to keep track of types -- notably, before normalizing
normaltoken = normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisbodytokens += count
chunktokens += count
thispagecounts[normaltoken] += count
if chunktokens > 10000:
typetoken = len(typesinthischunk) / chunktokens
typetokenratios.append(typetoken)
typesinthischunk = set()
chunktokens = 0
# generally speaking we count typetoken ratios on 10000-word chunks
headerwords = thispage['header']['tokenPosCount']
for token, partsofspeech in headerwords.items():
lowertoken = token.lower()
normaltoken = "#header" + normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisheadertokens += count
thispagecounts[normaltoken] += count
# You will notice that I treat footers (mostly) as part of the body
# Footers are rare, and rarely interesting.
footerwords = thispage['footer']['tokenPosCount']
for token, partsofspeech in footerwords.items():
lowertoken = token.lower()
typesinthischunk.add(lowertoken)
                    # we do that to keep track of types -- notably before normalizing
normaltoken = normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisbodytokens += count
chunktokens += count
thispagecounts[normaltoken] += count
self.pagecounts.append(thispagecounts)
for key, value in thispagecounts.items():
self.totalcounts[key] += value
self.totaltokens += thisbodytokens
self.totaltokens += thisheadertokens
self.bodytokens += thisbodytokens
else:
# print(i, cardinal_page, compromise_pg)
self.skipped_pages += 1
if len(typetokenratios) < 1 or chunktokens > 5000:
# After all pages are counted, we may be left with a
# chunk of fewer than 10000 words that we could use as further
# evidence about typetoken ratios.
# We do this only if we have to, or if the chunk is large
# enough to make it reasonable evidence.
chunktokens = chunktokens + 1 # Laplacian correction aka kludge
typetoken = len(typesinthischunk) / chunktokens
predictedtt = 4.549e-01 - (5.294e-05 * chunktokens) + (2.987e-09 * pow(chunktokens, 2))
# That's an empirical quadratic regression on observed data from many genres
extrapolatedtt = 0.2242 * (typetoken / predictedtt)
# We infer what typetoken *would* be for a 10k word chunk of this vol, given that it's
# typetoken for an n-word chunk.
if extrapolatedtt > 0.6:
extrapolatedtt = 0.6
if extrapolatedtt < 0.1:
extrapolatedtt = 0.1
# Let's be realistic. We have some priors on the bounds.
typetokenratios.append(extrapolatedtt)
self.typetoken = sum(typetokenratios) / len(typetokenratios)
self.sentencelength = self.bodytokens / (self.sentencecount + 1)
self.linelength = self.totaltokens / self.linecount
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def write_volume_features(self, outpath, override = False, translator = dict()):
''' This writes volume features while normalizing word frequencies,
after using a translation table to, for instance, convert American spellings
to British.
'''
        if os.path.isfile(outpath) and not override:
            print('Error: you are asking me to override an existing')
            print('file without explicitly specifying to do so in your')
            print('invocation of write_volume_features.')
            return
for word, equivalent in translator.items():
if word in self.totalcounts:
self.totalcounts[equivalent] += self.totalcounts.pop(word)
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['feature', 'count'])
for key, value in self.totalcounts.items():
if value > 0:
writer.writerow([key, value / self.totaltokens])
writer.writerow(['#sentencelength', self.sentencelength])
writer.writerow(['#typetoken', self.typetoken])
writer.writerow(['#linelength', self.linelength])
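    # Hypothetical call (the translator mapping here is an example, not a
    # table shipped with this module):
    #   vol.write_volume_features('out.csv', override=True,
    #                             translator={'color': 'colour'})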
def get_raw_body_features(self):
'''
Return features sans normalization.
'''
outdict = Counter()
for key, value in self.totalcounts.items():
if not key.startswith('#header'):
outdict[key] = value
outdict['#sentencelength'] = self.sentencelength
outdict['#typetoken'] = self.typetoken
outdict['#linelength'] = self.linelength
return outdict, self.bodytokens
def get_volume_features(self):
'''
Just like write_volume_features, except we return them
as a dictionary.
'''
outdict = Counter()
if self.totaltokens < 1:
return outdict, 0
else:
for key, value in self.totalcounts.items():
outdict[key] = value / self.totaltokens
outdict['#sentencelength'] = self.sentencelength
outdict['#typetoken'] = self.typetoken
outdict['#linelength'] = self.linelength
return outdict, self.totaltokens
def append_volume_features(self, outpath):
''' This is probably the way to do it. Initialize the file with
a header, and then add a bunch of volumes to the same file,
incorporating a column that distinguishes them by docid.
'''
with open(outpath, mode = 'a', encoding = 'utf-8') as f:
writer = csv.writer(f)
for key, value in self.totalcounts.items():
writer.writerow([self.volumeid, key, value / self.totaltokens])
writer.writerow([self.volumeid, '#sentencelength', self.sentencelength])
writer.writerow([self.volumeid, '#typetoken', self.typetoken])
writer.writerow([self.volumeid, '#linelength', self.linelength])
def log_tokens_for_page(pagejson, pagedict, typesonpage, ficcount, headerflag):
'''
Takes data from the pagejson and logs it appropriately in pagedict
and typesonpage.
'''
global ficwords
for token, partsofspeech in pagejson.items():
if token.istitle():
titleflag = True
else:
titleflag = False
if token.isupper():
upperflag = True
else:
upperflag = False
lowertoken = token.lower()
typesonpage.add(lowertoken)
# we do that to keep track of types -- notably, before normalizing
normaltoken = normalize_token_for_page(token)
# normalizing also lowercases the token, but we don't
# want to *send in* a lowercased token
for part, count in partsofspeech.items():
if headerflag:
pagedict['headertokens'] += count
else:
pagedict['bodytokens'] += count
if upperflag:
pagedict['uppercase'] += count
if titleflag:
pagedict['titlecase'] += count
if lowertoken in ficwords:
ficcount += count
pagedict['tokens'][normaltoken] += count
return ficcount
class PagelistFromJson:
# A data object that contains page-level wordcounts
    # for a volume.
def __init__(self, volumepath, volumeid):
        '''Initializes a PagelistFromJson by reading wordcounts from
        a json file.'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
assert thejson['id'] == volumeid
# I require volumeid to be explicitly passed in,
# although I could infer it, because I don't want
#any surprises.
self.volumeid = thejson['id']
pagejsons = thejson['features']['pages']
self.numpages = len(pagejsons)
self.pages = []
self.features = []
# in this data structure, a volume is a list of pages
for i in range(self.numpages):
pagedata = dict()
# each page is a dictionary that contains categories of
# features, most obviously wordcounts:
pagedata['tokens'] = Counter()
pagedata['bodytokens'] = 0
pagedata['titlecase'] = 0
pagedata['uppercase'] = 0
pagedata['headertokens'] = 0
self.pages.append(pagedata)
for i in range(self.numpages):
pagedata = self.pages[i]
thispage = pagejsons[i]
typesonpage = set()
ficcount = 0
pagedata['lines'] = int(thispage['lineCount'])
pagedata['sentences'] = int(thispage['body']['sentenceCount'])
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
ficcount = log_tokens_for_page(bodywords, pagedata, typesonpage, ficcount, headerflag = False)
headerwords = thispage['header']['tokenPosCount']
ficcount = log_tokens_for_page(headerwords, pagedata, typesonpage, ficcount, headerflag = True)
footerwords = thispage['footer']['tokenPosCount']
ficcount = log_tokens_for_page(footerwords, pagedata, typesonpage, ficcount, headerflag = True)
pagefeatures = dict()
# We don't directly return token counts, but normalize them
# in various ways
totaltokens = pagedata['bodytokens'] + pagedata['headertokens']
if totaltokens > 0:
for key, value in pagedata['tokens'].items():
pagefeatures[key] = value / totaltokens
pagefeatures['#totaltokens'] = totaltokens
if totaltokens > 0:
pagefeatures['#typetoken'] = len(typesonpage) / totaltokens
else:
pagefeatures['#typetoken'] = 1
pagefeatures['#absfromedge'] = min(i, self.numpages - i)
pagefeatures['#pctfromedge'] = pagefeatures['#absfromedge'] / self.numpages
pagefeatures['#absupper'] = pagedata['uppercase']
if totaltokens > 0:
pagefeatures['#pctupper'] = pagedata['uppercase'] / totaltokens
else:
pagefeatures['#pctupper'] = 0.5
pagefeatures['#abstitle'] = pagedata['titlecase']
if totaltokens > 0:
pagefeatures['#pcttitle'] = pagedata['titlecase'] / totaltokens
else:
pagefeatures['#pcttitle'] = 0.5
if pagedata['lines'] > 0:
pagefeatures['#linelength'] = totaltokens / pagedata['lines']
else:
pagefeatures['#linelength'] = 10
if totaltokens > 0:
pagefeatures['#ficpct'] = ficcount / totaltokens
else:
pagefeatures['#ficpct'] = 0
self.features.append(pagefeatures)
# Some features also get recorded as Z values normalized by the mean and
# standard deviation for this volume.
tonormalize = ['#typetoken', '#pcttitle', '#linelength', '#totaltokens', '#ficpct']
for feature in tonormalize:
values = np.zeros(self.numpages)
for i in range(self.numpages):
pagefeatures = self.features[i]
values[i] = (pagefeatures[feature])
meanval = np.mean(values)
stdval = np.std(values) + .0001
normalizedfeature = feature + 'normed'
for i in range(self.numpages):
self.features[i][normalizedfeature] = (self.features[i][feature] - meanval) / stdval
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def get_feature_list(self):
'''
Returns a list where each page is represented as a dictionary of features.
Features should already be normalized in all the ways we're going to
normalize them.
'''
return self.features
class LiteralVolumeFromJson:
# Mainly a data object that contains page-level wordcounts
# for a volume.
def __init__(self, volumepath, volumeid):
        '''Initializes a LiteralVolumeFromJson by reading wordcounts from
        a json file.'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
assert thejson['id'] == volumeid
# I require volumeid to be explicitly passed in,
# although I could infer it, because I don't want
#any surprises.
self.volumeid = thejson['id']
pagedata = thejson['features']['pages']
self.numpages = len(pagedata)
self.pagecounts = []
self.totalcounts = Counter()
self.totaltokens = 0
for i in range(self.numpages):
thispagecounts = Counter()
thisbodytokens = 0
thisheadertokens = 0
thispage = pagedata[i]
linesonpage = int(thispage['lineCount'])
sentencesonpage = int(thispage['body']['sentenceCount'])
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
for normaltoken, partsofspeech in bodywords.items():
for part, count in partsofspeech.items():
thisbodytokens += count
thispagecounts[normaltoken] += count
self.pagecounts.append(thispagecounts)
for key, value in thispagecounts.items():
self.totalcounts[key] += value
self.totaltokens += thisbodytokens
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def write_volume_features(self, outpath, override = False):
        if os.path.isfile(outpath) and not override:
            print('Error: you are asking me to override an existing')
            print('file without explicitly specifying to do so in your')
            print('invocation of write_volume_features.')
            return
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['feature', 'count'])
for key, value in self.totalcounts.items():
writer.writerow([key, value / self.totaltokens])
            # Unlike VolumeFromJson, this class never computes sentencelength,
            # typetoken, or linelength, so only the raw counts are written.
def get_volume_features(self):
'''
Just like write_volume_features, except we return them
as a dictionary.
'''
if self.totaltokens < 1:
return Counter(), 0
else:
return self.totalcounts, self.totaltokens
if __name__ == "__main__":
meta = pd.read_csv('/Users/tunder/Dropbox/python/train20/bzipmeta.csv', dtype = 'object', index_col = 'docid')
for index, row in meta.iterrows():
inpath = row['filepath']
vol = VolumeFromJson(inpath, index)
outpath = '/Volumes/TARDIS/work/train20/' + utils.clean_pairtree(index) + '.csv'
vol.write_volume_features(outpath, override = True)
| [
"[email protected]"
] | |
4455e95b78174d01b70882d3db82c199191ef89c | f1be5da6283270803c3f0cbb80f7d11ff9260655 | /scripts/vectorize.py | cddbfbce4c15edf2d095fa082351dc434767d35e | [
"Apache-2.0"
] | permissive | mogproject/tutte-polyn | 1a1cf371dd434991a41c73ab4e9a3936d9b93d5c | 991ce12619f86484ffac8a57186b5eea22d01f0a | refs/heads/master | 2022-07-12T05:00:35.493640 | 2020-05-12T07:25:07 | 2020-05-12T07:25:07 | 247,176,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | #!/usr/bin/env python3
"""
Converts output from the tuttepoly program into coefficient vectors for each graph.
"""
__author__ = 'Yosuke Mizutani'
__version__ = '0.0.1'
__license__ = 'Apache License, Version 2.0'
# imports standard libraries
import sys
import argparse
def get_parser():
"""Argument parser."""
    parser = argparse.ArgumentParser(description='Convert tuttepoly output into coefficient vectors')
parser.add_argument('-n', type=int, required=True, help='number of vertices')
parser.add_argument('path', help='input file path')
return parser
def parse_tp_line(line):
assert(line[:3] == 'TP[')
tokens = line.split(':=')
gid = int(tokens[0][3:-2])
terms = tokens[1].rstrip(':\n').split('+')
elems = [term.strip().split('*') for term in terms]
ret = []
for elem in elems:
dx, dy = 0, 0
for e in elem[1:]:
if e[0] == 'x':
dx = int(e[2:]) if e[1:2] == '^' else 1
elif e[0] == 'y':
dy = int(e[2:]) if e[1:2] == '^' else 1
ret += [(int(elem[0]), dx, dy)]
return gid, ret
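# Example of the format handled above (inferred from the parsing logic):
# a line "TP[7] := 1*x^2 + 2*x*y :" yields gid 7 and the term list
# [(1, 2, 0), (2, 1, 1)], i.e. (coefficient, x-degree, y-degree) triples.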
def parse_graph_line(line):
assert(line[:2] == 'G[')
tokens = line.split(':=')
gid = int(tokens[0][2:-2])
    edges = tokens[1].strip().rstrip('}').lstrip('{').split(',')
ret = []
for edge in edges:
vs = edge.split('--')
ret += [(int(vs[0]), int(vs[1]))]
return gid, ret
def main(args):
"""Entry point of the program. """
nx = args.n # max degree of x: n - 1
ny = 1 + (args.n - 1) * (args.n - 2) // 2 # max degree of y: n - 1 choose 2
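    # e.g. for n = 5: nx = 5 (x-degrees 0..4), ny = 1 + 4*3//2 = 7
    # (y-degrees 0..6), giving vectors of nx*ny = 35 slots where the
    # coefficient of x^dx * y^dy lands at index dy*nx + dx.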
with open(args.path) as f:
for line in f:
if line[0] == 'T':
parsed = parse_tp_line(line)
vec = [0 for i in range(nx * ny)]
for c, dx, dy in parsed[1]:
assert(dx < nx)
assert(dy < ny)
vec[dy * nx + dx] = c
print('%d: %s' % (parsed[0], ' '.join(map(str, vec))))
if __name__ == '__main__':
main(get_parser().parse_args())
| [
"[email protected]"
] | |
633868dbf7071b0ab8e9a8d69163295e0e39e2f9 | 146cd740649b87032cbbfb97cde6ae486f76230b | /venv/lib/python3.6/site-packages/matplotlib/backends/__init__.py | 06cad2d1ad10fdbfb2c19ff6f42036aef101cece | [] | no_license | shellyhuang18/plank-filter-master | 8b7024c46334062496f05d31eefc618ebae50b4e | 8993a5b00f45841c3385fe997857bfdd10b71a84 | refs/heads/master | 2020-03-30T18:14:45.017957 | 2018-12-27T20:51:25 | 2018-12-27T20:51:25 | 151,490,556 | 0 | 1 | null | 2018-12-19T22:42:26 | 2018-10-03T22:50:58 | Python | UTF-8 | Python | false | false | 3,720 | py | import importlib
import logging
import os
import sys
import traceback
import matplotlib
from matplotlib import cbook
from matplotlib.backend_bases import _Backend
_log = logging.getLogger(__name__)
# NOTE: plt.switch_backend() (called at import time) will add a "backend"
# attribute here for backcompat.
def _get_running_interactive_framework():
"""
Return the interactive framework whose event loop is currently running, if
any, or "headless" if no event loop can be started, or None.
Returns
-------
Optional[str]
One of the following values: "qt5", "qt4", "gtk3", "wx", "tk",
"macosx", "headless", ``None``.
"""
QtWidgets = (sys.modules.get("PyQt5.QtWidgets")
or sys.modules.get("PySide2.QtWidgets"))
if QtWidgets and QtWidgets.QApplication.instance():
return "qt5"
QtGui = (sys.modules.get("PyQt4.QtGui")
or sys.modules.get("PySide.QtGui"))
if QtGui and QtGui.QApplication.instance():
return "qt4"
Gtk = (sys.modules.get("gi.repository.Gtk")
or sys.modules.get("pgi.repository.Gtk"))
if Gtk and Gtk.main_level():
return "gtk3"
wx = sys.modules.get("wx")
if wx and wx.GetApp():
return "wx"
tkinter = sys.modules.get("tkinter")
if tkinter:
for frame in sys._current_frames().values():
while frame:
if frame.f_code == tkinter.mainloop.__code__:
return "tk"
frame = frame.f_back
try:
from matplotlib.backends import _macosx
except ImportError:
pass
else:
if _macosx.event_loop_is_running():
return "macosx"
if sys.platform.startswith("linux") and not os.environ.get("DISPLAY"):
return "headless"
return None
@cbook.deprecated("3.0")
def pylab_setup(name=None):
"""
Return new_figure_manager, draw_if_interactive and show for pyplot.
This provides the backend-specific functions that are used by pyplot to
abstract away the difference between backends.
Parameters
----------
name : str, optional
The name of the backend to use. If `None`, falls back to
``matplotlib.get_backend()`` (which return :rc:`backend`).
Returns
-------
backend_mod : module
The module which contains the backend of choice
new_figure_manager : function
Create a new figure manager (roughly maps to GUI window)
draw_if_interactive : function
Redraw the current figure if pyplot is interactive
show : function
Show (and possibly block) any unshown figures.
"""
# Import the requested backend into a generic module object.
if name is None:
name = matplotlib.get_backend()
backend_name = (name[9:] if name.startswith("module://")
else "matplotlib.backends.backend_{}".format(name.lower()))
backend_mod = importlib.import_module(backend_name)
# Create a local Backend class whose body corresponds to the contents of
# the backend module. This allows the Backend class to fill in the missing
# methods through inheritance.
Backend = type("Backend", (_Backend,), vars(backend_mod))
# Need to keep a global reference to the backend for compatibility reasons.
# See https://github.com/matplotlib/matplotlib/issues/6092
global backend
backend = name
_log.debug('backend %s version %s', name, Backend.backend_version)
return (backend_mod,
Backend.new_figure_manager,
Backend.draw_if_interactive,
Backend.show)
| [
"[email protected]"
] | |
f5fcee0e713532c5d2bae4ea721d31bb5f801dea | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /next_child_and_first_case/bad_way/group/right_problem.py | 33aff9e2e1183bf56ce9cb9ad48a6b8731a0d934 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
#! /usr/bin/env python
def able_government(str_arg):
day(str_arg)
print('eye')
def day(str_arg):
print(str_arg)
if __name__ == '__main__':
able_government('right_case')
| [
"[email protected]"
] | |
5fd7b4bec315d38a946f9058525294c069c76165 | 8fc9520d7224e6179f63f19e668b4b3b6a7d76c5 | /apps/basket/migrations/0003_auto__add_field_line_price_excl_tax.py | b89b7e147321a346aca2d07aaea5281be153f402 | [] | no_license | quantm/custom_django_oscar | 352ef2fd95e7da932958d4aa80d77dff5b6c1e70 | 9205807030ab360884283810e94177440c228a23 | refs/heads/master | 2016-08-09T12:23:39.413677 | 2016-02-08T22:16:53 | 2016-02-08T22:16:53 | 51,326,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,084 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Line.price_excl_tax'
db.add_column('basket_line', 'price_excl_tax', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2), keep_default=False)
def backwards(self, orm):
# Deleting field 'Line.price_excl_tax'
db.delete_column('basket_line', 'price_excl_tax')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'basket.basket': {
'Meta': {'object_name': 'Basket'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_merged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'baskets'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'vouchers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['voucher.Voucher']", 'null': 'True', 'symmetrical': 'False'})
},
'basket.line': {
'Meta': {'unique_together': "(('basket', 'line_reference'),)", 'object_name': 'Line'},
'basket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['basket.Basket']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_reference': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'basket_lines'", 'to': "orm['catalogue.Product']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'basket.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['basket.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Option']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'voucher.voucher': {
'Meta': {'object_name': 'Voucher'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'num_basket_additions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'vouchers'", 'symmetrical': 'False', 'to': "orm['offer.ConditionalOffer']"}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'usage': ('django.db.models.fields.CharField', [], {'default': "'Multi-use'", 'max_length': '128'})
}
}
complete_apps = ['basket']
# ==== file: /tests/mock_commands/nextpnr-gowin | repo: GregAC/edalize | license: BSD-2-Clause ====
#!/usr/bin/env python3
import os
import sys
# Touch the file passed with --write so downstream build steps see fresh output.
output_file = sys.argv[sys.argv.index('--write')+1]
with open(output_file, 'a'):
    os.utime(output_file, None)
# Record the full argument list so tests can assert on how the tool was called.
with open('nextpnr-gowin.cmd', 'w') as f:
    f.write(' '.join(sys.argv[1:]) + '\n')
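# Usage sketch (hypothetical arguments; the mock only inspects --write):
#   nextpnr-gowin --json design.json --write design_pnr.json
# The --write target gets touched and the argv line is saved so the test
# suite can compare it against the expected command.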
# ==== file: /src/mojo/public/tools/manifest/manifest_collator.py | repo: webosce/chromium53 | licenses: BSD-3-Clause, Apache-2.0 ====
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import os
import shutil
import sys
import urlparse
eater_relative = '../../../../../tools/json_comment_eater'
eater_relative = os.path.join(os.path.abspath(__file__), eater_relative)
sys.path.insert(0, os.path.normpath(eater_relative))
try:
import json_comment_eater
finally:
sys.path.pop(0)
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.loads(json_comment_eater.Nom(json_file.read()))
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def MergeDicts(left, right):
for k, v in right.iteritems():
if k not in left:
left[k] = v
else:
if isinstance(v, dict):
assert isinstance(left[k], dict)
MergeDicts(left[k], v)
elif isinstance(v, list):
assert isinstance(left[k], list)
left[k].extend(v)
else:
raise "Refusing to merge conflicting non-collection values."
return left
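# Illustrative sketch of the merge semantics (hypothetical data, not from the
# original source): dicts merge recursively, lists concatenate, and two
# colliding scalar values raise.
#   left = {'capabilities': {'provides': ['a']}, 'n': 1}
#   MergeDicts(left, {'capabilities': {'provides': ['b']}, 'm': 2})
#   # left is now {'capabilities': {'provides': ['a', 'b']}, 'n': 1, 'm': 2}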
def MergeBaseManifest(parent, base):
MergeDicts(parent["capabilities"], base["capabilities"])
if "applications" in base:
if "applications" not in parent:
parent["applications"] = []
parent["applications"].extend(base["applications"])
if "process-group" in base:
parent["process-group"] = base["process-group"]
def main():
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
parser.add_argument("--base-manifest", default=None)
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
if args.base_manifest:
base = ParseJSONFile(args.base_manifest)
if base == None:
return 1
MergeBaseManifest(parent, base)
app_path = parent['name'].split(':')[1]
if app_path.startswith('//'):
raise ValueError("Application name path component '%s' must not start " \
"with //" % app_path)
if args.application_name != app_path:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, app_path))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
if __name__ == "__main__":
sys.exit(main())
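# Invocation sketch (hypothetical paths) for an application named 'mojo:foo':
#   python manifest_collator.py --parent=foo_manifest.json \
#       --application-name=foo --output=out/foo/manifest.json \
#       child_a_manifest.json child_b_manifest.json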
# ==== file: /mayaTools/cgm/core/mrs/blocks/organic/muzzle.py | repo: stephanosterburg/cgmTools | license: BSD-3-Clause ====
"""
------------------------------------------
cgm.core.mrs.blocks.organic.muzzle
Author: Josh Burton
email: [email protected]
Website : http://www.cgmonks.com
------------------------------------------
================================================================
"""
__MAYALOCAL = 'MUZZLE'
# From Python =============================================================
import copy
import re
import pprint
import time
import os
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# From Maya =============================================================
import maya.cmds as mc
# From Red9 =============================================================
from Red9.core import Red9_Meta as r9Meta
import Red9.core.Red9_AnimationUtils as r9Anim
#r9Meta.cleanCache()#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< TEMP!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
import cgm.core.cgm_General as cgmGEN
from cgm.core.rigger import ModuleShapeCaster as mShapeCast
import cgm.core.cgmPy.os_Utils as cgmOS
import cgm.core.cgmPy.path_Utils as cgmPATH
import cgm.core.mrs.assets as MRSASSETS
path_assets = cgmPATH.Path(MRSASSETS.__file__).up().asFriendly()
import cgm.core.mrs.lib.ModuleControlFactory as MODULECONTROL
#reload(MODULECONTROL)
from cgm.core.lib import curve_Utils as CURVES
import cgm.core.lib.rigging_utils as CORERIG
from cgm.core.lib import snap_utils as SNAP
import cgm.core.lib.attribute_utils as ATTR
import cgm.core.rig.joint_utils as JOINT
import cgm.core.classes.NodeFactory as NODEFACTORY
import cgm.core.lib.transform_utils as TRANS
import cgm.core.lib.distance_utils as DIST
import cgm.core.lib.position_utils as POS
import cgm.core.lib.math_utils as MATH
import cgm.core.rig.constraint_utils as RIGCONSTRAINT
import cgm.core.rig.general_utils as RIGGEN
import cgm.core.lib.constraint_utils as CONSTRAINT
import cgm.core.lib.locator_utils as LOC
import cgm.core.lib.rayCaster as RAYS
import cgm.core.lib.shape_utils as SHAPES
import cgm.core.mrs.lib.block_utils as BLOCKUTILS
import cgm.core.mrs.lib.builder_utils as BUILDERUTILS
import cgm.core.mrs.lib.shared_dat as BLOCKSHARE
import cgm.core.mrs.lib.blockShapes_utils as BLOCKSHAPES
import cgm.core.tools.lib.snap_calls as SNAPCALLS
import cgm.core.rig.ik_utils as IK
import cgm.core.cgm_RigMeta as cgmRIGMETA
import cgm.core.lib.nameTools as NAMETOOLS
import cgm.core.cgmPy.validateArgs as VALID
import cgm.core.lib.list_utils as LISTS
import cgm.core.rig.ik_utils as IK
import cgm.core.rig.skin_utils as RIGSKIN
import cgm.core.lib.string_utils as STR
import cgm.core.lib.surface_Utils as SURF
#for m in DIST,POS,MATH,IK,CONSTRAINT,LOC,BLOCKUTILS,BUILDERUTILS,CORERIG,RAYS,JOINT,RIGCONSTRAINT,RIGGEN,BLOCKSHAPES:
# reload(m)
# From cgm ==============================================================
from cgm.core import cgm_Meta as cgmMeta
DGETAVG = DIST.get_average_position
CRVPCT = CURVES.getPercentPointOnCurve
DPCTDIST = DIST.get_pos_by_linearPct
#=============================================================================================================
#>> Block Settings
#=============================================================================================================
__version__ = 'alpha.10.31.2018'
__autoForm__ = False
__menuVisible__ = True
__faceBlock__ = True
#These are our base dimensions. In this case it is for human
__dimensions_by_type = {'box':[22,22,22],
'human':[15.2, 23.2, 19.7]}
__l_rigBuildOrder__ = ['rig_dataBuffer',
'rig_skeleton',
'rig_shapes',
'rig_controls',
'rig_frame',
'rig_lipSegments',
'rig_cleanUp']
d_wiring_skeleton = {'msgLinks':[],
'msgLists':['moduleJoints','skinJoints']}
d_wiring_prerig = {'msgLinks':['moduleTarget','prerigNull','noTransPrerigNull']}
d_wiring_form = {'msgLinks':['formNull','noTransFormNull'],
}
d_wiring_extraDags = {'msgLinks':['bbHelper'],
'msgLists':[]}
#>>>Profiles ==============================================================================================
d_build_profiles = {}
d_block_profiles = {'default':{},
'jaw':{'baseSize':[17.6,7.2,8.4],
'faceType':'default',
'muzzleSetup':'dag',
'noseSetup':'none',
'jawSetup':'simple',
'lipSetup':'none',
'teethSetup':'none',
'cheekSetup':'none',
'tongueSetup':'none',
'uprJaw':False,
'chinSetup':'none',
},
'canine':{'jawSetup':'simple',
'lipSetup':'default',
'noseSetup':'simple',
'chinSetup':'none',
'nostrilSetup':'simple'},
'human':{'jawSetup':'simple',
'lipSetup':'default',
'muzzleSetup':'dag',
'noseSetup':'simple',
'chinSetup':'single',
'cheekSetup':'single',
'sneerSetup':'single',
'nostrilSetup':'simple'},
'beak':{},
}
"""
'eyebrow':{'baseSize':[17.6,7.2,8.4],
'browType':'full',
'profileOptions':{},
'paramStart':.2,
'paramMid':.5,
'paramEnd':.7,
},"""
#>>>Attrs =================================================================================================
l_attrsStandard = ['side',
'position',
'baseAim',
'attachPoint',
'attachIndex',
'nameList',
'loftDegree',
'loftSplit',
'scaleSetup',
'visLabels',
'buildSDK',
'controlOffset',
'conDirectOffset',
'moduleTarget',]
d_attrsToMake = {'faceType':'default:muzzle:beak',
'muzzleSetup':'none:dag:joint',
'noseSetup':'none:simple',
'jawSetup':'none:simple:slide',
'lipSetup':'none:default',
'teethUprSetup':'none:simple',
'teethLwrSetup':'none:simple',
'cheekSetup':'none:single',
'cheekUprSetup':'none:single',
'tongueSetup':'none:single',
'sneerSetup':'none:single',
'smileSetup':'none:single',
#Jaw...
'uprJawSetup':'none:default',
'chinSetup':'none:single',
#Nose...
'nostrilSetup':'none:default',
'bridgeSetup':'none:default',
'numJointsNostril':'int',
'numJointsNoseTip':'int',
#Lips...
'lipSealSetup':'none:default',
'numConLips':'int',
'numLipShapersUpr':'int',
'numLipShapersLwr':'int',
'numJointsLipUpr':'int',
'numJointsLipLwr':'int',
'paramUprStart':'float',
'paramLwrStart':'float',
'numLoftJaw_u':'int',
'numLoftJaw_v':'int',
'numLoftLip_u':'int',
'numLoftLipOver_u':'int',
'numLoftLipUnder_u':'int',
'numLoftLip_v':'int',
'numLoftNose_u':'int',
'numLoftNose_v':'int',
'numLoftBridge_u':'int',
'numLoftBridge_v':'int',
'numLoftBag_u':'int',
'numLipOverSplit':'int',
'numLipUnderSplit':'int',
'numBridgeSplit':'int',
#'lipCorners':'bool',
#Tongue...
'numJointsTongue':'int',
'jointDepth':'float',
'jointDepthLip':'float',
'jointRadius':'float',
}
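#A sketch of how the table above is consumed (inferred from the value formats,
#not stated in the original source): colon-separated strings such as
#'none:simple:slide' become enum attributes on the block, while 'int'/'float'
#entries become plain numeric attributes when the block verifies itself.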
d_defaultSettings = {'version':__version__,
'attachPoint':'end',
'side':'none',
'loftDegree':'cubic',
'numJointsLipUpr':3,
'numConLips':3,
'numJointsLipLwr':3,
'numJointsNoseTip':1,
'numJointsNostril':1,
'paramUprStart':.15,
'paramLwrStart':.15,
'numJointsTongue':3,
'visLabels':True,
'numLoftJaw_u':22,
'numLoftJaw_v':6,
'numLoftLip_u':9,
'numLoftOverUnder_u':6,
'numLoftLipUnder_u':3,
'numLoftLip_v':13,
'numLoftNose_u':10,
'numLoftNose_v':10,
'numLoftBridge_u':10,
'numLoftBridge_v':3,
'numLoftBag_u':8,
'numLipShapersUpr':6,
'numLipShapersLwr':6,
'numBridgeSplit':1,
'numLipOverSplit':3,
'numLipUnderSplit':2,
'jointDepth':-.41,
'jointDepthLip':-.41,
'controlOffset':1,
'conDirectOffset':0,
'jointRadius':1.0,
#'baseSize':MATH.get_space_value(__dimensions[1]),
}
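#Sketch (hypothetical): choosing a blockProfile lays its d_block_profiles
#entry over these defaults, so a 'human' muzzle gets chinSetup 'single' and
#cheekSetup 'single' while unlisted keys keep the values above.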
_d_scaleSpace = {'beak':
{'cheekBoneRight': [-0.4706429982653817,
0.09505896616210308,
0.7782782571806026],
'cheekRight': [-0.7577426494534092, -0.1000000000000032, 0.25237789627805113],
'cornerBackRight': [-0.2799999999999998,
-0.16730074985625443,
0.9000000000000001],
'cornerBagRight': [-0.2999999999999985,
-0.16730074985625443,
0.8500000000000001],
'cornerFrontRight': [-0.2999999999999985,
-0.16730074985625443,
1.0000000000000007],
'cornerPeakRight': [-0.3581046628664546,
-0.1637323149082519,
0.9251310369583978],
'jawFrontRight': [-0.15871517417250391,
-0.8944764139389338,
0.48158759765709797],
'jawNeck': [8.881784197001252e-16, -1.0, -0.09999999999999987],
'jawNeckRight': [-0.3999999999999999,
-0.8000000000000007,
-0.24999999999999997],
'jawRight': [-0.8500000000000001, -0.3000000000000007, -1.0],
'jawTopRight': [-0.9999999999999987, 0.5, -1.0],
'lwrBack': [8.881784197001252e-16, -0.25811272959247056, 1.4366308171068805],
'lwrBackOutLeft': [0.14600316686371873,
-0.2382539451300385,
1.1014928068592929],
'lwrBackOutRight': [-0.14600316686371784,
-0.2382539451300385,
1.1014928068592929],
'lwrBackRight': [-0.07041679936800271,
-0.25811272959247056,
1.3721398724832463],
'lwrFront': [8.881784197001252e-16, -0.25811272959247056, 1.5100974054480814],
'lwrFrontOutLeft': [0.16836614122778926,
-0.2464644544312744,
1.1419072659670515],
'lwrFrontOutRight': [-0.16836614122778837,
-0.2464644544312744,
1.1419072659670515],
'lwrFrontRight': [-0.0999999999999952,
-0.25811272959247056,
1.460097405448082],
'lwrGum': [3.552713678800501e-15, -0.3828124377718929, 1.3555163105674595],
'lwrGumOutRight': [-0.12285507421543107,
-0.31773137293822806,
1.0735065119064129],
'lwrPeak': [-1.7763568394002505e-15, -0.36757669158860473, 1.470463494666341],
'lwrPeakOutLeft': [0.1643090294234293,
-0.3184307982092278,
1.1239340074675737],
'lwrPeakOutRight': [-0.1643090294234284,
-0.3184307982092278,
1.1239340074675737],
'lwrPeakRight': [-0.09552407160766885,
-0.3383278497732505,
1.3652433978548943],
'orbFrontRight': [-0.5052126275807152,
0.4879150381731616,
0.6929776357587651],
'orbRight': [-0.7275390891024611, 0.5640872012272311, 0.24922301958874898],
'smileRight': [-0.4344443854213531, -0.1388694510960402, 0.853222188289098],
'uprBack': [4.884981308350689e-15, -0.25978087813038186, 1.5018211539415889],
'uprBackOutLeft': [0.14739875106495814,
-0.23064632176076216,
1.1035608764729905],
'uprBackOutRight': [-0.14739875106495726,
-0.23064632176076216,
1.1035608764729905],
'uprBackRight': [-0.07123755755266226,
-0.25741394469547707,
1.3680678889065139],
'uprFront': [8.881784197001252e-16, -0.3363247362670556, 1.6045441923683403],
'uprFrontOutLeft': [0.1747895324661024,
-0.24635271777310486,
1.1475659097281832],
'uprFrontOutRight': [-0.17478953246610152,
-0.24635271777310486,
1.1475659097281832],
'uprFrontRight': [-0.09999999999999254,
-0.25741394469547707,
1.456726679658548],
'uprGum': [3.552713678800501e-15, -0.09568063015775685, 1.4845226141398844],
'uprGumOutRight': [-0.1455244682733401,
-0.14289519688253272,
1.094882888563098],
'uprPeak': [3.552713678800501e-15, -0.16101775510074567, 1.632339068615723],
'uprPeakOutLeft': [0.19521451801805734,
-0.16717155191035893,
1.1661646804732075],
'uprPeakOutRight': [-0.1952145180180569,
-0.16717155191035893,
1.1661646804732075],
'uprPeakRight': [-0.11029238066243474,
-0.1574139446954792,
1.5359215390758874]}
,
'canine':
{'bridge': [0, 0.7498176416406359, 1.0360182177554098],
'bridgeOuterLeft': [0.1957615666726813,
0.5861744098168451,
0.9841679114197788],
'bridgeOuterRight': [-0.19576156667268174,
0.5861744098168451,
0.9841679114197788],
'bridgeRight': [-0.09935131199319214, 0.7189223703844227, 1.0360182177554107],
'bulb': [0, 0.5771649917634214, 1.4865237503303455],
'bulbRight': [-0.10000000000000098, 0.559579202989049, 1.486523750330346],
'cheekBoneRight': [-0.4548718429906855,
0.3193815118184702,
0.4193117038087638],
'cheekRight': [-0.766609681139002, -0.3377810960371548, -0.158567563006563],
'cornerBackRight': [-0.37214375696857793,
-0.5474608808125421,
0.30569460998633347],
'cornerBagRight': [-0.3309945846495146,
-0.5474608808125438,
0.26342441742485634],
'cornerFrontRight': [-0.4088476244546153,
-0.5474608808125421,
0.31501298295863644],
'cornerLwrRight': [-0.39308398337602046,
-0.6189502601280825,
0.30429465981816595],
'cornerPeakRight': [-0.4524272643516176,
-0.5474608808125652,
0.277378868596756],
'cornerUprRight': [-0.4313490937931834,
-0.4130946123885284,
0.35572687429844563],
'jawFrontRight': [-0.303667363085141,
-0.8136541251421114,
0.21283793611728252],
'jawNeck': [0, -1.0155196870030885, -0.09988547315186386],
'jawNeckRight': [-0.5579989616498406,
-0.8301545313225525,
-0.04454479938204825],
'jawRight': [-0.8267515799055545, -0.5189037586570784, -0.8910403473217492],
'jawTopRight': [-1.0000000000000053, 0.6216915556280753, -0.9999999999999998],
'lwrBack': [-8.881784197001252e-16, -0.5918607643873628, 1.2101399766119272],
'lwrBackOutLeft': [0.28060741271139555,
-0.5800119857936608,
0.7754055610110713],
'lwrBackOutRight': [-0.280607412711396,
-0.5800119857936608,
0.7754055610110713],
'lwrBackRight': [-0.18033650066295914,
-0.5918607643873628,
1.1294913439189411],
'lwrFront': [0, -0.5918607643873628, 1.2984826494456996],
'lwrFrontOutLeft': [0.33951090218515745,
-0.5816402133093899,
0.8160079970770875],
'lwrFrontOutRight': [-0.3395109021851579,
-0.5816402133093899,
0.8160079970770875],
'lwrFrontRight': [-0.22231060034631822,
-0.5918607643873628,
1.2179583546034918],
'lwrGum': [-8.881784197001252e-16, -0.691860764387366, 1.1422086678390535],
'lwrGumOutRight': [-0.2436406968926219,
-0.7098406465304699,
0.7567150755165863],
'lwrOver': [0, -0.8269212605232905, 1.1305542962469466],
'lwrOverOutLeft': [0.2891436087748458,
-0.7672977478568832,
0.7935513492282487],
'lwrOverOutRight': [-0.28914360877484624,
-0.7672977478568832,
0.7935513492282487],
'lwrOverRight': [-0.1768902633669831,
-0.8045733904124557,
1.0806476321082719],
'lwrPeak': [-3.1086244689504383e-15, -0.7140052399719963, 1.2417462320469328],
'lwrPeakOutLeft': [0.33661382195627754,
-0.6604659845353336,
0.793977322099741],
'lwrPeakOutRight': [-0.336613821956278,
-0.6604659845353336,
0.793977322099741],
'lwrPeakRight': [-0.20448822571832803,
-0.7118227410444398,
1.1390415005943137],
'noseBase': [0, -0.06898868185345464, 1.604132679267137],
'noseBaseRight': [-0.10000000000000098,
-0.006028153579244133,
1.604132679267137],
'noseTip': [0, 0.3737935982860847, 1.6879084942027562],
'noseTipRight': [-0.1735018630054248, 0.3732671288382967, 1.6059596572032393],
'noseTop': [0, 1.0, 0.5],
'noseTopRight': [-0.11617618248954154, 0.9550754787151163, 0.500000000000002],
'noseUnder': [0, 0.12141895581972761, 1.6669100954216],
'nostrilBaseRight': [-0.25242091464837246,
0.1410632843513504,
1.4806614633476713],
'nostrilLineInnerLeft': [0.07518023118280803,
0.14445261224951622,
1.6386126003323707],
'nostrilLineInnerRight': [-0.07518023118280848,
0.14445261224951622,
1.6386126003323707],
'nostrilLineOuterRight': [-0.15748658244389135,
0.19748125577367048,
1.6238699713504006],
'nostrilRight': [-0.24404273259852172,
0.40107545329665584,
1.4985048303021897],
'nostrilTopRight': [-0.1841356867342694,
0.5068183782140139,
1.4606647904279297],
'orbFrontRight': [-0.4301394577464368,
0.5909860773442261,
0.30849262045566506],
'orbRight': [-0.6456105096034843, 0.7427437489438979, 0.048974030106921695],
'smileRight': [-0.5141412209272933, -0.5437366183790004, 0.24013782225955904],
'sneerRight': [-0.22141491559884363, 0.8244026206143751, 0.4450581223588941],
'snoutTopRight': [-0.39335312995227945,
0.21876120502259155,
0.9056429069695511],
'uprBack': [0, -0.5884473089030458, 1.21955862746079],
'uprBackOutLeft': [0.277309719760507, -0.578053836709385, 0.7860807569310277],
'uprBackOutRight': [-0.277309719760507,
-0.578053836709385,
0.7860807569310277],
'uprBackRight': [-0.17868564856592073,
-0.5884473089030493,
1.1319569647395937],
'uprFront': [0, -0.5884473089030404, 1.2953757632288139],
'uprFrontOutLeft': [0.33892340015558986,
-0.5884473089030404,
0.8119809244359124],
'uprFrontOutRight': [-0.3389234001555903,
-0.5884473089030404,
0.8119809244359124],
'uprFrontRight': [-0.22190603872315906,
-0.5884473089030493,
1.2196805068488046],
'uprGum': [0, -0.38844730890304113, 1.2453757632288138],
'uprGumOutRight': [-0.2636740128954118,
-0.4752923682393497,
0.7628915247068588],
'uprOver': [0, -0.21963642489463275, 1.5802602472539617],
'uprOverOutLeft': [0.4303941696256979,
-0.2504627316956718,
0.8465348977211804],
'uprOverOutRight': [-0.43039416962569854,
-0.2504627316956718,
0.8465348977211804],
'uprOverRight': [-0.29563766059242225,
-0.21699739669405993,
1.4311541131749324],
'uprPeak': [0, -0.4665727638281254, 1.4295372373948583],
'uprPeakOutLeft': [0.3897574118981584,
-0.4636183550215076,
0.8246406265608373],
'uprPeakOutRight': [-0.3897574118981586,
-0.4636183550215076,
0.8246406265608373],
'uprPeakRight': [-0.2739604280711778,
-0.4849660828778699,
1.3442676225708896]}
,
'human':
{'bridge': [0, 0.7365867340472114, 1.0030996597926332],
'bridgeOuterLeft': [0.13949252389112712,
0.5837717540493443,
0.8367171328970837],
'bridgeOuterRight': [-0.13949252389112712,
0.5837717540493443,
0.8367171328970837],
'bridgeRight': [-0.07520474838168828, 0.7270662266989021, 0.9835762575207574],
'bulb': [0, 0.5240699068480765, 1.3901734896052207],
'bulbRight': [-0.11468922985910648, 0.4988461562971267, 1.2372688699933758],
'cheekBoneRight': [-0.4552455251816405,
0.3524273607183872,
0.7305402042245492],
'cheekRight': [-0.7548138362346037, -0.0135475526453952, 0.10398873890517879],
'chinRight': [-0.1614409523259761, -0.7468972693510736, 0.947632866875574],
'cornerBackRight': [-0.28625966490909616,
-0.23679384075461485,
0.6385293062014132],
'cornerBagRight': [-0.3062596649090961,
-0.23679384075461485,
0.5885293062014108],
'cornerFrontRight': [-0.3062596649090961,
-0.23679384075461485,
0.7385293062014107],
'cornerLwrRight': [-0.29821787815454764,
-0.33065820535748713,
0.7786768690864982],
'cornerPeakRight': [-0.3596721197671391,
-0.23679384075463616,
0.7230461841801437],
'cornerUprRight': [-0.30667137028392527,
-0.1529287167356017,
0.7934864476056909],
'jawFrontRight': [-0.17999508832537225,
-0.9719119178444089,
0.7578889161402307],
'jawNeck': [0, -0.8881111221874534, -0.10000000000000342],
'jawNeckRight': [-0.539036874461076,
-0.6726205915006354,
-0.08258840573581838],
'jawRight': [-0.7651724059447822, -0.3164820148480878, -0.7067427535826039],
'jawTopRight': [-0.9969301781281826, 0.7911406527910891, -0.8627656184986184],
'lwrBack': [0, -0.19957702569902835, 0.8578025218313079],
'lwrBackRight': [-0.09999999999999999,
-0.19957702569902835,
0.8312374301041356],
'lwrFront': [0, -0.19957702569902835, 0.9812374301041378],
'lwrFrontRight': [-0.09999999999999999,
-0.19957702569902835,
0.9312374301041357],
'lwrGum': [0, -0.4050106397762363, 0.7101662861302107],
'lwrOver': [0, -0.43009829085550066, 0.9333321698811523],
'lwrOverRight': [-0.14114921069235795,
-0.40910262091812655,
0.9054754675656356],
'lwrPeak': [0, -0.28145464592647684, 1.0325640693178344],
'lwrPeakRight': [-0.11879611628814471,
-0.2762527299064921,
0.9856467769551167],
'mouthBagBack': [0, -0.1682872287475199, -0.5303918321125718],
'mouthBagBottom': [0, -0.6273181063579649, -0.07760146037222282],
'mouthBagRight': [-0.5274779924958036,
-0.17152047717288,
-0.1334743803977605],
'mouthBagTop': [0, 0.3950271775166314, -0.06793996224606946],
'noseBase': [0, 0.15984447456211726, 1.1225285802452465],
'noseBaseRight': [-0.06447592850531289,
0.2013173640764272,
1.0687523205093905],
'noseTip': [0, 0.3805325402582582, 1.4355435576859916],
'noseTipRight': [-0.12175008451239278,
0.34132424799971517,
1.2811248605594763],
'noseTop': [0, 0.9250453592947459, 0.8484752751013809],
'noseTopRight': [-0.07398231796050096,
0.8846985812493671,
0.8092251486208948],
'noseUnder': [0, 0.2255249531889234, 1.2590827521178323],
'nostrilBaseRight': [-0.17840265418137208,
0.20201759620469062,
0.9279836834010364],
'nostrilLineInnerLeft': [0.05220606107410563,
0.24410677272582504,
1.1687362833998676],
'nostrilLineInnerRight': [-0.05220606107410563,
0.24410677272582504,
1.1687362833998676],
'nostrilLineOuterRight': [-0.1403097181284395,
0.277964514108767,
1.104542194650513],
'nostrilRight': [-0.29213419777709765,
0.29438972478670067,
0.8434804420810997],
'nostrilTopRight': [-0.21669630246982277,
0.41844683344454126,
0.8235378198554102],
'orbFrontRight': [-0.46951524910375025,
0.5835603299980932,
0.6025409869886382],
'orbRight': [-0.7916816805290654, 0.7847957495560784, 0.20855368016648068],
'smileRight': [-0.45059839999917634,
-0.23096933114542395,
0.6535786299993362],
'sneerRight': [-0.14101534953672315, 0.7249906642449702, 0.7062305133770392],
'uprBack': [0, -0.1914104378629098, 0.8619720386817642],
'uprBackRight': [-0.09999999999999999,
-0.1914104378629098,
0.816796936835106],
'uprFront': [0, -0.1914104378629098, 0.9667969368351086],
'uprFrontRight': [-0.09999999999999999,
-0.1914104378629098,
0.9167969368351043],
'uprGum': [0, 0.056504646743022136, 0.7786976244523256],
'uprOver': [0, 0.014046671071223926, 1.0801305041861746],
'uprOverRight': [-0.1277698904501184,
-0.0017240145500210247,
1.0364692906386268],
'uprPeak': [0, -0.10848970608268971, 1.1026890245980834],
'uprPeakRight': [-0.1252550253791018,
-0.11449613164451478,
1.0483478230802286]}
}
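#A note on the data above (inferred): each triple is a handle position stored
#relative to the block's bounding-box helper. Mostly center and right-side
#values are kept; missing left-side entries are rebuilt at define time by
#negating X of the matching right-side value (see get_handleScaleSpaces
#inside define()).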
#=============================================================================================================
#>> Define
#=============================================================================================================
def mirror_self(self,primeAxis = 'Left'):
_str_func = 'mirror_self'
_idx_state = self.getState(False)
ml_done = []
log.debug("|{0}| >> define...".format(_str_func)+ '-'*80)
ml_mirrorDefineHandles = self.msgList_get('defineSubHandles')
for mObj in ml_mirrorDefineHandles:
ml_done.append(mObj)
r9Anim.MirrorHierarchy().makeSymmetrical([mObj.mNode for mObj in ml_mirrorDefineHandles],
mode = '',primeAxis = primeAxis.capitalize() )
if _idx_state > 0:
log.debug("|{0}| >> form...".format(_str_func)+ '-'*80)
ml_mirrorFormHandles = self.msgList_get('formHandles')
ml_use = []
for mObj in ml_mirrorFormHandles:
if mObj not in ml_done:
ml_use.append(mObj)
else:
ml_done.append(mObj)
r9Anim.MirrorHierarchy().makeSymmetrical([mObj.mNode for mObj in ml_use],
mode = '',primeAxis = primeAxis.capitalize() )
if _idx_state > 1:
log.debug("|{0}| >> prerig...".format(_str_func)+ '-'*80)
ml_mirrorPreHandles = self.msgList_get('prerigHandles') + self.msgList_get('jointHandles')
ml_use = []
for mObj in ml_mirrorPreHandles:
if mObj not in ml_done:
ml_use.append(mObj)
else:
ml_done.append(mObj)
r9Anim.MirrorHierarchy().makeSymmetrical([mObj.mNode for mObj in ml_use],
mode = '',primeAxis = primeAxis.capitalize() )
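#Usage sketch (hypothetical instance name): push the Left handle poses across
#to the Right side through the Red9 mirror markers set up in define():
#   mirror_self(mMuzzleBlock, primeAxis='Left')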
@cgmGEN.Timer
def define(self):
_str_func = 'define'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
_short = self.mNode
#Attributes =========================================================
ATTR.set_alias(_short,'sy','blockScale')
    self.setAttrFlags(attrs=['sx','sz'])
self.doConnectOut('sy',['sx','sz'])
ATTR.set_min(_short, 'loftSplit', 1)
ATTR.set_min(_short, 'paramUprStart', 0.0)
ATTR.set_min(_short, 'paramLwrStart', 0.0)
#Buffer our values...
_str_faceType = self.getEnumValueString('faceType')
_str_muzzleSetup = self.getEnumValueString('muzzleSetup')
_str_noseSetup = self.getEnumValueString('noseSetup')
_str_uprJawSetup = self.getEnumValueString('uprJawSetup')
_str_lipsSetup = self.getEnumValueString('lipsSetup')
_str_teethSetup = self.getEnumValueString('teethSetup')
_str_cheekSetup = self.getEnumValueString('cheekSetup')
_str_tongueSetup = self.getEnumValueString('tongueSetup')
#Cleaning =========================================================
_shapes = self.getShapes()
if _shapes:
log.debug("|{0}| >> Removing old shapes...".format(_str_func))
mc.delete(_shapes)
defineNull = self.getMessage('defineNull')
if defineNull:
log.debug("|{0}| >> Removing old defineNull...".format(_str_func))
mc.delete(defineNull)
ml_handles = []
#rigBlock Handle ===========================================================
log.debug("|{0}| >> RigBlock Handle...".format(_str_func))
_size = MATH.average(self.baseSize[1:])
_crv = CURVES.create_fromName(name='locatorForm',#'axis3d',#'arrowsAxis',
direction = 'z+', size = _size/4)
SNAP.go(_crv,self.mNode,)
CORERIG.override_color(_crv, 'white')
CORERIG.shapeParent_in_place(self.mNode,_crv,False)
mHandleFactory = self.asHandleFactory()
self.addAttr('cgmColorLock',True,lock=True, hidden=True)
mDefineNull = self.atUtils('stateNull_verify','define')
mNoTransformNull = self.atUtils('noTransformNull_verify','define',forceNew=True,mVisLink=self)
#Bounding sphere ==================================================================
_bb_shape = CURVES.create_controlCurve(self.mNode,'cubeOpen', size = 1.0, sizeMode='fixed')
mBBShape = cgmMeta.validateObjArg(_bb_shape, 'cgmObject',setClass=True)
#mScaleNull = mBBShape.doCreateAt(setClass=True)
#mScaleNull.rename("scaleRef")
mBBShape.p_parent = mDefineNull
mBBShape.tz = -.5
mBBShape.ty = .5
#mScaleNull.p_parent = mBBShape
#mScaleNull.p_position = POS.get(mBBShape.mNode,'bb')
#mScaleNull.dagLock()
CORERIG.copy_pivot(mBBShape.mNode,self.mNode)
mHandleFactory.color(mBBShape.mNode,controlType='sub')
mBBShape.setAttrFlags()
mBBShape.doStore('cgmName', self)
mBBShape.doStore('cgmType','bbVisualize')
mBBShape.doName()
self.connectChildNode(mBBShape.mNode,'bbHelper')
self.doConnectOut('baseSize', "{0}.scale".format(mBBShape.mNode))
#Make our handles creation data =======================================================
d_pairs = {}
d_creation = {}
l_order = []
d_curves = {}
d_curveCreation = {}
d_toParent = {}
_str_pose = self.blockProfile#'human'
if not _d_scaleSpace.get(_str_pose):
log.error(cgmGEN.logString_sub(_str_func,'Unregistered scaleSpace blockProfile: {0}'.format(_str_pose)))
return False
l_mainHandles = []
    def get_handleScaleSpaces(d_base,d_scaleSpace,key,plug_left,plug_right):
        #Fill each handle's scaleSpace. Left-side handles reuse the stored
        #right-side value with X negated so the sides stay symmetrical;
        #center and right handles read their stored value directly.
        for k,d in d_base.iteritems():
            if plug_left in k:
                k_use = str(k).replace(plug_left,plug_right)
                _v = copy.copy(d_scaleSpace[key].get(k_use))
                if _v:
                    _v[0] = -1 * _v[0]
            else:
                _v = d_scaleSpace[key].get(k)
            if _v is not None:
                d_base[k]['scaleSpace'] = _v
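        #Sketch with rounded, hypothetical numbers: given
        #   d_scaleSpace['human']['cheekRight'] = [-0.75, -0.01, 0.10]
        #a 'cheekLeft' key in d_base receives scaleSpace [0.75, -0.01, 0.10],
        #keeping left handles an exact X mirror of the stored right side.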
#Jaw ---------------------------------------------------------------------
if self.jawSetup:
_str_jawSetup = self.getEnumValueString('jawSetup')
log.debug(cgmGEN.logString_sub(_str_func,'jawSetup: {0}'.format(_str_jawSetup)))
_d_pairs = {}
_d = {}
l_sideKeys = ['jaw','jawTop','jawFront','orbFront','orb','jawNeck','cheek','cheekBone',
]
for k in l_sideKeys:
_d_pairs[k+'Left'] = k+'Right'
d_pairs.update(_d_pairs)#push to master list...
l_centerKeys = ['jawNeck']
for k in l_centerKeys:
_d[k] = {'color':'yellowWhite','tagOnly':1,'arrow':0,'jointLabel':1,'vectorLine':0}
for k in l_sideKeys:
_d[k+'Left'] = {'color':'blueBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
_d[k+'Right'] = {'color':'redBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
get_handleScaleSpaces(_d,_d_scaleSpace,_str_pose,'Left','Right')
"""
for k,d in _d.iteritems():
if 'Left' in k:
k_use = str(k).replace('Left','Right')
_v = copy.copy(_d_scaleSpace[_str_pose].get(k_use))
if _v:
_v[0] = -1 * _v[0]
else:
_v = _d_scaleSpace[_str_pose].get(k)
if _v is not None:
_d[k]['scaleSpace'] = _v"""
_keys = _d.keys()
_keys.sort()
l_order.extend(_keys)
d_creation.update(_d)
_d_curveCreation = {'jawLine':{'keys':['jawTopLeft','jawLeft','jawNeckLeft','jawFrontLeft',
'jawFrontRight','jawNeckRight','jawRight','jawTopRight'],
'rebuild':False},
'cheekLineLeft':{'keys':['jawTopLeft','orbLeft','orbFrontLeft'],
'rebuild':False},
'cheekLineRight':{'keys':['jawTopRight','orbRight','orbFrontRight'],
'rebuild':False},
'cheekCurveLeft':{'keys':['orbLeft','cheekLeft','jawNeckLeft'],
'rebuild':False},
'cheekCurveRight':{'keys':['orbRight','cheekRight','jawNeckRight'],
'rebuild':False},
'jawUnder':{'keys':['jawNeckRight','jawNeck','jawNeckLeft'],
'rebuild':False},
}
if _str_pose == 'human':
_d_curveCreation['cheekFrameLeft'] = {'keys':['orbFrontLeft','cheekBoneLeft','jawFrontLeft'],
'rebuild':False}
_d_curveCreation['cheekFrameRight'] = {'keys':['orbFrontRight','cheekBoneRight','jawFrontRight'],
'rebuild':False}
elif _str_pose in ['canine','beak']:
pass
#_d_curveCreation['cheekFrameLeft'] = {'keys':['orbFrontLeft','cheekBoneLeft','jawNeckLeft'],
# 'rebuild':False}
#_d_curveCreation['cheekFrameRight'] = {'keys':['orbFrontRight','cheekBoneRight','jawNeckRight'],
# 'rebuild':False}
d_curveCreation.update(_d_curveCreation)
#lip ---------------------------------------------------------------------
if self.lipSetup:
_str_lipSetup = self.getEnumValueString('lipSetup')
log.debug(cgmGEN.logString_sub(_str_func,'lip setup: {0}'.format(_str_lipSetup)))
#Declarations of keys...---------------------------------------------------------------------
_d_pairs = {}
_d = {}
l_sideKeys = ['cornerBag','cornerBack','cornerFront','cornerPeak',
'cornerUpr','cornerLwr',
'smile',
'uprOver','lwrOver',
'uprPeak','uprFront','uprBack',
'lwrPeak','lwrFront','lwrBack',
]
l_centerKeys = ['uprFront','uprPeak','uprBack','uprGum',
'uprOver','lwrOver',
'lwrFront','lwrPeak','lwrBack','lwrGum']
if _str_pose in ['canine','beak']:
l_sideKeys.extend(['uprPeakOut','uprFrontOut','uprBackOut','uprGumOut','uprOverOut',
'lwrPeakOut','lwrFrontOut','lwrBackOut','lwrGumOut','lwrOverOut'])
for k in l_centerKeys:
_d[k] = {'color':'yellowWhite','tagOnly':1,'arrow':0,'jointLabel':1,'vectorLine':0}
for k in l_sideKeys:
_d[k+'Left'] = {'color':'blueBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
_d[k+'Right'] = {'color':'redBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
#Mirror map left/right----------------------------------------------------------------
for k in l_sideKeys:
_d_pairs[k+'Left'] = k+'Right'
d_pairs.update(_d_pairs)#push to master list...
#Process scaleSpace------------------------------------------------------------------
get_handleScaleSpaces(_d,_d_scaleSpace,_str_pose,'Left','Right')
_keys = _d.keys()
_keys.sort()
l_order.extend(_keys)
d_creation.update(_d)
#Heirarchy Mapping -------------------------------------------------------------------
#d_toParent['lwrPeak'] = 'lwrFront'
#d_toParent['lwrBack'] = 'lwrFront'
#d_toParent['lwrGum'] = 'lwrFront'
#d_toParent['lwrPeakLeft'] = 'lwrFront'
#d_toParent['lwrPeakRight'] = 'lwrFront'
if _str_pose in ['canine','beak']:
for k2 in ['upr','lwr']:
for side in ['Left','Right','']:
for k in ['PeakOut','BackOut','GumOut']:
d_toParent[k2+k+side] = k2+'FrontOut'+side
for k2 in ['upr','lwr']:
for side in ['Left','Right','']:
for k in ['Peak','Back','Gum']:
d_toParent[k2+k+side] = k2+'Front'+side
d_toParent['uprFrontLeft'] = 'uprFront'
d_toParent['uprFrontRight'] = 'uprFront'
d_toParent['lwrFrontLeft'] = 'lwrFront'
d_toParent['lwrFrontRight'] = 'lwrFront'
for s in 'Left','Right':
for k in ['cornerUpr','cornerLwr','cornerBag',
'cornerBack','cornerPeak']:
d_toParent[k+s] = 'cornerFront'+s
l_mainHandles.extend(['cornerFrontLeft','cornerFrontRight',
'lwrFrontLeft','lwrFrontRight',
'uprFrontLeft','uprFrontRight',
'lwrFront','uprFront'])
if _str_pose in ['canine','beak']:
l_mainHandles.extend(['uprFrontOutLeft','lwrFrontOutLeft',
'uprFrontOutRight','lwrFrontOutRight'])
#Curve Mapping
_d_curveCreation = {'uprPeak':{'keys':['cornerFrontRight','uprPeakRight','uprPeak',
'uprPeakLeft','cornerFrontLeft'],
'rebuild':0},
'lwrPeak':{'keys':['cornerFrontRight','lwrPeakRight','lwrPeak','lwrPeakLeft','cornerFrontLeft'],
'color':'greenWhite',
'rebuild':0},
'uprLip':{'keys':['cornerFrontRight','uprFrontRight','uprFront','uprFrontLeft','cornerFrontLeft'],
'rebuild':0},
'lwrLip':{'keys':['cornerFrontRight','lwrFrontRight','lwrFront','lwrFrontLeft','cornerFrontLeft'],
'color':'greenWhite',
'rebuild':0},
'uprLipBack':{'keys':['cornerBackRight','uprBackRight','uprBack','uprBackLeft','cornerBackLeft'],
'rebuild':0},
'lwrLipBack':{'keys':['cornerBackRight','lwrBackRight','lwrBack',
'lwrBackLeft','cornerBackLeft'],
'color':'greenWhite',
'rebuild':0},
'lipCrossUpr':{'keys':['uprGum','uprBack','uprFront','uprPeak'],
'rebuild':0},
'lipCrossLwr':{'keys':['lwrGum','lwrBack','lwrFront','lwrPeak'],
'color':'greenBright',
'rebuild':0},
'lipCrossLwrLeft':{'keys':['lwrBackLeft','lwrFrontLeft','lwrPeakLeft'],
'color':'greenBright',
'rebuild':0},
'lipCrossUprLeft':{'keys':['uprBackLeft','uprFrontLeft','uprPeakLeft'],
'rebuild':0},
'lipCrossLwrRight':{'keys':['lwrBackRight','lwrFrontRight','lwrPeakRight'],
'color':'greenBright',
'rebuild':0},
'lipCrossUprRight':{'keys':['uprBackRight','uprFrontRight','uprPeakRight'],
'rebuild':0},
'lipCornerLeft':{'keys':['cornerBagLeft','cornerBackLeft',
'cornerFrontLeft','cornerPeakLeft'],
'color':'blueWhite',
'rebuild':0},
'lipCornerRight':{'keys':['cornerBagRight','cornerBackRight',
'cornerFrontRight','cornerPeakRight'],
'color':'redWhite',
'rebuild':0},
'smileLineLeft':{'keys':['cornerPeakLeft','jawFrontLeft'],
'color':'yellowWhite',
'rebuild':0},
'smileLineRight':{'keys':['cornerPeakRight','jawFrontRight'],
'color':'yellowWhite',
'rebuild':0},
'smileCrossLeft':{'keys':['cornerPeakLeft','smileLeft'],
'color':'yellowWhite',
'rebuild':0},
'smileCrossRight':{'keys':['cornerPeakRight','smileRight'],
'color':'yellowWhite',
'rebuild':0},
}
_d_curveCreation['lipCrossLwr']['keys'].append('lwrOver')
_d_curveCreation['lipCrossUpr']['keys'].append('uprOver')
_d_curveCreation['lipCrossUprRight']['keys'].append('uprOverRight')
_d_curveCreation['lipCrossUprLeft']['keys'].append('uprOverLeft')
_d_curveCreation['lipCrossLwrRight']['keys'].append('lwrOverRight')
_d_curveCreation['lipCrossLwrLeft']['keys'].append('lwrOverLeft')
if self.noseSetup:
_d_curveCreation['lipCrossUpr']['keys'].append('noseBase')
if _str_pose in ['canine','beak']:
_d_curveCreation['lipCrossLwrOutRight'] = {'keys':['lwrGumOutRight','lwrBackOutRight',
'lwrFrontOutRight','lwrPeakOutRight',
'lwrOverOutRight'],
'color':'greenBright',
'rebuild':0}
_d_curveCreation['lipCrossLwrOutLeft'] = {'keys':['lwrGumOutLeft','lwrBackOutLeft',
'lwrFrontOutLeft','lwrPeakOutLeft',
'lwrOverOutLeft'],
'color':'greenBright',
'rebuild':0}
_d_curveCreation['lipCrossUprOutRight'] = {'keys':['uprGumOutRight','uprBackOutRight',
'uprFrontOutRight','uprPeakOutRight',
'uprOverOutRight'],
'color':'greenBright',
'rebuild':0}
_d_curveCreation['lipCrossUprOutLeft'] = {'keys':['uprGumOutLeft','uprBackOutLeft',
'uprFrontOutLeft','uprPeakOutLeft',
'uprOverOutLeft'],
'color':'greenBright',
'rebuild':0}
#Snout...
_d_curveCreation['snoutLeft'] = {'keys':['nostrilBaseLeft','snoutTopLeft','cheekBoneLeft'],
'color':'blueWhite',
'rebuild':1}
_d_curveCreation['snoutRight'] = {'keys':['nostrilBaseRight','snoutTopRight','cheekBoneRight'],
'color':'redWhite',
'rebuild':1}
_d_curveCreation['uprLipOver'] = {'keys':['cornerPeakRight','cornerUprRight',
'uprOverOutRight','uprOverRight','uprOver',
'uprOverLeft','uprOverOutLeft',
'cornerUprLeft','cornerPeakLeft'],
'rebuild':0}
_d_curveCreation['lwrLipOver'] = {'keys':['cornerPeakRight','cornerLwrRight',
'lwrOverOutRight','lwrOverRight','lwrOver',
'lwrOverLeft','lwrOverOutLeft',
'cornerLwrLeft','cornerPeakLeft'],
'rebuild':0}
_d_curveCreation['uprPeak']['keys'].insert(1,'uprPeakOutRight')
_d_curveCreation['uprPeak']['keys'].insert(-1,'uprPeakOutLeft')
_d_curveCreation['uprLipBack']['keys'].insert(1,'uprBackOutRight')
_d_curveCreation['uprLipBack']['keys'].insert(-1,'uprBackOutLeft')
_d_curveCreation['uprLip']['keys'].insert(1,'uprFrontOutRight')
_d_curveCreation['uprLip']['keys'].insert(-1,'uprFrontOutLeft')
_d_curveCreation['lwrPeak']['keys'].insert(1,'lwrPeakOutRight')
_d_curveCreation['lwrPeak']['keys'].insert(-1,'lwrPeakOutLeft')
_d_curveCreation['lwrLipBack']['keys'].insert(1,'lwrBackOutRight')
_d_curveCreation['lwrLipBack']['keys'].insert(-1,'lwrBackOutLeft')
_d_curveCreation['lwrLip']['keys'].insert(1,'lwrFrontOutRight')
_d_curveCreation['lwrLip']['keys'].insert(-1,'lwrFrontOutLeft')
if self.noseSetup:
_d_curveCreation['lipCrossUprOutLeft']['keys'].extend(['snoutTopLeft','bridgeOuterLeft'])
_d_curveCreation['lipCrossUprOutRight']['keys'].extend(['snoutTopRight','bridgeOuterRight'])
else:#human
"""
_d_curveCreation['lipToChinRight'] = {'keys':['cornerPeakRight','jawFrontRight'],
'color':'yellowWhite',
'rebuild':0}
_d_curveCreation['lipToChinLeft'] = {'keys':['cornerPeakLeft','jawFrontLeft'],
'color':'yellowWhite',
'rebuild':0}"""
_d_curveCreation['uprLipOver'] = {'keys':['cornerPeakRight','cornerUprRight',
'uprOverRight','uprOver',
'uprOverLeft',
'cornerUprLeft','cornerPeakLeft'],
'rebuild':0}
_d_curveCreation['lwrLipOver'] = {'keys':['cornerPeakRight','cornerLwrRight',
'lwrOverRight','lwrOver',
'lwrOverLeft',
'cornerLwrLeft','cornerPeakLeft'],
'rebuild':0}
_d_curveCreation['smileLineLeft']['keys'].remove('jawFrontLeft')
_d_curveCreation['smileLineRight']['keys'].remove('jawFrontRight')
if self.chinSetup:
_d_curveCreation['lipCrossLwrLeft']['keys'].append('chinLeft')
_d_curveCreation['lipCrossLwrRight']['keys'].append('chinRight')
_d_curveCreation['lipCrossLwrLeft']['keys'].append('jawFrontLeft')
_d_curveCreation['lipCrossLwrRight']['keys'].append('jawFrontRight')
d_curveCreation.update(_d_curveCreation)
if self.noseSetup:
d_curveCreation['cheekLineLeft']['keys'].append('sneerLeft')
d_curveCreation['cheekLineRight']['keys'].append('sneerRight')
if _str_pose == 'canine':
d_curveCreation['smileLineLeft']['keys'].insert(0,'sneerLeft')
d_curveCreation['smileLineRight']['keys'].insert(0,'sneerRight')
d_curveCreation['smileLineLeft']['keys'].insert(1,'cheekBoneLeft')
d_curveCreation['smileLineRight']['keys'].insert(1,'cheekBoneRight')
if d_curveCreation.get('lipToChinLeft'):
d_curveCreation['lipToChinLeft']['keys'].insert(0,'sneerLeft')
d_curveCreation['lipToChinRight']['keys'].insert(0,'sneerRight')
else:
d_curveCreation['smileLineLeft']['keys'].insert(0,'nostrilTopLeft')
d_curveCreation['smileLineRight']['keys'].insert(0,'nostrilTopRight')
d_curveCreation['smileLineLeft']['keys'].insert(1,'nostrilLeft')
d_curveCreation['smileLineRight']['keys'].insert(1,'nostrilRight')
if d_curveCreation.get('lipToChinLeft'):
d_curveCreation['lipToChinLeft']['keys'].insert(0,'nostrilLeft')
d_curveCreation['lipToChinRight']['keys'].insert(0,'nostrilRight')
_d_curveCreation['lipCrossUprRight']['keys'].append('noseBaseRight')
_d_curveCreation['lipCrossUprLeft']['keys'].append('noseBaseLeft')
"""
d_curveCreation['overLipLeft'] = {'keys':['uprPeakLeft','noseBaseLeft',],
'color':'yellowWhite',
'rebuild':0}
d_curveCreation['overLipRight'] = {'keys':['uprPeakRight','noseBaseRight',],
'color':'yellowWhite',
'rebuild':0}"""
#d_curveCreation['overLip'] = {'keys':['uprPeak','noseBase',],
#'color':'yellowWhite',
#'rebuild':0}
if self.jawSetup:
#if _str_pose not in ['canine']:
#d_curveCreation['smileLineLeft']['keys'].insert(0,'cheekBoneLeft')
#d_curveCreation['smileLineRight']['keys'].insert(0,'cheekBoneRight')
if d_curveCreation.get('cheekFrameLeft'):
d_curveCreation['cheekFrameLeft']['keys'][-1]='smileLeft'
d_curveCreation['cheekFrameRight']['keys'][-1]='smileRight'
d_curveCreation['smileCrossLeft']['keys'].append('cheekLeft')
d_curveCreation['smileCrossRight']['keys'].append('cheekRight')
if self.chinSetup:
#d_curveCreation['smileLineLeft']['keys'][-1] = 'chinLeft'
#d_curveCreation['smileLineRight']['keys'][-1] = 'chinRight'
if d_curveCreation.get('cheekFrameLeft'):
d_curveCreation['cheekFrameLeft']['keys'].append('chinLeft')
d_curveCreation['cheekFrameRight']['keys'].append('chinRight')
"""
d_curveCreation['underLipLeft'] = {'keys':['lwrPeakLeft','underLipLeft',],
'color':'yellowWhite',
'rebuild':0}
d_curveCreation['underLipRight'] = {'keys':['lwrPeakRight','underLipRight',],
'color':'yellowWhite',
'rebuild':0}"""
#mouthbag ================================================================================
if self.lipSetup:
log.debug(cgmGEN.logString_sub(_str_func,'Mouth bag: {0}'.format(_str_lipSetup)))
#Declarations of keys...---------------------------------------------------------------------
_d_pairs = {}
_d = {}
l_sideKeys = ['mouthBag']
l_centerKeys = ['mouthBagTop','mouthBagBottom','mouthBagBack']
for k in l_centerKeys:
_d[k] = {'color':'yellowWhite','tagOnly':1,'arrow':0,'jointLabel':1,'vectorLine':0}
for k in l_sideKeys:
_d[k+'Left'] = {'color':'blueBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
_d[k+'Right'] = {'color':'redBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
#Mirror map left/right----------------------------------------------------------------
for k in l_sideKeys:
_d_pairs[k+'Left'] = k+'Right'
d_pairs.update(_d_pairs)#push to master list...
#Process scaleSpace------------------------------------------------------------------
get_handleScaleSpaces(_d,_d_scaleSpace,_str_pose,'Left','Right')
_keys = _d.keys()
_keys.sort()
l_order.extend(_keys)
d_creation.update(_d)
#Curve Mapping
_d_curveCreation = {'bagTop':{'keys':['mouthBagLeft','mouthBagTop','mouthBagRight'],
'rebuild':0},
'bagBottom':{'keys':['mouthBagLeft','mouthBagBottom','mouthBagRight'],
'rebuild':0},
'bagCrossHorizontal':{'keys':['mouthBagLeft','mouthBagBack','mouthBagRight'],
'rebuild':0},
'bagCrossVertical':{'keys':['mouthBagTop','mouthBagBack','mouthBagBottom'],
'rebuild':0},
}
d_curveCreation.update(_d_curveCreation)
if self.lipSetup:
d_curveCreation['bagCrossHorizontal']['keys'].insert(0,'cornerBagLeft')
d_curveCreation['bagCrossHorizontal']['keys'].append('cornerBagRight')
d_curveCreation['bagCrossVertical']['keys'].insert(0,'uprGum')
d_curveCreation['bagCrossVertical']['keys'].append('lwrGum')
#nose ================================================================================
if self.noseSetup:
_str_noseSetup = self.getEnumValueString('noseSetup')
log.debug(cgmGEN.logString_sub(_str_func,'noseSetup: {0}'.format(_str_noseSetup)))
_d_pairs = {}
_d = {}
l_sideKeys = ['sneer','nostrilTop','nostril','bridgeOuter',
'noseTop','bridge','bulb','noseTip','nostrilBase','noseBase',
'nostrilLineInner','nostrilLineOuter',
]
if _str_pose == 'canine':
l_sideKeys.append('snoutTop')
l_centerKeys = ['noseBase','noseUnder','noseTip','bulb','bridge','noseTop']
for k in l_centerKeys:
_d[k] = {'color':'yellowWhite','tagOnly':1,'arrow':0,'jointLabel':1,'vectorLine':0}
for k in l_sideKeys:
_d[k+'Left'] = {'color':'blueBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
_d[k+'Right'] = {'color':'redBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
#Mirror map left/right
for k in l_sideKeys:
_d_pairs[k+'Left'] = k+'Right'
d_pairs.update(_d_pairs)#push to master list...
#Process
get_handleScaleSpaces(_d,_d_scaleSpace,_str_pose,'Left','Right')
_keys = _d.keys()
_keys.sort()
l_order.extend(_keys)
d_creation.update(_d)
_d_curveCreation = {'noseProfile':{'keys':['noseTop','bridge','bulb','noseTip','noseUnder','noseBase'],
'rebuild':False},
'noseProfileLeft':{'keys':['noseTopLeft','bridgeLeft','bulbLeft',
'noseTipLeft','nostrilLineOuterLeft'],
'rebuild':False},
'noseProfileRight':{'keys':['noseTopRight','bridgeRight','bulbRight',
'noseTipRight','nostrilLineOuterRight'],
'rebuild':False},
'noseCross':{'keys':['nostrilRight','noseTipRight','noseTip',
'noseTipLeft','nostrilLeft'],
'rebuild':False},
'noseRight':{'keys':['sneerRight','bridgeOuterRight','nostrilTopRight','nostrilRight','nostrilBaseRight'],
'rebuild':False},
'noseLeft':{'keys':['sneerLeft','bridgeOuterLeft','nostrilTopLeft','nostrilLeft','nostrilBaseLeft'],
'rebuild':False},
#'noseLeft':{'keys':['sneerLeft','noseLeft'],
# 'rebuild':False},
#'noseUnder':{'keys':['nostrilBaseRight','noseUnder','nostrilBaseLeft'],
# 'rebuild':False},
'noseBridge':{'keys':['bridgeOuterRight',
'bridgeRight',
'bridge',
'bridgeLeft',
'bridgeOuterLeft'],
'rebuild':False},
'noseBase':{'keys':['nostrilBaseRight','noseBaseRight','noseBase','noseBaseLeft','nostrilBaseLeft'],'rebuild':False},
'nostrilRight':{'keys':['nostrilBaseRight','nostrilLineOuterRight',
'nostrilLineInnerRight','noseBaseRight'],
'rebuild':False},
'nostrilLeft':{'keys':['nostrilBaseLeft','nostrilLineOuterLeft',
'nostrilLineInnerLeft','noseBaseLeft'],
'rebuild':False},
'noseTipUnder':{'keys':['nostrilLineInnerRight',
'noseUnder',
'nostrilLineInnerLeft',
],'rebuild':False},
'noseBulb':{'keys':['nostrilTopRight','bulbRight','bulb','bulbLeft','nostrilTopLeft'],
'rebuild':False},
'bridgeTop':{'keys':['sneerRight','noseTopRight','noseTop','noseTopLeft','sneerLeft'],
'rebuild':False},
}
d_curveCreation.update(_d_curveCreation)
d_curveCreation['cheekLineLeft']['keys'].append('sneerLeft')
d_curveCreation['cheekLineRight']['keys'].append('sneerRight')
if self.jawSetup:
if _str_pose in ['human']:
d_curveCreation['frontPlaneLeft'] = {'keys':['nostrilTopLeft','cheekBoneLeft'],
'color':'blueWhite',
'rebuild':0}
d_curveCreation['frontPlaneRight'] = {'keys':['nostrilTopRight','cheekBoneRight'],
'color':'redWhite',
'rebuild':0}
if _str_pose == 'canine':
d_curveCreation['noseLeft']['keys'].insert(1,'bridgeOuterLeft')
d_curveCreation['noseRight']['keys'].insert(1,'bridgeOuterRight')
d_curveCreation['noseBridge']['keys'].append('bridgeOuterLeft')
d_curveCreation['noseBridge']['keys'].insert(0,'bridgeOuterRight')
if self.chinSetup:
_str_chinSetup = self.getEnumValueString('chinSetup')
log.debug(cgmGEN.logString_sub(_str_func,'chinSetup: {0}'.format(_str_chinSetup)))
_d_pairs = {}
_d = {}
l_sideKeys = ['chin',#'underLip',
]
#l_centerKeys = ['noseBase','noseUnder','noseTip','bulb','bridge','noseTop']
#for k in l_centerKeys:
# _d[k] = {'color':'yellowWhite','tagOnly':1,'arrow':0,'jointLabel':1,'vectorLine':0}
for k in l_sideKeys:
_d[k+'Left'] = {'color':'blueBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
_d[k+'Right'] = {'color':'redBright','tagOnly':1,'arrow':0,'jointLabel':0,'vectorLine':0}
#Mirror map left/right
for k in l_sideKeys:
_d_pairs[k+'Left'] = k+'Right'
d_pairs.update(_d_pairs)#push to master list...
#Process
get_handleScaleSpaces(_d,_d_scaleSpace,_str_pose,'Left','Right')
_keys = _d.keys()
_keys.sort()
l_order.extend(_keys)
d_creation.update(_d)
_d_curveCreation = {'chinLine':{'keys':['chinRight','chinLeft'],
'rebuild':False},
#'underLip':{'keys':['underLipRight','underLipLeft'],
# 'rebuild':False},
}
d_curveCreation.update(_d_curveCreation)
if self.lipSetup:
#d_curveCreation['smileLineLeft']['keys'][-1] = 'chinLeft'
#d_curveCreation['smileLineRight']['keys'][-1] = 'chinRight'
#d_curveCreation['lipToChinLeft']['keys'].insert(-1,'underLipLeft')
#d_curveCreation['lipToChinRight']['keys'].insert(-1,'underLipRight')
if d_curveCreation.get('lipToChinLeft'):
d_curveCreation['lipToChinLeft']['keys'].insert(-1,'chinLeft')
d_curveCreation['lipToChinRight']['keys'].insert(-1,'chinRight')
#if self.jawSetup:
#d_curveCreation['cheekFrameLeft']['keys'][-1] = 'chinLeft'
#d_curveCreation['cheekFrameRight']['keys'][-1] = 'chinRight'
#make em... ==============================================================================================
for tag in l_mainHandles:
d_creation[tag]['shape'] = 'locatorForm'
log.debug("|{0}| >> Make the handles...".format(_str_func))
md_res = self.UTILS.create_defineHandles(self, l_order, d_creation, _size / 10, mDefineNull, mBBShape)
md_handles = md_res['md_handles']
ml_handles = md_res['ml_handles']
for k,p in d_toParent.iteritems():
try:md_handles[k].p_parent = md_handles[p]
except Exception,err:
log.error(cgmGEN.logString_msg(_str_func,'{0} | {1}'.format(k,err)))
idx_ctr = 0
idx_side = 0
d = {}
for tag,mHandle in md_handles.iteritems():
if tag not in l_mainHandles:
if cgmGEN.__mayaVersion__ >= 2018:
mController = mHandle.controller_get()
mController.visibilityMode = 2
mHandle._verifyMirrorable()
_center = True
for p1,p2 in d_pairs.iteritems():
if p1 == tag or p2 == tag:
_center = False
break
if _center:
log.debug("|{0}| >> Center: {1}".format(_str_func,tag))
mHandle.mirrorSide = 0
mHandle.mirrorIndex = idx_ctr
idx_ctr +=1
mHandle.mirrorAxis = "translateX,rotateY,rotateZ"
#Self mirror wiring -------------------------------------------------------
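# Mirror convention used throughout this block: mirrorSide 0 = center,
# 1 = left, 2 = right. Paired handles share a mirrorIndex so the mirror
# tools can match a left handle to its right counterpart.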
for k,m in d_pairs.iteritems():
md_handles[k].mirrorSide = 1
md_handles[m].mirrorSide = 2
md_handles[k].mirrorIndex = idx_side
md_handles[m].mirrorIndex = idx_side
md_handles[k].doStore('mirrorHandle',md_handles[m])
md_handles[m].doStore('mirrorHandle',md_handles[k])
idx_side +=1
#Curves -------------------------------------------------------------------------
log.debug("|{0}| >> Make the curves...".format(_str_func))
for k,d in d_curveCreation.iteritems():
if "Left" in k:
d_curveCreation[k]['color'] = 'blueWhite'
elif "Right" in k:
d_curveCreation[k]['color'] = 'redWhite'
md_resCurves = self.UTILS.create_defineCurve(self, d_curveCreation, md_handles, mNoTransformNull)
self.msgList_connect('defineHandles',ml_handles)#Connect
self.msgList_connect('defineSubHandles',ml_handles)#Connect
self.msgList_connect('defineCurves',md_resCurves['ml_curves'])#Connect
return
#=============================================================================================================
#>> Form
#=============================================================================================================
def formDelete(self):
_str_func = 'formDelete'
log.debug("|{0}| >> ...".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
ml_defSubHandles = self.msgList_get('defineSubHandles')
for mObj in ml_defSubHandles:
mObj.template = False
mObj.v = 1
try:self.defineLoftMesh.template = False
except:pass
self.bbHelper.v = True
for mObj in self.msgList_get('defineCurves'):
mObj.template=0
mObj.v=1
@cgmGEN.Timer
def form(self):
try:
_str_func = 'form'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
_short = self.p_nameShort
#_baseNameAttrs = ATTR.datList_getAttrs(self.mNode,'nameList')
#Initial checks ===============================================================================
log.debug("|{0}| >> Initial checks...".format(_str_func)+ '-'*40)
#Create temple Null ==================================================================================
mFormNull = BLOCKUTILS.formNull_verify(self)
mNoTransformNull = self.atUtils('noTransformNull_verify','form')
mHandleFactory = self.asHandleFactory()
self.bbHelper.v = False
_size = MATH.average(self.baseSize[1:])
_str_pose = self.blockProfile#'human'
#Gather all our define dhandles and curves -----------------------------
log.debug("|{0}| >> Get our define curves/handles...".format(_str_func)+ '-'*40)
md_handles = {}
md_dCurves = {}
d_defPos = {}
ml_defineHandles = self.msgList_get('defineSubHandles')
for mObj in ml_defineHandles:
md_handles[mObj.handleTag] = mObj
d_defPos[mObj.handleTag] = mObj.p_position
for mObj in self.msgList_get('defineCurves'):
md_dCurves[mObj.handleTag] = mObj
#mObj.template=1
mObj.v = 0
#pprint.pprint(vars())
#
d_pairs = {}
d_creation = {}
l_order = []
d_curveCreation = {}
ml_subHandles = []
md_loftCreation = {}
pSmileR = False
pSmileL = False
d_handlePosDat = {}
d_color = {'left':'blueWhite',
'right':'redWhite',
'center':'yellowWhite'}
d_handleBase = {'tagOnly':True,'arrow':False,'jointLabel':0,'vectorLine':False}
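# d_handleBase is the template dict each handle copies before filling in
# its side color (from d_color) and position; keeping the shared display
# options in one dict means they only have to change in one place.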
#Main setup -----------------------------------------------------
if self.lipSetup:
log.debug("|{0}| >> lip setup...".format(_str_func))
_str_lipSetup = self.getEnumValueString('lipSetup')
#For now we just need to generate some extra data for one of our curves; the others can use the define curves directly
#We need to generate a couple of positions
p_lipCornerAdd_l = DGETAVG([d_defPos['cornerFrontLeft'],
d_defPos['cornerBackLeft']])
p_lipCornerAdd_r = DGETAVG([d_defPos['cornerFrontRight'],
d_defPos['cornerBackRight']])
d_lipDat = {'upr':{'count':self.numLipShapersUpr},
'lwr':{'count':self.numLipShapersLwr},
}
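# numLipShapersUpr/Lwr drive how many handles each lip curve is split into,
# so the same split logic below serves both lips at different densities.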
_l_clean = []
d_uprHandles = {}
for tag in 'upr','lwr':
#Get our base curves...
d_baseCurves = {}
d_handlePosDat_lips = {}
#LipOver
for t in ['Peak','LipBack']:
d_baseCurves[t] = md_dCurves[tag+t].mNode
#We need to redefine the lipCurve... ---------------------------------------------
l_pos = [p_lipCornerAdd_r,
#d_defPos['cornerFrontRight'],
d_defPos[tag+'FrontRight'],
d_defPos[tag+'Front'],
d_defPos[tag+'FrontLeft'],
#d_defPos['cornerFrontLeft'],
p_lipCornerAdd_l,
]
crv_lip = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'Peak')
d_baseCurves['Lip'] = crv_lip
_l_clean.append(crv_lip)
#Now we need a gum curve ...-----------------------------
l_pos = [d_defPos['cornerBagRight'],
d_defPos[tag+'Gum'],
d_defPos['cornerBagLeft'],
]
crv_gumLine = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'Gum')
d_baseCurves['Gum'] = crv_gumLine
_l_clean.append(crv_gumLine)
#Now we need an arc curve -----------------------------------------------
_res_tmp = mc.loft([md_dCurves[tag+'Peak'].mNode,
md_dCurves[tag+'Lip'].mNode],
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
#Get our curves...
crv_arc = SURF.get_surfaceSplitCurves(str_meshShape,count = 3, mode='u')[0]
_l_clean.extend(_res_tmp+[crv_arc])
d_baseCurves['Arc'] = crv_arc
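# The temporary Peak->Lip loft above is sliced in u to produce the 'Arc'
# row - an intermediate curve between Peak and Lip that is reused later
# as the lip line for the attach surface.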
_count = d_lipDat[tag]['count']
if MATH.is_even(_count):
b_even = True
log.debug("|{0}| >> lips | Even...".format(_str_func))
else:
b_even = False
log.debug("|{0}| >> lips | odd...".format(_str_func))
_keyCenter = None
l_keys_crv = []
l_curveOrder = ['Peak','Arc','Lip','LipBack','Gum']
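# These rows run from the outside of the lip inward to the gum; together
# with the mouth bag rows appended below they become the loft rows for this lip.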
#LipOver
if tag == 'upr':
#Now we need a mouthBagUp
l_pos = [d_defPos['mouthBagRight'],
d_defPos['mouthBagTop'],
d_defPos['mouthBagLeft'],
]
_crv = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'bagTop')
d_baseCurves['bagTop'] = _crv
_l_clean.append(_crv)
l_curveOrder.append('bagTop')
#Now we need a mouthBagBack
l_pos = [d_defPos['mouthBagRight'],
d_defPos['mouthBagBack'],
d_defPos['mouthBagLeft'],
]
_crv = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'bagBack')
d_baseCurves['bagBack'] = _crv
_l_clean.append(_crv)
l_curveOrder.append('bagBack')
else:
#Now we need a mouthBagUp
l_pos = [d_defPos['mouthBagRight'],
d_defPos['mouthBagBottom'],
d_defPos['mouthBagLeft'],
]
_crv = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'bagBottom')
d_baseCurves['bagBottom'] = _crv
_l_clean.append(_crv)
l_curveOrder.append('bagBottom')
for k in l_curveOrder:
crv = d_baseCurves[k]
_l_split = CURVES.getUSplitList(crv,_count,rebuild=1)
if tag == 'lwr' or k in ['Arc']:#Get rid of the ends because we share...
_l_split.pop(0)
_l_split.pop(-1)
if tag == 'upr' and k in ['bagBack']:
_l_split.pop(0)
_l_split.pop(-1)
#Now to split the positional data by left right
_mid = MATH.get_midIndex(len(_l_split))
if b_even:
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid:]
else:
_midV = _l_split[_mid]
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid+1:]
_keyCenter = "{0}_{1}_center".format(tag,k)
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['center']
d_use['pos'] = _midV
d_defPos[_keyCenter] = _midV
d_creation[_keyCenter] = d_use
l_order.append(_keyCenter)
_l_left.reverse()#reverse dat for mirror indexing
#Now we need to split out our handle create dat
l_handlesLeft = []
l_handlesRight = []
for i,p in enumerate(_l_right):
_key_l = "{0}_{1}_{2}_left".format(tag,k,i)
_key_r = "{0}_{1}_{2}_right".format(tag,k,i)
d_pairs[_key_l] = _key_r
l_order.extend([_key_l,_key_r])
#Right...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['right']
d_use['pos'] = p
d_creation[_key_r] = d_use
l_handlesRight.append(_key_r)
d_defPos[_key_r] = p
#Left...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['left']
d_use['pos'] = _l_left[i]
d_creation[_key_l] = d_use
d_defPos[_key_l] = _l_left[i]
l_handlesLeft.append(_key_l)
#Then curve create dat...
_keys = copy.copy(l_handlesRight)
if _keyCenter:
_keys.append(_keyCenter)
l_handlesLeft.reverse()
_keys.extend(l_handlesLeft)
if tag == 'upr':
d_uprHandles[k] = copy.copy(_keys)
elif k not in ['Arc']:
k_use = k
if k_use == 'bagBottom':
k_use = 'bagTop'
_keys.insert(0,d_uprHandles[k_use][0])
_keys.append(d_uprHandles[k_use][-1])
k_crv = "{0}_{1}".format(tag,k)
l_keys_crv.append(k_crv)
d_curveCreation[k_crv] = {'keys':_keys,
'rebuild':1}
#Setup base loft list
#for i,p in enumerate(_l_split):
#LOC.create(position=p,name = "{0}_{1}_{2}_loc".format(tag,k,i))
#Arc fix - share the upr_Peak corner keys so each arc terminates at the lip corners
d_curveCreation['{0}_Arc'.format(tag)]['keys'].insert(0, d_curveCreation['upr_Peak']['keys'][0])
d_curveCreation['{0}_Arc'.format(tag)]['keys'].append(d_curveCreation['upr_Peak']['keys'][-1])
if tag == 'lwr':
l_keys_crv.append('upr_bagBack')
l_keysLip = l_keys_crv[:5]
l_mouthBagKeys = l_keys_crv[4:]
if tag == 'lwr':
l_keysLip.reverse()
md_loftCreation[tag+'Lip'] = {'keys':l_keysLip,
'rebuild':{'spansU':7,'spansV':7,'degreeU':3},
'uDriver':'{0}.numLoftLip_u'.format(_short),
'vDriver':'{0}.numLoftLip_v'.format(_short),
'kws':{'noRebuild':True}}
if tag == 'lwr':
l_mouthBagKeys.reverse()
md_loftCreation[tag+'mouthBag'] = {'keys':l_mouthBagKeys,
'rebuild':{'spansU':7,'spansV':7,'degreeU':3},
'uDriver':'{0}.numLoftBag_u'.format(_short),
'vDriver':'{0}.numLoftLip_v'.format(_short),
'kws':{'noRebuild':True}}
"""
#Let's define our arc data....
log.debug("|{0}| >> {1} arc...".format(_str_func,tag))
#Generate a curve...
l_pos = [p_lipCornerAdd_r,
DGETAVG([d_defPos[tag+'front'],d_defPos[tag+'peak']]),
p_lipCornerAdd_l
]
crv_arc = CORERIG.create_at(create='curve',l_pos = l_pos,baseName = tag+'Peak')
_l_split = CURVES.getUSplitList(crv_arc,5,rebuild=0)
_l_split = _l_split[1:-1]#cull start end
_key_r = "{0}_arc_right".format(tag)
_key_l = "{0}_arc_left".format(tag)
_key_c = "{0}_arc_center".format(tag)
for i,p in enumerate(_l_split):"""
#Split...
#Handles...
#Curves...
"""
_res_tmp = mc.loft(l_bulbCurves,
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
l_knots = SURF.get_dat(str_meshShape, uKnots=True)['uKnots']
pprint.pprint(l_knots)
crv_bulb_2 = mc.duplicateCurve("{0}.u[{1}]".format(str_meshShape,MATH.average(l_knots[:2])),
ch = 0, rn = 0, local = 0)[0]
crv_bulb_4 = mc.duplicateCurve("{0}.u[{1}]".format(str_meshShape,MATH.average(l_knots[1:])),
ch = 0, rn = 0, local = 0)[0]
"""
mc.delete(_l_clean)
#pprint.pprint(d_curveCreation)
#return
if self.noseSetup:
log.debug("|{0}| >> nose setup...".format(_str_func))
_str_noseSetup = self.getEnumValueString('noseSetup')
_d_pairs = {}
d_handlePosDat_nose = {}
d_noseCurves = {}
d_noseHandles = {'bridge':
{'center':
{0:{},
1:{0:'bridge'},
2:{},
3:{0:'noseTop'}},
'left':
{0:{},
1:{0:'bridgeOuterLeft',
2:'bridgeLeft'},
2:{},
3:{0:'sneerLeft',
2:'noseTopLeft'},},
'right':
{0:{},
1:{0:'bridgeOuterRight',
2:'bridgeRight'},
2:{},
3:{0:'sneerRight',
2:'noseTopRight'},}},
'bulb':
{'center':
{0:{0:'noseBase'},
1:{0:'noseUnder'},
2:{},
3:{0:'noseTip'},
4:{},
5:{0:'bulb'}},
'left':
{0:{0:'nostrilBaseLeft',
2:'noseBaseLeft'
},
1:{0:'nostrilBaseLeft',
1:'nostrilLineOuterLeft',
2:'nostrilLineInnerLeft'},
2:{},
3:{0:'nostrilLeft',
3:'noseTipLeft'},
4:{},
5:{0:'nostrilTopLeft',
2:'bulbLeft'}},
'right':
{0:{0:'nostrilBaseRight',
2:'noseBaseRight'
},
1:{0:'nostrilBaseRight',
1:'nostrilLineOuterRight',
2:'nostrilLineInnerRight'},
2:{},
3:{0:'nostrilRight',
3:'noseTipRight'},
4:{},
5:{0:'nostrilTopRight',
2:'bulbRight'}},
}}
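# Schema: d_noseHandles[section][side][curve index][handle index] = handle tag.
# Entries already filled reuse define handles; the empty slots are filled
# procedurally below from d_handlePosDat_nose and become new form handles.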
#Position gather ---------------------------------------------------------------------
#We need some base positions....
#bulb...
d_handlePosDat_nose['bulb'] = {}
d_handlePosDat_nose['bulb']['center'] = {}
d_handlePosDat_nose['bulb']['center'][2] = {}
d_handlePosDat_nose['bulb']['center'][2][0] = DGETAVG([d_defPos['noseUnder'],
d_defPos['noseTip']])
#d_handlePosDat_nose['bulb']['center'][4] = {}
"""d_handlePosDat_nose['bulb']['center'][0][0] = DGETAVG([d_defPos['noseTip'],
d_defPos['bulb']])"""
#bridge...
d_handlePosDat_nose['bridge'] = {}
d_handlePosDat_nose['bridge']['center'] = {}
d_handlePosDat_nose['bridge']['center'][0] = {}
d_handlePosDat_nose['bridge']['center'][0][0] = DGETAVG([d_defPos['bulb'],
d_defPos['bridge']])
d_handlePosDat_nose['bridge']['center'][2] = {}
d_handlePosDat_nose['bridge']['center'][2][0] = DGETAVG([d_defPos['noseTop'],
d_defPos['bridge']])
"""
{section: side : curve idx: handle idx}
"""
#Sides...
_d_pos_bulb = d_handlePosDat_nose['bulb']#...connect
_d_pos_bridge = d_handlePosDat_nose['bridge']#...connect
_l_clean = []
for side in 'left','right':
_cap = STR.capFirst(side)
#Bulb...-----------------------------------------------------------------------------
#_d_pos_bulb[side] = {}#...declare
_d_tmp = {}
_d_pos_bulb[side] = _d_tmp#...declare
#Bulb 0...
_d_tmp[0] = {}
_d_tmp[0][1] = DGETAVG([d_defPos['noseBase'+_cap],
d_defPos['nostrilBase'+_cap]])
#We need some tmp stuff to find some curves
#Bulb 2...after
#Bulb 3...
_d_tmp[3] = {}
_d_tmp[3][1] = DPCTDIST(d_defPos['nostril'+_cap],
d_defPos['noseTip'+_cap],
.3)
_d_tmp[3][2] = DPCTDIST(d_defPos['nostril'+_cap],
d_defPos['noseTip'+_cap],
.6)
_d_tmp[3][4] = DGETAVG([d_defPos['noseTip'+_cap],
d_defPos['noseTip']])
#Bulb 4...after
#Bulb 5
_d_tmp[5] = {}
_d_tmp[5][1] = DGETAVG([d_defPos['nostrilTop'+_cap],
d_defPos['bulb'+_cap]])
_d_tmp[5][3] = DGETAVG([d_defPos['bulb'],
d_defPos['bulb'+_cap]])
#Bridge...-----------------------------------------------------------------------------
_d_tmp = {}
_d_pos_bridge[side] = _d_tmp#...declare
#Bridge 0...
_d_tmp[0] = {}
_d_tmp[0][0] = DGETAVG([d_defPos['bridgeOuter'+_cap],
d_defPos['nostrilTop'+_cap]])
_d_tmp[0][2] = DGETAVG([d_defPos['bridge'+_cap],
d_defPos['bulb'+_cap]])
_d_tmp[0][1] = DGETAVG([_d_tmp[0][0],
_d_tmp[0][2]])
#Bridge 1...
_d_tmp[1] = {}
_d_tmp[1][1] = DGETAVG([d_defPos['bridgeOuter'+_cap],
d_defPos['bridge'+_cap]])
#Bridge 2...
_d_tmp[2] = {}
_d_tmp[2][0] = DGETAVG([d_defPos['bridgeOuter'+_cap],
d_defPos['sneer'+_cap]])
_d_tmp[2][2] = DGETAVG([d_defPos['bridge'+_cap],
d_defPos['noseTop'+_cap]])
_d_tmp[2][1] = DGETAVG([_d_tmp[2][0],
_d_tmp[2][2]])
#Bridge 3...
_d_tmp[3] = {}
_d_tmp[3][1] = DGETAVG([d_defPos['noseTop'+_cap],
d_defPos['sneer'+_cap]])
crv_bulbBase = CORERIG.create_at(create='curve',l_pos = [d_defPos['nostrilBaseRight'],
d_defPos['nostrilLineOuterRight'],
d_defPos['nostrilLineInnerRight'],
d_defPos['noseUnder'],
d_defPos['nostrilLineInnerLeft'],
d_defPos['nostrilLineOuterLeft'],
d_defPos['nostrilBaseLeft'],
])
_l_clean.append(crv_bulbBase)
#We need a tmp loft for the bulb to get some data...
l_bulbCurves = [crv_bulbBase,
md_dCurves['noseCross'].mNode,
md_dCurves['noseBulb'].mNode
]
_res_tmp = mc.loft(l_bulbCurves,
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
l_knots = SURF.get_dat(str_meshShape, uKnots=True)['uKnots']
#pprint.pprint(l_knots)
crv_bulb_2 = mc.duplicateCurve("{0}.u[{1}]".format(str_meshShape,MATH.average(l_knots[:2])),
ch = 0, rn = 0, local = 0)[0]
crv_bulb_4 = mc.duplicateCurve("{0}.u[{1}]".format(str_meshShape,MATH.average(l_knots[1:])),
ch = 0, rn = 0, local = 0)[0]
#_l_pos = CURVES.getUSplitList(_crv,_split,rebuild=1)
_l_clean.extend([crv_bulb_2,crv_bulb_4] + _res_tmp)
#Splitting out values for the generated curves
for i,crv in enumerate([crv_bulb_2,crv_bulb_4]):
if not i:
_split = 11
_idx = 2
else:
_split = 9
_idx = 4
_l_split = CURVES.getUSplitList(crv,_split,rebuild=1)
_mid = MATH.get_midIndex(_split)
_midV = _l_split[_mid]
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid+1:]
_l_left.reverse()
_d_pos_bulb['center'][_idx] = {0:_midV}
_d_pos_bulb['right'][_idx] = {}
_d_pos_bulb['left'][_idx] = {}
for ii,v in enumerate(_l_right):
_d_pos_bulb['right'][_idx][ii] = v
for ii,v in enumerate(_l_left):
_d_pos_bulb['left'][_idx][ii] = v
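# Flatten the positional data into creation entries. Keys follow the pattern
# "{section}_{curveIdx}_{handleIdx}_{side}"; left keys are paired with their
# right twins in d_pairs for mirror wiring.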
for section,d_section in d_handlePosDat_nose.iteritems():
for side,d_crv in d_section.iteritems():
for i,d_pos in d_crv.iteritems():
for ii,p in d_pos.iteritems():
_key = "{0}_{1}_{2}_{3}".format(section,i,ii,side)
if side == 'left':d_pairs[_key] = "{0}_{1}_{2}_{3}".format(section,i,ii,'right')
l_order.append(_key)
d_defPos[_key] = p
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color[side]
d_use['pos'] = p
d_creation[_key] = d_use
d_noseHandles[section][side][i][ii] = _key
#LOC.create(position=p,name = "{0}_loc".format(_key))
#Loop to gather handles
for section,d_section in d_noseHandles.iteritems():
d_noseCurves[section] = {}
#Loop to gather handles
l_crvIdx = []
for side,d_crv in d_section.iteritems():
d_noseCurves[section][side] = {}
for i,d_handle in d_crv.iteritems():
if i not in l_crvIdx:
l_crvIdx.append(i)
k_crv = "{0}_{1}_{2}".format(section,i,side)
d_noseCurves[section][side][i] = {'key':k_crv,
'handles':[]}
for ii,handle in d_handle.iteritems():
d_noseCurves[section][side][i]['handles'].append(handle)
#Now we need to sort those handles
for i in l_crvIdx:
if not d_noseCurves[section]['right'].get(i):
continue
k_crv = "{0}_{1}".format(section,i)
l_r = d_noseCurves[section]['right'][i]['handles']
l_c = d_noseCurves[section]['center'][i]['handles']
l_l = d_noseCurves[section]['left'][i]['handles']
l_l.reverse()
d_curveCreation[k_crv] = {'keys':l_r + l_c + l_l,
'rebuild':1}
l_noseKeys = ['bulb_0','bulb_1','bulb_2','bulb_3','bulb_4','bulb_5',
'bridge_0','bridge_1','bridge_2','bridge_3']
l_noseKeys.reverse()
md_loftCreation['nose'] = {'keys':l_noseKeys,
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftNose_u'.format(_short),
'vDriver':'{0}.numLoftNose_v'.format(_short),
'kws':{'noRebuild':True}}
#pprint.pprint(d_noseHandles)
#pprint.pprint(d_curveCreation)
mc.delete(_l_clean)
if self.jawSetup:
log.debug("|{0}| >> Jaw setup...".format(_str_func))
_str_jawSetup = self.getEnumValueString('jawSetup')
_d_pairs = {}
d_handlePosDat_jaw = {}
_d_curveCreateDat = {
'cheek_0':{'h':{0:'orbFront',2:'orb',4:'jawTop'}},
'cheek_1':{},
'cheek_2':{},
'cheek_3':{},
'chin_0':{},
'chin_1':{},
}
"""
How do I need the data...
l_order - append handles to make
d_creation - keyed to order handle
crv_list - by handle key
surface lists
"""
d_jawCurves = {}
d_jawHandles = {'cheek':
{'left':
{0:{0:'orbFrontLeft',
2:'orbLeft',
4:'jawTopLeft'},
1:{0:'cheekBoneLeft'},
2:{0:'smileLeft',
2:'cheekLeft',
4:'jawLeft'},
3:{2:'jawNeckLeft'},
4:{}},
'right':
{0:{0:'orbFrontRight',
2:'orbRight',
4:'jawTopRight'},
1:{0:'cheekBoneRight'},
2:{0:'smileRight',
2:'cheekRight',
4:'jawRight'},
3:{2:'jawNeckRight'},
4:{}}},
'chin':
{'center':
{0:{4:'jawNeck'}},
'left':
{0:{0:'chinLeft',
2:'jawFrontLeft',
}},
'right':
{0:{0:'chinRight',
2:'jawFrontRight',
}}
}}
#'chin':
#{'center':{0:{0:{}}}}}
#pprint.pprint(d_jawHandles)
#return
#Position gather ---------------------------------------------------------------------
#We need some base positions....
d_handlePosDat_jaw['chin'] = {}
d_handlePosDat_jaw['chin']['center'] = {}
d_handlePosDat_jaw['chin']['center'][0] = {}
_d_chin = d_handlePosDat_jaw['chin']['center'][0]
_d_chin[0] = DGETAVG([d_defPos['chinLeft'],
d_defPos['chinRight']])
_d_chin[2] = DGETAVG([d_defPos['jawFrontLeft'],
d_defPos['jawFrontRight']])
_d_chin[1]= DGETAVG([_d_chin[0],
_d_chin[2]])
_d_chin[3] = DGETAVG([d_handlePosDat_jaw['chin']['center'][0][2],
d_defPos['jawNeck']])
"""
{section: side : curve idx: handle idx}
"""
#Sides...
d_handlePosDat_jaw['cheek'] = {}#...declare
_d_handle_pos = d_handlePosDat_jaw['cheek']#...connect
for side in 'left','right':
_d_tmp = {}
_d_handle_pos[side] = _d_tmp
d_handlePosDat_jaw['chin'][side]= {}#...declare
_l_clean = []
_cap = STR.capFirst(side)
crv_jawLeft = CORERIG.create_at(create='curve',l_pos = [d_defPos['jawTop'+_cap],
d_defPos['jaw'+_cap],
d_defPos['jawNeck']
])
_l_clean.append(crv_jawLeft)
#...cheek 0....
_d_tmp[0] = {}
_d_tmp[0][1] = DGETAVG([d_defPos['orbFront'+_cap],
d_defPos['orb'+_cap]])
_d_tmp[0][3] = DGETAVG([d_defPos['orb'+_cap],
d_defPos['jawTop'+_cap]])
#...cheek 1...
_d_tmp[1] = {}
_d_tmp[1][2] = DGETAVG([d_defPos['orb'+_cap],
d_defPos['cheek'+_cap]])
_d_tmp[1][1] = DGETAVG([_d_tmp[1][2],
d_defPos['cheekBone'+_cap]])
_d_tmp[1][4] = CRVPCT(crv_jawLeft,.2)
_d_tmp[1][3] = DGETAVG([_d_tmp[1][4],
_d_tmp[1][2]])
#...cheek 2...
_d_tmp[2] = {}
#_d_tmp[2][4] = CRVPCT(crv_jawLeft,.4)
_d_tmp[2][1] = DGETAVG([d_defPos['smile'+_cap],
d_defPos['cheek'+_cap]])
_d_tmp[2][3] = DGETAVG([d_defPos['cheek'+_cap],
d_defPos['jaw'+_cap]])#_d_tmp[2][4]])
#...cheek 3...
_d_tmp[3] = {}
crv_chinSplit = CORERIG.create_at(create='curveLinear',l_pos = [d_defPos['smile'+_cap],
d_defPos['chin'+_cap],
d_handlePosDat_jaw['chin']['center'][0][0]
])
_l_clean.append(crv_chinSplit)
_d_tmp[3][0] = CRVPCT(crv_chinSplit,.3)
crv_cheek3Split = CORERIG.create_at(create='curve',l_pos = [_d_tmp[3][0],
d_defPos['jawNeck'+_cap],
d_defPos['jaw'+_cap],
])
_l_clean.append(crv_cheek3Split)
_d_tmp[3][1] = CRVPCT(crv_cheek3Split,.2)
_d_tmp[3][4] = CRVPCT(crv_jawLeft,.6)
#...cheek 4...
_d_tmp[4] = {}
crv_4Find = CORERIG.create_at(create='curve',l_pos = [d_defPos['cheek'+_cap],
d_defPos['jawNeck'+_cap],
d_handlePosDat_jaw['chin']['center'][0][3],
])
_l_clean.append(crv_4Find)
_d_tmp[4][0] = CRVPCT(crv_chinSplit,.5)
_d_tmp[4][3] = CRVPCT(crv_jawLeft,.8)
_d_tmp[4][2] = DGETAVG([d_defPos['jawNeck'+_cap],
d_defPos['jawFront'+_cap]])
_d_tmp[4][1] = DGETAVG([_d_tmp[4][0] ,
_d_tmp[4][2] ])
#...chin...
_d_tmp = d_handlePosDat_jaw['chin'][side]
_d_tmp[0] = {}
_d_tmp[0][4] = CRVPCT(crv_jawLeft,.9)
_d_tmp[0][1] = DGETAVG([ d_defPos['chin'+_cap],
d_defPos['jawFront'+_cap]])
_d_tmp[0][3] = DGETAVG([d_defPos['jawFront'+_cap],
d_handlePosDat_jaw['chin'][side][0][4]])
mc.delete(_l_clean)
for section,d_section in d_handlePosDat_jaw.iteritems():
for side,d_crv in d_section.iteritems():
for i,d_pos in d_crv.iteritems():
for ii,p in d_pos.iteritems():
_key = "{0}_{1}_{2}_{3}".format(section,i,ii,side)
if side == 'left':d_pairs[_key] = "{0}_{1}_{2}_{3}".format(section,i,ii,'right')
l_order.append(_key)
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color[side]
d_use['pos'] = p
d_creation[_key] = d_use
d_defPos[_key] = p
d_jawHandles[section][side][i][ii] = _key
#LOC.create(position=p,name = "{0}_loc".format(_key))
for section,d_section in d_jawHandles.iteritems():
d_jawCurves[section] = {}
for side,d_crv in d_section.iteritems():
d_jawCurves[section][side] = {}
for i,d_handle in d_crv.iteritems():
k_crv = "{0}_{1}_{2}".format(section,i,side)
d_jawCurves[section][side][i] = {'key':k_crv,
'handles':[]}
for ii,handle in d_handle.iteritems():
d_jawCurves[section][side][i]['handles'].append(handle)
d_curveCreation[k_crv] = {'keys':d_jawCurves[section][side][i]['handles'],
'rebuild':True}
md_loftCreation['jaw'] = {'keys':['cheek_0_left','cheek_1_left','cheek_2_left',
'cheek_3_left','cheek_4_left',
'chin_0_left','chin_0_center','chin_0_right',
'cheek_4_right','cheek_3_right','cheek_2_right',
'cheek_1_right','cheek_0_right'],
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftJaw_u'.format(_short),
'vDriver':'{0}.numLoftJaw_v'.format(_short),
'kws':{'noRebuild':True}}
#pprint.pprint(d_jawHandles)
#pprint.pprint(d_jawCurves)
"""
if self.lipSetup:
pSmileR = DIST.get_average_position([md_handles['cheekBoneRight'].p_position,
md_handles['chinRight'].p_position])
pSmileL = DIST.get_average_position([md_handles['cheekBoneLeft'].p_position,
md_handles['chinLeft'].p_position])
_d['smileLeft'] = {'color':'blueSky','tagOnly':True,'arrow':False,'jointLabel':0,
'vectorLine':False,'pos':pSmileL}
_d['smileRight'] = {'color':'redWhite','tagOnly':True,'arrow':False,'jointLabel':0,
'vectorLine':False,'pos':pSmileR}
l_order.extend(['smileLeft','smileRight'])
_d_pairs['smileLeft']='smileRight'"""
# ==========================================================================================
# Bridges
# ==========================================================================================
log.debug(cgmGEN.logString_sub(_str_func,'Bridges'))
d_bridgeDat = {'upr':{},
'lwr':{}}
#for i,l in d_bridgeDat['upr'][side]['handles'].iteritems():
l_overHandles = []
l_underHandles = []
r_overHandles = []
r_underHandles = []
if self.lipSetup:#Bridge Lip setup ----------------------------------------------------------------------
_count_over = self.numLipOverSplit
_count_under = self.numLipUnderSplit
log.debug(cgmGEN.logString_sub(_str_func,'over lip'))
#Get our base curves to split our loft
l_curves_baseLoft = []
_l_clean = []
#Get our base curves...
d_baseCurves = {}
d_handlePosDat_lips = {}
d_overUnderDat = {'over':{},
'under':{}}
for t in ['Peak','LipOver']:
l_curves_baseLoft.append(md_dCurves['upr'+t].mNode)
if self.noseSetup:
#We need a new base curve...
l_baseTmp = ['cornerPeakRight',
'cornerFrontRight',
'uprPeakRight',
'uprPeak',
'uprPeakLeft',
'cornerFrontLeft',
'cornerPeakLeft']
crv_base = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in l_baseTmp],
baseName='base')
l_curves_baseLoft[0] = crv_base
_l_clean.append(crv_base)
#End Curve
l_nose_underTags = ['nostrilRight',
'bulb_2_0_right',
'nostrilBaseRight',
'bulb_0_1_right',
'noseBaseRight',
'noseBase',
'noseBaseLeft',
'bulb_0_1_left',
'nostrilBaseLeft',
'bulb_2_0_left',
'nostrilLeft']
#l_tmpTags = ['smileRight'] + l_nose_underTags + ['smileLeft']
crv_end = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in l_nose_underTags],
baseName='end')
_l_clean.append(crv_end)
l_curves_baseLoft.append(crv_end)
l_endKeys = copy.copy(l_nose_underTags)
d_tmp = {'right':[],'left':[]}
#for side in 'right','left':
#for i,l in d_bridgeDat['upr'][side]['handles'].iteritems():
#d_tmp[side].append(l[-1])
#l_endKeys = ['cheekBoneRight'] + d_tmp['right'] + l_nose_underTags + d_tmp['left'] + ['cheekBoneLeft']
d_curveCreation['overEnd'] = {'keys':l_endKeys,
'rebuild':1}
#Make our new over start curve
l_overStart = ['cornerPeakRight'] + d_curveCreation['upr_Peak']['keys'] + ['cornerPeakLeft']
d_curveCreation['overStart'] = {'keys':l_overStart,
'rebuild':1}
else:
l_nose_underTags = ['nostrilRight',
'nostrilBaseRight',
'noseBaseRight',
'noseBase',
'noseBaseLeft',
'nostrilBaseLeft',
'nostrilLeft']
l_endKeys = copy.copy(l_nose_underTags)
d_tmp = {'right':[],'left':[]}
d_curveCreation['overEnd'] = {'keys':l_endKeys,
'rebuild':1}
l_overStart = ['cornerPeakRight'] + d_curveCreation['upr_Peak']['keys'] + ['cornerPeakLeft']
d_curveCreation['overStart'] = {'keys':l_overStart,
'rebuild':1}
pass
#raise ValueError,"Finish this"
#Loft/baseCurves ----------------------------------------------------------------------------------
_res_tmp = mc.loft(l_curves_baseLoft,
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
_l_clean.extend(_res_tmp)
#Get our curves...
l_crvs = SURF.get_surfaceSplitCurves(str_meshShape,count = _count_over+2, mode='u')
_l_clean.extend(l_crvs)
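# numLipOverSplit controls how many intermediate join rows are generated
# between the upper lip peak and the underside of the nose; each row below
# is split into the same handle count as the upper lip so the columns line up.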
d_uprHandles = {}
#Get our handle values...
tag = 'overJoin'
l_keys_crv = []
for i,crv in enumerate(l_crvs):
_l_split = CURVES.getUSplitList(crv,d_lipDat['upr']['count'],rebuild=1)
l_handlesLeft = []
l_handlesRight = []
d_overUnderDat['over'][i] = {}
#Pop start and end as we'll use the upr handles
#_l_split.pop(0)
#_l_split.pop(-1)
#Now to split the positional data by left right
_mid = MATH.get_midIndex(len(_l_split))
if b_even:
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid:]
else:
_midV = _l_split[_mid]
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid+1:]
_keyCenter = "{0}_{1}_center".format(tag,i)
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['center']
d_use['pos'] = _midV
d_defPos[_keyCenter] = _midV
d_creation[_keyCenter] = d_use
l_order.append(_keyCenter)
_l_left.reverse()#reverse dat for mirror indexing
#Now we need to split out our handle create dat
for ii,p in enumerate(_l_right):
#if crv == l_crvs[-1] and p == _l_right[0]:
#l_handlesRight.append('smileRight')
#l_handlesLeft.append('smileLeft')
#continue
_key_l = "{0}_{1}_{2}_left".format(tag,i,ii)
_key_r = "{0}_{1}_{2}_right".format(tag,i,ii)
d_pairs[_key_l] = _key_r
l_order.extend([_key_l,_key_r])
#Right...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['right']
d_use['pos'] = p
d_creation[_key_r] = d_use
l_handlesRight.append(_key_r)
d_defPos[_key_r] = p
#Left...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['left']
d_use['pos'] = _l_left[ii]
d_creation[_key_l] = d_use
d_defPos[_key_l] = _l_left[ii]
l_handlesLeft.append(_key_l)
#LOC.create(position=_l_left[ii],name=_key_l)
#LOC.create(position=p,name=_key_r)
#Then curve create dat...
_keys = copy.copy(l_handlesRight)
if _keyCenter:
_keys.append(_keyCenter)
l_handlesLeft.reverse()
_keys.extend(l_handlesLeft)
l_overHandles.append(_keys[-1])
r_overHandles.append(_keys[0])
#_keys.insert(0,d_bridgeDat['upr']['right']['handles'][-1])
#_keys.append(d_bridgeDat['upr']['left']['handles'][-1])
d_uprHandles[i] = copy.copy(_keys)
d_overUnderDat['over'][i]['handles'] = copy.copy(_keys)
k_crv = "{0}_{1}".format(tag,i)
l_keys_crv.append(k_crv)
d_curveCreation[k_crv] = {'keys':_keys,
'rebuild':1}
#mc.delete(l_crvs + _res_tmp + [crv_start,crv_end])
#l_keys_crv.insert(0,'lwr_Peak')
#l_keys_crv.insert(0,md_loftCreation['uprLip']['keys'][0])
l_keys_crv.insert(0,'overStart')
l_keys_crv.append('overEnd')
l_keys_crv.reverse()
md_loftCreation['overLip'] = {'keys':l_keys_crv,
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftLipOver_u'.format(_short),
'vDriver':'{0}.numLoftLip_v'.format(_short),
'kws':{'noRebuild':True}}
mc.delete(_l_clean)
#Underlip ==================================================================================
log.debug(cgmGEN.logString_sub(_str_func,'under lip'))
l_curves_baseLoft = []
_l_clean = []
#Start..
l_baseTmp = ['cornerPeakRight',
'cornerFrontRight',
'lwrPeakRight',
'lwrPeak',
'lwrPeakLeft',
'cornerFrontLeft',
'cornerPeakLeft']
crv_base = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in l_baseTmp],
baseName='base')
l_curves_baseLoft.append( crv_base)
_l_clean.append(crv_base)
#End...
l_baseTmp = ['cornerLwrRight', 'lwrOverRight', 'lwrOver', 'lwrOverLeft', 'cornerLwrLeft']
crv_end = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in l_baseTmp],
baseName='base')
l_curves_baseLoft.append( crv_end)
_l_clean.append(crv_end)
#Make our new over start curve
l_underStart = ['cornerPeakRight'] + d_curveCreation['lwr_Peak']['keys'] + ['cornerPeakLeft']
d_curveCreation['underStart'] = {'keys':l_underStart,
'rebuild':1}
#Loft/baseCurves ----------------------------------------------------------------------------------
_res_tmp = mc.loft(l_curves_baseLoft,
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
_l_clean.extend(_res_tmp)
#Get our curves...
l_crvs = SURF.get_surfaceSplitCurves(str_meshShape,count = _count_under+2, mode='u',cullStartEnd=False)
#We need the end curve in this case...
mc.delete(l_crvs[0])
l_crvs.pop(0)
_l_clean.extend(l_crvs)
tag = 'underJoin'
l_keys_crv = []
for i,crv in enumerate(l_crvs):
_l_split = CURVES.getUSplitList(crv,d_lipDat['lwr']['count'],rebuild=1)
l_handlesLeft = []
l_handlesRight = []
d_overUnderDat['under'][i] = {}
#Pop start and end as we'll use the upr handles
#_l_split.pop(0)
#_l_split.pop(-1)
#Now to split the positional data by left right
_mid = MATH.get_midIndex(len(_l_split))
if b_even:
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid:]
else:
_midV = _l_split[_mid]
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid+1:]
_keyCenter = "{0}_{1}_center".format(tag,i)
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['center']
d_use['pos'] = _midV
d_defPos[_keyCenter] = _midV
d_creation[_keyCenter] = d_use
l_order.append(_keyCenter)
_l_left.reverse()#reverse dat for mirror indexing
#Now we need to split out our handle create dat
for ii,p in enumerate(_l_right):
#if crv == l_crvs[-1] and p == _l_right[0]:
#l_handlesRight.append('smileRight')
#l_handlesLeft.append('smileLeft')
#continue
_key_l = "{0}_{1}_{2}_left".format(tag,i,ii)
_key_r = "{0}_{1}_{2}_right".format(tag,i,ii)
d_pairs[_key_l] = _key_r
l_order.extend([_key_l,_key_r])
#Right...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['right']
d_use['pos'] = p
d_creation[_key_r] = d_use
l_handlesRight.append(_key_r)
d_defPos[_key_r] = p
#Left...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['left']
d_use['pos'] = _l_left[ii]
d_creation[_key_l] = d_use
d_defPos[_key_l] = _l_left[ii]
l_handlesLeft.append(_key_l)
#LOC.create(position=_l_left[ii],name=_key_l)
#LOC.create(position=p,name=_key_r)
#Then curve create dat...
_keys = copy.copy(l_handlesRight)
if _keyCenter:
_keys.append(_keyCenter)
l_handlesLeft.reverse()
_keys.extend(l_handlesLeft)
#_keys.insert(0,d_bridgeDat['upr']['right']['handles'][-1])
#_keys.append(d_bridgeDat['upr']['left']['handles'][-1])
d_overUnderDat['under'][i]['handles'] = copy.copy(_keys)
k_crv = "{0}_{1}".format(tag,i)
l_keys_crv.append(k_crv)
d_curveCreation[k_crv] = {'keys':_keys,
'rebuild':1}
#mc.delete(l_crvs + _res_tmp + [crv_start,crv_end])
#l_keys_crv.insert(0,'lwr_Peak')
l_underHandles.append(_keys[-1])
r_underHandles.append(_keys[0])
#l_keys_crv.insert(0,md_loftCreation['uprLip']['keys'][0])
l_keys_crv.insert(0,'underStart')
#l_keys_crv.append('overEnd')
md_loftCreation['underLip'] = {'keys':l_keys_crv,
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftLipUnder_u'.format(_short),
'vDriver':'{0}.numLoftLip_v'.format(_short),
'kws':{'noRebuild':True}}
mc.delete(_l_clean)
#We need to generate a surface for the lip handles to flow across -------------------------------------------------
#Need to make lipline curve
l_lipLineUpr = d_curveCreation['overStart']['keys'][:2] + d_curveCreation['upr_Arc']['keys'] + d_curveCreation['overStart']['keys'][-2:]
l_lipLineLwr = d_curveCreation['overStart']['keys'][:2] + d_curveCreation['lwr_Arc']['keys'] + d_curveCreation['overStart']['keys'][-2:]
d_curveCreation['uprLipLine'] = {'keys':l_lipLineUpr,
'rebuild':1}
d_curveCreation['lwrLipLine'] = {'keys':l_lipLineLwr,
'rebuild':1}
l_lipMask_crvs = md_loftCreation['overLip']['keys'] + ['uprLipLine','lwrLipLine'] + md_loftCreation['underLip']['keys']
md_loftCreation['attachLips'] = {'keys':l_lipMask_crvs,
'rebuild':{'spansU':12,'spansV':12,'degreeU':3},
#'uDriver':'{0}.numLoftLipUnder_u'.format(_short),
#'vDriver':'{0}.numLoftLip_v'.format(_short),
'kws':{'noRebuild':True}}
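# This combined sheet spans the over-lip rows, the lip lines and the
# under-lip rows; it is templated after creation (see the 'attach' check
# in the loft loop below) and is intended as the attach surface the lip
# handles ride on.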
if self.jawSetup:#Bridge...chin------------------------------------------------------------------
log.debug(cgmGEN.logString_sub(_str_func,'Bridge | jaw dat'))
if self.noseSetup:
log.debug(cgmGEN.logString_sub(_str_func,'Nose to Jaw Bridge'))
d_bridgeTargets = {'left':{
'start':['sneerLeft',
'bridge_2_0_left',
'bridgeOuterLeft',
'bridge_0_0_left',
'nostrilTopLeft',
'bulb_4_0_left',
'nostrilLeft'],
'end':['orbFrontLeft','cheekBoneLeft','smileLeft']},
'right':{
'start':['sneerRight',
'bridge_2_0_right',
'bridgeOuterRight',
'bridge_0_0_right',
'nostrilTopRight',
'bulb_4_0_right',
'nostrilRight',
],
'end':['orbFrontRight','cheekBoneRight','smileRight']}}
if l_overHandles:#Add in our over split handles if they're there
l_overHandles.reverse()
r_overHandles.reverse()
l_overHandles.append('cornerPeakLeft')
r_overHandles.append('cornerPeakRight')
d_bridgeTargets['left']['start'].extend(l_overHandles)
d_bridgeTargets['right']['start'].extend(r_overHandles)
if self.numBridgeSplit:
#First get our start curves to split
log.debug(cgmGEN.logString_msg(_str_func,'Split...'))
for side,d_side in d_bridgeTargets.iteritems():
d_tmpCurves = {}
d_dat = d_bridgeDat['upr']
d_dat[side] = {'handles':{},
'crvs':[]}
_cap = STR.capFirst(side)
#Declare our start /end
k_startCrv = 'uprJoin'+STR.capFirst(side)+'Start'
k_endCrv = 'uprJoin'+STR.capFirst(side)+'End'
d_curveCreation[k_startCrv] = {'keys':d_bridgeTargets[side]['start'],'rebuild':1}
d_curveCreation[k_endCrv] = {'keys':d_bridgeTargets[side]['end'],'rebuild':1}
for tag,keys in d_side.iteritems():
l_pos = []
for k in keys:
l_pos.append(d_defPos[k])
d_tmpCurves[tag] = CORERIG.create_at(create='curve',l_pos = l_pos)
_res_tmp = mc.loft([d_tmpCurves['start'],d_tmpCurves['end']],
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
#Get our curves...
l_crvs = SURF.get_surfaceSplitCurves(str_meshShape,count = self.numBridgeSplit + 2,
mode='u')
#Get our handle values...
for i,crv in enumerate(l_crvs):
_l_split = CURVES.getUSplitList(crv,3,rebuild=1)
d_dat[side]['handles'][i] = []
for ii,p in enumerate(_l_split):
_key = "uprJoin_{0}_{1}_{2}".format(i,ii,side)
if side == 'left':d_pairs[_key] = "uprJoin_{0}_{1}_{2}".format(i,ii,'right')
l_order.append(_key)
d_defPos[_key] = p
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color[side]
d_use['pos'] = p
d_creation[_key] = d_use
d_dat[side]['handles'][i].append(_key)
#LOC.create(position=p,name=_key)
k_crv = 'uprJoin_{0}_{1}'.format(i,side)
d_dat[side]['crvs'].append(k_crv)
d_curveCreation[k_crv] = {'keys':d_dat[side]['handles'][i],
'rebuild':1}
mc.delete([d_tmpCurves['start'],d_tmpCurves['end']] + l_crvs + _res_tmp)
l_crv_keys = [k_startCrv] + d_dat[side]['crvs'] + [k_endCrv]
if side == 'left':
l_crv_keys.reverse()
md_loftCreation['uprJoin'+_cap] = {'keys':l_crv_keys,
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftBridge_v'.format(_short),
'vDriver':'{0}.numLoftBridge_u'.format(_short),
'kws':{'noRebuild':True}}
#pprint.pprint(md_loftCreation['uprJoin'+_cap])
else:
log.debug(cgmGEN.logString_sub(_str_func,'simple bridge'))
d_curveCreation['noseToCheekLeftStart'] = {'keys':['sneerLeft',
'bridge_2_0_left',
'bridgeOuterLeft',
'bridge_0_0_left',
'nostrilTopLeft'],
'rebuild':1}
d_curveCreation['noseToCheekRightStart'] = {'keys':['sneerRight',
'bridge_2_0_right',
'bridgeOuterRight',
'bridge_0_0_right',
'nostrilTopRight'],
'rebuild':1}
d_curveCreation['noseToCheekLeftEnd'] = {'keys':['orbFrontLeft','cheekBoneLeft'],
'rebuild':0}
d_curveCreation['noseToCheekRightEnd'] = {'keys':['orbFrontRight', 'cheekBoneRight'],
'rebuild':0}
md_loftCreation['noseJoinLeft'] = {'keys':['noseToCheekLeftStart',
'noseToCheekLeftEnd'],
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftJaw_u'.format(_short),
'vDriver':'{0}.numLoftJaw_v'.format(_short),
'kws':{'noRebuild':True}}
md_loftCreation['noseJoinRight'] = {'keys':['noseToCheekRightStart',
'noseToCheekRightEnd'],
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftJaw_u'.format(_short),
'vDriver':'{0}.numLoftJaw_v'.format(_short),
'kws':{'noRebuild':True}}
if self.lipSetup:
log.debug(cgmGEN.logString_sub(_str_func,'Lip to jaw bridge'))
d_curveCreation['lipToChinEnd'] = {'keys':['smileRight',
'cheek_3_0_right',
'cheek_4_0_right',
'chinRight',
'chin_0_0_center',
'chinLeft',
'cheek_4_0_left',
'cheek_3_0_left',
'smileLeft'],
'rebuild':0}
d_bridgeTargets = {'start': ['cornerPeakRight',
'cornerFrontRight',
'lwrPeakRight',
'lwrPeak',
'lwrPeakLeft',
'cornerFrontLeft',
'cornerPeakLeft'],
'end':['smileRight',
'cheek_3_0_right',
'cheek_4_0_right',
'chinRight',
'chin_0_0_center',
'chinLeft',
'cheek_4_0_left',
'cheek_3_0_left',
'smileLeft']}
l_startKeys = ['cornerPeakRight']
if r_underHandles:
#r_underHandles.reverse()
l_startKeys.extend(r_underHandles)
l_startKeys.extend(d_curveCreation[md_loftCreation['underLip']['keys'][-1]]['keys'])
if l_underHandles:
l_underHandles.reverse()
l_startKeys.extend(l_underHandles)
l_startKeys.append('cornerPeakLeft')
d_bridgeTargets['start'] = l_startKeys
d_curveCreation['lipToChinStart'] = {'keys':l_startKeys,
'rebuild':1}
"""
if l_overHandles:#Add in our over split handles if they're there
l_overHandles.reverse()
r_overHandles.reverse()
d_bridgeTargets['left']['start'].extend(l_overHandles)
d_bridgeTargets['right']['start'].extend(r_overHandles)
l_overHandles.append('cornerPeakLeft')
r_overHandles.append('cornerPeakRight')"""
if self.numBridgeSplit:
#First get our start curves to split
log.debug(cgmGEN.logString_msg(_str_func,'Split...'))
crv_start = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in d_bridgeTargets['start']],baseName='start')
crv_end = CORERIG.create_at(create='curve',
l_pos = [d_defPos[k] for k in d_bridgeTargets['end']],baseName='end')
_res_tmp = mc.loft([crv_start,crv_end],
o = True, d = 1, po = 0, c = False,u=False, autoReverse=0,ch=True)
str_meshShape = TRANS.shapes_get(_res_tmp[0])[0]
#Get our curves...
l_crvs = SURF.get_surfaceSplitCurves(str_meshShape,count = self.numBridgeSplit + 2,
mode='u')
tag = 'lwrJoin'
l_keys_crv = []
for i,crv in enumerate(l_crvs):
_l_split = CURVES.getUSplitList(crv,d_lipDat['lwr']['count'],rebuild=1)
#Pop start and end as we'll use the upr handles
if self.noseSetup:
_l_split.pop(0)
_l_split.pop(-1)
#Now to split the positional data by left right
_mid = MATH.get_midIndex(len(_l_split))
if b_even:
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid:]
else:
_midV = _l_split[_mid]
_l_right = _l_split[:_mid]
_l_left = _l_split[_mid+1:]
_keyCenter = "{0}_{1}_center".format(tag,i)
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['center']
d_use['pos'] = _midV
d_defPos[_keyCenter] = _midV
d_creation[_keyCenter] = d_use
l_order.append(_keyCenter)
_l_left.reverse()#reverse dat for mirror indexing
#Now we need to split out our handle create dat
l_handlesLeft = []
l_handlesRight = []
for ii,p in enumerate(_l_right):
_key_l = "{0}_{1}_{2}_left".format(tag,i,ii)
_key_r = "{0}_{1}_{2}_right".format(tag,i,ii)
d_pairs[_key_l] = _key_r
l_order.extend([_key_l,_key_r])
#Right...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['right']
d_use['pos'] = p
d_creation[_key_r] = d_use
l_handlesRight.append(_key_r)
d_defPos[_key_r] = p
#Left...
d_use = copy.copy(d_handleBase)
d_use['color'] = d_color['left']
d_use['pos'] = _l_left[ii]
d_creation[_key_l] = d_use
d_defPos[_key_l] = _l_left[ii]
l_handlesLeft.append(_key_l)
#LOC.create(position=_l_left[ii],name=_key_l)
#LOC.create(position=p,name=_key_r)
#Then curve create dat...
_keys = copy.copy(l_handlesRight)
if _keyCenter:
_keys.append(_keyCenter)
l_handlesLeft.reverse()
_keys.extend(l_handlesLeft)
if self.noseSetup:
_keys.insert(0,d_bridgeDat['upr']['right']['handles'][i][-1])
_keys.append(d_bridgeDat['upr']['left']['handles'][i][-1])
k_crv = "{0}_{1}".format(tag,i)
l_keys_crv.append(k_crv)
d_curveCreation[k_crv] = {'keys':_keys,
'rebuild':1}
mc.delete(l_crvs + _res_tmp + [crv_start,crv_end])
#l_keys_crv.insert(0,md_loftCreation['lwrLip']['keys'][0])
l_keys_crv.insert(0,'lipToChinStart')
l_keys_crv.append('lipToChinEnd')
md_loftCreation['lipToChin'] = {'keys':l_keys_crv,
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftBridge_v'.format(_short),
'vDriver':'{0}.numLoftBridge_u'.format(_short),
'kws':{'noRebuild':True}}
else:
log.debug(cgmGEN.logString_sub(_str_func,'simple lwr bridge'))
md_loftCreation['lipToChin'] = {'keys':[md_loftCreation['lwrLip']['keys'][0],
'lipToChinEnd'],
'rebuild':{'spansU':30,'spansV':5,'degreeU':3},
'uDriver':'{0}.numLoftJaw_u'.format(_short),
'vDriver':'{0}.numLoftJaw_v'.format(_short),
'kws':{'noRebuild':True}}
#return
# ==========================================================================================
# Final bits
# ==========================================================================================
#Hiding unused define handles
l_dTagsUsed = []
for k,dat in d_curveCreation.iteritems():
for t in dat['keys']:
if t not in l_dTagsUsed:
l_dTagsUsed.append(t)
#l_dTagsUsed.sort()
#pprint.pprint(l_dTagsUsed)
md_res = self.UTILS.create_defineHandles(self, l_order, d_creation, _size / 10,
mFormNull)
ml_subHandles.extend(md_res['ml_handles'])
md_handles.update(md_res['md_handles'])
md_res = self.UTILS.create_defineCurve(self, d_curveCreation, md_handles,
mNoTransformNull,
crvType='formCrv')
md_resCurves = md_res['md_curves']
for k,d in md_loftCreation.iteritems():
ml_curves = [md_resCurves[k2] for k2 in d['keys']]
for mObj in ml_curves:
#mObj.template =1
mObj.v = 0
"""
self.UTILS.create_simpleFormLoftMesh(self,
[mObj.mNode for mObj in ml_curves],
mFormNull,
polyType = 'faceLoft',
d_rebuild = d.get('rebuild',{}),
baseName = k,
transparent = False,
vDriver = "{0}.numLidSplit_v".format(_short),
uDriver = "{0}.numLidSplit_u".format(_short),
**d.get('kws',{}))"""
mSurf = self.UTILS.create_simpleFormLoftMesh(self,
[mObj.mNode for mObj in ml_curves],
mFormNull,
polyType = 'faceNurbsLoft',
d_rebuild = d.get('rebuild',{}),
transparent = False,
baseName = k,
vDriver = d.get('vDriver'),#'"{0}.numLidSplit_v".format(_short),
uDriver = d.get('uDriver'),#"{0}.numLidSplit_u".format(_short),
**d.get('kws',{}))
if 'attach' in k:
mSurf.template = 1
#Mirror indexing -------------------------------------
log.debug("|{0}| >> Mirror Indexing...".format(_str_func)+'-'*40)
idx_ctr = 0
idx_side = 0
d = {}
for tag,mHandle in md_handles.iteritems():
if cgmGEN.__mayaVersion__ >= 2018:
mController = mHandle.controller_get()
mController.visibilityMode = 2
if mHandle in ml_defineHandles:
continue
mHandle._verifyMirrorable()
_center = True
for p1,p2 in d_pairs.iteritems():
if p1 == tag or p2 == tag:
_center = False
break
if _center:
log.debug("|{0}| >> Center: {1}".format(_str_func,tag))
mHandle.mirrorSide = 0
mHandle.mirrorIndex = idx_ctr
idx_ctr +=1
mHandle.mirrorAxis = "translateX,rotateY,rotateZ"
l_dTagsUsed.extend(['cornerFrontLeft','cornerFrontRight'])
for mHandle in ml_defineHandles:
if mHandle.handleTag not in l_dTagsUsed:
mHandle.v=False
else:
mHandle.v=True
#Self mirror wiring -------------------------------------------------------
for k,m in d_pairs.iteritems():
log.debug("{0} -|- {1}".format(k,m))
try:
md_handles[k].mirrorSide = 1
md_handles[m].mirrorSide = 2
md_handles[k].mirrorIndex = idx_side
md_handles[m].mirrorIndex = idx_side
md_handles[k].doStore('mirrorHandle',md_handles[m])
md_handles[m].doStore('mirrorHandle',md_handles[k])
idx_side +=1
except Exception,err:
log.error('Mirror error: {0}'.format(err))
self.msgList_connect('formHandles',ml_subHandles)#Connect
self.msgList_connect('formCurves',md_res['ml_curves'])#Connect
return
except Exception,err:
#raise Exception,err
cgmGEN.cgmExceptCB(Exception,err)
#=============================================================================================================
#>> Prerig
#=============================================================================================================
def prerigDelete(self):
self.noTransFormNull.v=True
self.formNull.template=False
for mObj in self.msgList_get('defineSubHandles') + self.msgList_get('formHandles'):
mLabel = mObj.getMessageAsMeta('jointLabel')
if mLabel:
mLabel.v=1
def create_handle(self,tag,pos,mJointTrack=None,
trackAttr=None,visualConnection=True,
nameEnd = 'BrowHandle'):
mHandle = cgmMeta.validateObjArg( CURVES.create_fromName('circle', size = _size_sub),
'cgmObject',setClass=1)
mHandle.doSnapTo(self)
mHandle.p_position = pos
mHandle.p_parent = mStateNull
mHandle.doStore('cgmName',tag)
mHandle.doStore('cgmType','formHandle')
mHandle.doName()
mHandleFactory.color(mHandle.mNode,controlType='sub')
self.connectChildNode(mHandle.mNode,'{0}{1}'.format(tag,nameEnd),'block')
return mHandle
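#NOTE: everything below this return is unreachable and references names
#(mLidRoot, mStateNull, _size_sub) not defined in this scope - it appears
#to be a leftover from the eyelid/brow block this helper was copied from
#(note the 'BrowHandle' default above).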
#joinHandle ------------------------------------------------
mJointHandle = cgmMeta.validateObjArg( CURVES.create_fromName('jack',
size = _size_sub*.75),
'cgmObject',
setClass=1)
mJointHandle.doStore('cgmName',tag)
mJointHandle.doStore('cgmType','jointHelper')
mJointHandle.doName()
mJointHandle.p_position = pos
mJointHandle.p_parent = mStateNull
mHandleFactory.color(mJointHandle.mNode,controlType='sub')
mHandleFactory.addJointLabel(mJointHandle,tag)
mHandle.connectChildNode(mJointHandle.mNode,'jointHelper','handle')
mTrackGroup = mJointHandle.doGroup(True,True,
asMeta=True,
typeModifier = 'track',
setClass='cgmObject')
if trackAttr and mJointTrack:
mPointOnCurve = cgmMeta.asMeta(CURVES.create_pointOnInfoNode(mJointTrack.mNode,turnOnPercentage=True))
mPointOnCurve.doConnectIn('parameter',"{0}.{1}".format(self.mNode,trackAttr))
mTrackLoc = mJointHandle.doLoc()
mPointOnCurve.doConnectOut('position',"{0}.translate".format(mTrackLoc.mNode))
mTrackLoc.p_parent = mNoTransformNull
mTrackLoc.v=False
mc.pointConstraint(mTrackLoc.mNode,mTrackGroup.mNode)
elif mJointTrack:
mLoc = mHandle.doLoc()
mLoc.v=False
mLoc.p_parent = mNoTransformNull
mc.pointConstraint(mHandle.mNode,mLoc.mNode)
res = DIST.create_closest_point_node(mLoc.mNode,mJointTrack.mNode,True)
#mLoc = cgmMeta.asMeta(res[0])
mTrackLoc = cgmMeta.asMeta(res[0])
mTrackLoc.p_parent = mNoTransformNull
mTrackLoc.v=False
mc.pointConstraint(mTrackLoc.mNode,mTrackGroup.mNode)
mAimGroup = mJointHandle.doGroup(True,True,
asMeta=True,
typeModifier = 'aim',
setClass='cgmObject')
mc.aimConstraint(mLidRoot.mNode,
mAimGroup.mNode,
maintainOffset = False, weight = 1,
aimVector = [0,0,-1],
upVector = [0,1,0],
worldUpVector = [0,1,0],
worldUpObject = self.mNode,
worldUpType = 'objectRotation' )
if visualConnection:
log.debug("|{0}| >> visualConnection ".format(_str_func, tag))
trackcrv,clusters = CORERIG.create_at([mLidRoot.mNode,
mJointHandle.mNode],#ml_handleJoints[1]],
'linearTrack',
baseName = '{0}_midTrack'.format(tag))
mTrackCrv = cgmMeta.asMeta(trackcrv)
mTrackCrv.p_parent = mNoTransformNull
mHandleFactory.color(mTrackCrv.mNode, controlType = 'sub')
for s in mTrackCrv.getShapes(asMeta=True):
s.overrideEnabled = 1
s.overrideDisplayType = 2
return mHandle
def prerig(self):
try:
_str_func = 'prerig'
log.debug("|{0}| >> {1}".format(_str_func,self)+ '-'*80)
self.blockState = 'prerig'
_side = self.UTILS.get_side(self)
self.atUtils('module_verify')
mStateNull = self.UTILS.stateNull_verify(self,'prerig')
mNoTransformNull = self.atUtils('noTransformNull_verify','prerig')
#self.noTransFormNull.v=False
#self.formNull.template=True
_offset = self.atUtils('get_shapeOffset')/4.0
_size = MATH.average(self.baseSize[1:])
_size_base = _size * .25
_size_sub = _size_base * .5
_size_anchor = _size_sub/4
_muzzleSize = _offset * 4.0
#mRoot = self.getMessageAsMeta('rootHelper')
mHandleFactory = self.asHandleFactory()
vec_self = self.getAxisVector('z+')
vec_selfUp = self.getAxisVector('y+')
vec_selfBack = self.getAxisVector('z-')
#---------------------------------------------------------------
log.debug("|{0}| >> Gather define/form handles/curves in a useful format...".format(_str_func))
d_pairs = {}
ml_handles = []
md_handles = {}
md_dHandles = {}
md_dCurves = {}
md_jointHandles = {}
ml_jointHandles = []
ml_defineHandles = []
d_basePosDat = {}
md_mirrorDat = {'center':[],
'left':[],
'right':[]}
for mObj in self.msgList_get('defineSubHandles') + self.msgList_get('formHandles'):
_handleTag = mObj.handleTag
md_dHandles[_handleTag] = mObj
#mLabel = mObj.getMessageAsMeta('jointLabel')
#if mLabel:
#mLabel.v=0
ml_defineHandles.append(mObj)
d_basePosDat[_handleTag] = mObj.p_position
for mObj in self.msgList_get('defineCurves') + self.msgList_get('formCurves') :
md_dCurves[mObj.handleTag] = mObj
mObj.template=1
d_baseHandeKWS = {'mStateNull' : mStateNull,
'mNoTransformNull' : mNoTransformNull,
'jointSize': self.jointRadius}
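# Shared kwargs for every handle creation call below; jointSize comes from
# the block's jointRadius attribute so all joint helpers scale together.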
#==================================================================================================
# Processing
#==================================================================================================
mCrv_lwrBack = self.getMessageAsMeta('lwr_LipBackFormCrv')
p_lwrLipBack = CRVPCT(mCrv_lwrBack.mNode, .5)
mCrv_lwrGum = self.getMessageAsMeta('lwr_GumFormCrv')
p_gumLwr = CRVPCT(mCrv_lwrGum.mNode, .5)
mCrv_uprGum = self.getMessageAsMeta('upr_GumFormCrv')
p_gumUpr = CRVPCT(mCrv_uprGum.mNode, .5)
#p_teethBase = DIST.get_pos_by_vec_dist(p_lwrLipBack,vec_selfBack,_offset)
dist_mouthWidth = DIST.get_distance_between_points(md_dHandles['cornerFrontLeft'].p_position,
md_dHandles['cornerFrontRight'].p_position)
if self.jawSetup:# Jaw setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'jaw'))
#Shape...
l_jaw = ['jawTopRight',
'jawRight',
'jawNeckRight',
'jawFrontRight',
'jawFrontLeft',
'jawNeckLeft',
'jawLeft',
'jawTopLeft']
_crv = CORERIG.create_at(create='curve',l_pos=[md_dHandles[k].p_position for k in l_jaw])
#md_dCurves['jawLine'].mNode
_shape = mc.offsetCurve(_crv,rn=0,cb=1,st=1,cl=1,cr=0,ch=0,
d=1,tol=.0001,sd=1,ugn=0,
distance =-_offset*2.0)
mc.delete(_crv)
mShape = cgmMeta.validateObjArg(_shape[0],'cgmControl',setClass=1)
mHandleFactory.color(mShape.mNode,side = 'center', controlType='main')
_d_name = {'cgmName':'jaw',
'cgmType':'jointHelper'}
_d_kws = copy.copy(d_baseHandeKWS)
_d_kws['jointSize'] *= 2
mShape,mDag = BLOCKSHAPES.create_face_handle(self, None,'jaw',None,'center',
mHandleShape=mShape,
size = _muzzleSize,
nameDict=_d_name,
aimGroup=0,
**_d_kws)
md_jointHandles['jawLwr'] = mDag
mDag.p_position = DGETAVG([md_dHandles['jawRight'].p_position,
md_dHandles['jawLeft'].p_position])
ml_jointHandles.append(mDag)
ml_handles.append(mShape)
md_handles['jaw'] = mShape
md_handles['jawJoint'] = mDag
md_mirrorDat['center'].extend([mShape,mDag])
mDag.p_parent = mStateNull
#Tongue =========================================================================================
_tongueSetup = self.tongueSetup
if _tongueSetup:#============================================================
log.debug(cgmGEN.logString_sub(_str_func,'tongue'))
if _tongueSetup == 1:
p_base = DGETAVG([p_lwrLipBack,p_gumLwr])
f_distLipLwr = DIST.get_distance_between_points(p_gumLwr, p_lwrLipBack)
p_tongue = DIST.get_pos_by_vec_dist(p_base,vec_selfBack,f_distLipLwr)
#------------------------------------------------------------
_d_name = {'cgmName':'tongue',
'cgmType':'jointHelper'}
_d_kws = copy.copy(d_baseHandeKWS)
_d_kws['jointSize'] *= 2
mShape,mDag = BLOCKSHAPES.create_face_handle(self, p_tongue,'tongue',None,'center',
mainShape = 'semiSphere',
size = 1.0,
nameDict=_d_name,
aimGroup=0,
**_d_kws)
TRANS.scale_to_boundingBox(mShape.mNode,
[dist_mouthWidth,f_distLipLwr,f_distLipLwr])
mShape.p_parent = mStateNull
mShape.p_position = p_tongue
md_handles['tongue'] = mDag
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['jawJoint'],
'tongue',mNoTransformNull)
#Teeth =========================================================================================
_teethUprSetup = self.teethUprSetup
_teethLwrSetup = self.teethLwrSetup
if _teethUprSetup:
log.debug(cgmGEN.logString_sub(_str_func,'teeth upr: {0}'.format(_teethUprSetup)))
if _teethUprSetup == 1:
f_distLip = DIST.get_distance_between_points(p_gumUpr, p_lwrLipBack)
p_shape = DIST.get_pos_by_vec_dist(DGETAVG([p_lwrLipBack,p_gumUpr]),
vec_self,
_offset)
_tag = 'teeth'+'Upr'
#------------------------------------------------------------
_d_name = {'cgmName':_tag,
'cgmType':'jointHelper'}
_d_kws = copy.copy(d_baseHandeKWS)
mShape,mDag = BLOCKSHAPES.create_face_handle(self, p_lwrLipBack,_tag,
None,'center',
mainShape = 'loftTriUp',
size = f_distLip * .75,
nameDict=_d_name,
aimGroup=0,
**_d_kws)
mShape.p_parent = mStateNull
mShape.p_position = p_shape
md_handles[_tag] = mDag
if _teethLwrSetup:
log.debug(cgmGEN.logString_sub(_str_func,'teeth lwr: {0}'.format(_teethLwrSetup)))
if _teethLwrSetup == 1:
f_distLip = DIST.get_distance_between_points(p_gumLwr, p_lwrLipBack)
p_shape = DIST.get_pos_by_vec_dist(DGETAVG([p_lwrLipBack,p_gumLwr]),
vec_self,
_offset)
_tag = 'teeth'+'Lwr'
#------------------------------------------------------------
_d_name = {'cgmName':_tag,
'cgmType':'jointHelper'}
_d_kws = copy.copy(d_baseHandeKWS)
mShape,mDag = BLOCKSHAPES.create_face_handle(self, p_lwrLipBack,_tag,
None,'center',
mainShape = 'loftTriDown',
size = f_distLip * .75,
nameDict=_d_name,
aimGroup=0,
**_d_kws)
mShape.p_parent = mStateNull
mShape.p_position = p_shape
md_handles[_tag] = mDag
if self.chinSetup:# chin setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'chin setup'))
str_chinSetup = self.getEnumValueString('chinSetup')
if str_chinSetup == 'single':
log.debug(cgmGEN.logString_msg(_str_func, str_chinSetup))
mSurf = self.jawFormLoft
_tag = 'chin'
_dTmp = {'cgmName':_tag}
#_dTmp = copy.copy(_d_name)
#_dTmp['cgmName'] = _tag
p_chinBase = DGETAVG([d_basePosDat['chinLeft'],
d_basePosDat['chinRight']])
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToDriver':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
p_chinBase,
_tag,
None,
'center',
offsetAttr = 'conDirectOffset',
**d_handleKWS)
#ml_handles.extend([mAnchor,mShape,mDag])
#md_handles[_tag] = mShape
#md_handles[_tag+'Joint'] = mDag
#ml_jointHandles.append(mDag)
#md_mirrorDat['center'].extend([mAnchor,mShape,mDag])
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['jawJoint'],
_tag,mNoTransformNull)
"""
mShape,mDag = BLOCKSHAPES.create_face_handle(self,
p_chinBase,
_tag,
None,
'center',
mSurface=mSurf,
mainShape='semiSphere',
size = _size_sub,
nameDict=_dTmp,
**d_baseHandeKWS)
"""
else:
raise ValueError,"Invalid chinSetup: {0}".format(str_chinSetup)
if self.muzzleSetup:#Muzzle ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'muzzle'))
_d_name = {'cgmName':'muzzle',
'cgmType':'jointHelper'}
pMuzzleBase = md_dHandles['bridge'].p_position
pMuzzleBase = DIST.get_pos_by_vec_dist(pMuzzleBase,
vec_selfUp,
_offset*2)
p = DIST.get_pos_by_vec_dist(pMuzzleBase,
vec_self,
-_offset*4)
mShape = cgmMeta.asMeta(CURVES.create_fromName('pyramid',size = _muzzleSize, direction = 'z+'))
mShape,mDag = BLOCKSHAPES.create_face_handle(self, p,'muzzle',None,'center',
mHandleShape=mShape,
size = _muzzleSize,
nameDict=_d_name,
aimGroup=0,
**d_baseHandeKWS)
BLOCKSHAPES.color(self,mShape)
TRANS.scale_to_boundingBox(mShape.mNode, [_muzzleSize,_muzzleSize,_muzzleSize/1.5])
mShape.p_position = DIST.get_pos_by_vec_dist(pMuzzleBase,
vec_self,
_offset*2)
mShape.p_parent = mStateNull
mDag.p_parent = mStateNull
ml_handles.append(mShape)
md_handles['muzzle'] = mShape
md_handles['muzzleJoint'] = mDag
ml_jointHandles.append(mDag)
md_jointHandles['muzzle'] = mDag
md_mirrorDat['center'].extend([mShape,mDag])
if self.sneerSetup:# Nose setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'sneer setup'))
str_sneerSetup = self.getEnumValueString('sneerSetup')
_d_name = {'cgmName':'sneer',
'cgmType':'handleHelper'}
if str_sneerSetup == 'single':
mSurf = self.noseFormLoft
#d_pairs['nostrilLeft'] = 'nostrilRight'
#d_pairs['nostrilLeftJoint'] = 'nostrilRightJoint'
for side in ['left','right']:
#Get our position
_cap = side.capitalize()
_tag = 'sneer'+_cap
log.debug(cgmGEN.logString_msg(_str_func, 'sneer | {0}'.format(_tag)))
mSurf = self.getMessageAsMeta('uprJoin{0}FormLoft'.format(_cap))
_dTmp = {'cgmName':'sneer',
'cgmDirection':side}
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToDriver':True,
'orientToSurf':False,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat['sneer'+_cap],
_tag,
None,
side,
offsetAttr = 'conDirectOffset',
**d_handleKWS)
if self.noseSetup:# Nose setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'nose setup'))
str_noseSetup = self.getEnumValueString('noseSetup')
_d_name = {'cgmName':'nose',
'cgmType':'handleHelper'}
if str_noseSetup == 'simple':
log.debug(cgmGEN.logString_msg(_str_func, str_noseSetup))
mSurf = self.noseFormLoft
#NoseBase -------------------------------------------------------------------
_tag = 'noseBase'
_dTmp = copy.copy(_d_name)
_dTmp['cgmName'] = _tag
p_noseBase = DGETAVG([d_basePosDat['nostrilLeft'],
d_basePosDat['nostrilRight']])
mShape,mDag = BLOCKSHAPES.create_face_handle(self,
p_noseBase,
'noseBase',
None,
'center',
mainShape='loftWideDown',
size = _size_sub*2.0,
nameDict=_dTmp,
**d_baseHandeKWS)
mShape.p_position = DGETAVG([d_basePosDat['noseTipLeft'],
d_basePosDat['noseTipRight']])
ml_handles.append(mShape)
md_handles[_tag] = mShape
md_handles[_tag+'Joint'] = mDag
ml_jointHandles.append(mDag)
md_mirrorDat['center'].extend([mShape,mDag])
vec_nose = MATH.get_vector_of_two_points(p_noseBase, d_basePosDat['noseTip'])
if self.numJointsNoseTip:#NoseTip ----------------------------------------------------
log.debug(cgmGEN.logString_msg(_str_func, 'nosetip...'))
_tag = 'noseTip'
_dTmp = copy.copy(_d_name)
_dTmp['cgmName'] = 'noseTip'
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToDriver':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat['noseTip'],
'noseTip',
None,
'center',
**d_handleKWS)
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['noseBaseJoint'],_tag,mNoTransformNull)
if self.numJointsNostril:#Nostrils --------------------------------------
#d_pairs['nostrilLeft'] = 'nostrilRight'
#d_pairs['nostrilLeftJoint'] = 'nostrilRightJoint'
for side in ['left','right']:
#Get our position
_tag = 'nostril'+side.capitalize()
log.debug(cgmGEN.logString_msg(_str_func, 'nosetip | {0}'.format(_tag)))
_dTmp = {'cgmName':'nostril',
'cgmDirection':side}
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToDriver':True,
'orientToSurf':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat['nostril'+side.capitalize()],
_tag,
None,
side,
offsetAttr = 'conDirectOffset',
**d_handleKWS)
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['noseBaseJoint'],
_tag,mNoTransformNull)
if self.cheekSetup:# cheek setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'Cheek setup'))
str_cheekSetup = self.getEnumValueString('cheekSetup')
if str_cheekSetup == 'single':
log.debug(cgmGEN.logString_msg(_str_func, 'single'))
mSurf = self.jawFormLoft
_d_name = {'cgmName':'cheek',
'cgmType':'handleHelper'}
d_pairs['cheekLeft'] = 'cheekRight'
d_pairs['cheekLeftJoint'] = 'cheekRightJoint'
for side in ['left','right']:
#Get our position
_tag = 'cheek'+side.capitalize()
log.debug(cgmGEN.logString_msg(_str_func, 'cheek | {0}'.format(_tag)))
_dTmp = copy.copy(_d_name)
_dTmp['cgmDirection'] = side
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToSurf':True,
'orientToDriver':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat[_tag],
_tag,
None,
side,
offsetAttr = 'conDirectOffset',
**d_handleKWS)
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['jawJoint'],
_tag,mNoTransformNull)
else:
raise ValueError,"Invalid cheekSetup: {0}".format(str_cheekSetup)
if self.cheekUprSetup:# cheek setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'Cheek Upr setup'))
str_cheekUprSetup = self.getEnumValueString('cheekUprSetup')
if str_cheekUprSetup == 'single':
log.debug(cgmGEN.logString_msg(_str_func, 'single'))
mSurf = self.jawFormLoft
_d_name = {'cgmName':'cheekUpr',
'cgmType':'handleHelper'}
d_pairs['cheekUprLeft'] = 'cheekUprRight'
d_pairs['cheekUprLeftJoint'] = 'cheekUprRightJoint'
for side in ['left','right']:
#Get our position
_tag = 'cheekUpr'+side.capitalize()
_handleKey = 'cheek_0_1_'+side
log.debug(cgmGEN.logString_msg(_str_func, 'cheek | {0}'.format(_tag)))
_dTmp = copy.copy(_d_name)
_dTmp['cgmDirection'] = side
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToSurf':True,
'orientToDriver':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat[_handleKey],
_tag,
None,
side,
offsetAttr = 'conDirectOffset',
**d_handleKWS)
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['jawJoint'],
_tag,mNoTransformNull)
else:
raise ValueError,"Invalid cheekSetup: {0}".format(str_cheekUprSetup)
if self.smileSetup:# cheek setup ============================================================
log.debug(cgmGEN.logString_sub(_str_func,'Smile setup'))
str_smileSetup = self.getEnumValueString('smileSetup')
if str_smileSetup == 'single':
log.debug(cgmGEN.logString_msg(_str_func, 'single'))
#mSurf = self.jawFormLoft
_d_name = {'cgmName':'smile',
'cgmType':'handleHelper'}
d_pairs['smileLeft'] = 'smileRight'
d_pairs['smileLeftJoint'] = 'smileRightJoint'
for side in ['left','right']:
#Get our position
_tag = 'smile'+side.capitalize()
mSurf = self.getMessageAsMeta('uprJoin{0}FormLoft'.format(side.capitalize()))
_handleKey = _tag#'smile'+side
log.debug(cgmGEN.logString_msg(_str_func, 'smile | {0}'.format(_tag)))
_dTmp = copy.copy(_d_name)
_dTmp['cgmDirection'] = side
d_handleKWS = {
'mode' : 'handle',
'mSurface':mSurf,
'handleShape' :'semiSphere',
'handleSize' : _size_sub,
'anchorSize' : _size_anchor,
'orientToSurf':True,
'orientToDriver':True,
'attachToSurf':True,
'nameDict':_dTmp,
'md_mirrorDat':md_mirrorDat,
'ml_handles':ml_handles,
'md_handles':md_handles,
'ml_jointHandles':ml_jointHandles,
}
d_handleKWS.update(d_baseHandeKWS)
mAnchor,mShape,mDag = BLOCKSHAPES.create_face_anchorHandleCombo(self,
d_basePosDat[_handleKey],
_tag,
None,
side,
offsetAttr = 'conDirectOffset',
**d_handleKWS)
BLOCKSHAPES.create_visualTrack(self, mDag, md_handles['jawJoint'],
_tag,mNoTransformNull)
else:
raise ValueError,"Invalid cheekSetup: {0}".format(str_smileSetup)
if self.lipSetup:
log.debug(cgmGEN.logString_sub(_str_func, 'lipSetup'))
log.debug(cgmGEN.logString_msg(_str_func, 'mouthMove'))
#------------------------------------------------------------
_d = {'cgmName':'mouthMove',
'cgmType':'shapeHelper'}
dist_width = DIST.get_distance_between_points(md_dHandles['cornerFrontLeft'].p_position,
md_dHandles['cornerFrontRight'].p_position)
mShape = cgmMeta.validateObjArg(CURVES.create_fromName(name='dumbell',
size=3.0,
direction='z+'),'cgmObject',setClass=1)
#mHandleFactory.buildBaseShape('dumbell',baseSize = 3.0, shapeDirection = 'z+')
mShape.p_parent = mStateNull
mShape.p_position = DIST.get_pos_by_vec_dist(DIST.get_average_position([md_dHandles['uprPeak'].p_position,
md_dHandles['lwrPeak'].p_position]),
vec_self,
_offset)
mHandleFactory.color(mShape.mNode)
RIGGEN.store_and_name(mShape,_d)
_d['cgmType'] = 'handleHelper'
mDag = mHandleFactory.buildBaseShape('sphere',baseSize = dist_width, shapeDirection = 'z+')
#TRANS.scale_to_boundingBox(mDag.mNode, [_muzzleSize,_muzzleSize,_muzzleSize/2.0])
mDag.p_parent = mStateNull
mDag.p_position = DIST.get_pos_by_vec_dist(md_dHandles['uprFront'].p_position,
vec_self,
-dist_width/2.0)
mHandleFactory.color(mDag.mNode,controlType='sub')
RIGGEN.store_and_name(mDag,_d)
mDag.doStore('shapeHelper',mShape)
mShape.doStore('dagHelper',mDag)
mDag.p_parent = mStateNull
mStateNull.connectChildNode(mDag, 'mouthMove'+'Dag','block')
        md_handles['mouthMove'] = mDag
        md_handles['mouthMoveShape'] = mShape
# Lips -------------------------------------------------------------------
log.debug(cgmGEN.logString_msg(_str_func, 'lips'))
d_anchorDat = {}
md_anchors = {}
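        #Split each lip peak curve into numConLips + 2 even positions and
        #bucket them into start/center/end runs via get_evenSplitDict.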
for tag in ['upr','lwr']:
mCrv = md_dCurves[tag+'_Peak']
#SURF.get_surfaceSplitCurves()
_l_split = CURVES.getUSplitList(mCrv.mNode,self.numConLips + 2,rebuild=1)
d_split = MATH.get_evenSplitDict(_l_split)
d_anchorDat[tag] = {}
for t,l in d_split.iteritems():
d_anchorDat[tag][t] = l
#for i,p in enumerate(l):
# LOC.create(position=p,name="{0}_{1}_{2}".format(tag,t,i))
#Lip Anchors....
_d = {'cgmName':'lip',
'cgmType':'preAnchor'}
mLipLoft = self.attachLipsFormLoft
for section,sectionDat in d_anchorDat.iteritems():
md_anchors[section] = {}
#_d['cgmPosition'] = section
_base = 0
if section == 'lwr':
_base = 1
l_tags = ["{0}Lip".format(section)]
for side,sideDat in sectionDat.iteritems():
if side == 'start':side='right'
elif side =='end':side = 'left'
md_anchors[section][side] = {}
md_anchors[section][side]['tags'] = []
md_anchors[section][side]['ml'] = []
d_tmp = md_anchors[section][side]
b_more = False
if len(sideDat) > 2:
b_more = True
if side == 'left':
sideDat.reverse()
if section == 'lwr' and len(sideDat)>1:
sideDat.pop(0)
for i,p in enumerate(sideDat):
if side == 'center':
tag = ''.join(l_tags)
else:
if not i and section == 'upr':
# l_use = copy.copy(l_tags)
#l_use.append('Corner')
#tag = ''.join(l_use)
tag = 'lipCorner'
else:
l_use = copy.copy(l_tags)
if b_more:l_use.append("_{0}".format(i+_base))
tag = ''.join(l_use)
#LOC.create(position=p,name=tag)
_dUse = copy.copy(_d)
_dUse['cgmName'] = tag
_dUse['cgmDirection'] = side
mAnchor = BLOCKSHAPES.create_face_anchor(self,p,
mLipLoft,
tag,
None,
side,
nameDict=_dUse,
mStateNull=mStateNull,
size= _size_sub/4)
#mAnchor.rotate = 0,0,0
d_tmp['tags'].append(tag)
d_tmp['ml'].append(mAnchor)
ml_handles.append(mAnchor)
md_mirrorDat[side].append(mAnchor)
#...get my anchors in lists...-----------------------------------------------------------------
ml_uprLeft = copy.copy(md_anchors['upr']['left']['ml'])
ml_uprLeft.reverse()
ml_uprRight = md_anchors['upr']['right']['ml']
ml_lwrLeft = copy.copy(md_anchors['lwr']['left']['ml'])
ml_lwrLeft.reverse()
ml_lwrRight = md_anchors['lwr']['right']['ml']
md_anchorsLists = {}
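        #The lwr list is book-ended with the upr corner anchors so both lip
        #curves share the same corner drivers.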
if md_anchors['upr'].get('center'):
ml_uprCenter = md_anchors['upr']['center']['ml']
ml_lwrCenter = md_anchors['lwr']['center']['ml']
md_anchorsLists['upr'] = ml_uprRight + ml_uprCenter + ml_uprLeft
md_anchorsLists['lwr'] = ml_uprRight[:1] + ml_lwrRight + ml_lwrCenter + ml_lwrLeft + ml_uprLeft[-1:]
else:
md_anchorsLists['upr'] = ml_uprRight + ml_uprLeft
md_anchorsLists['lwr'] = ml_uprRight[:1] + ml_lwrRight + ml_lwrLeft + ml_uprLeft[-1:]
#pprint.pprint(md_anchors)
#pprint.pprint(d_anchorDat)
#pprint.pprint(md_anchorsLists)
#...make our driver curves...---------------------------------------------------------------
log.debug(cgmGEN.logString_msg('driver curves'))
d_curveCreation = {}
for section,sectionDat in md_anchorsLists.iteritems():
#for side,dat in sectionDat.iteritems():
d_curveCreation[section+'Driver'] = {'ml_handles': sectionDat,
'rebuild':1}
#...anchor | aim ----------------------------------------------------------------------------
log.debug(cgmGEN.logString_msg('anchor | aim'))
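        #Corner anchors (upr, index 0) aim at the midpoint of the adjacent
        #upr/lwr anchors via a throwaway locator; all others aim down the
        #chain toward the center anchor.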
for tag,sectionDat in md_anchors.iteritems():
for side,sideDat in sectionDat.iteritems():
if side == 'center':
continue
if side == 'left':
_aim = [-1,0,0]
else:
_aim = [1,0,0]
for i,mDriver in enumerate(sideDat['ml']):
_mode = None
if tag == 'upr' and not i:
_mode = 'simple'
if _mode == 'simple':
loc = LOC.create(position = DGETAVG([md_anchors['upr'][side]['ml'][1].p_position,
md_anchors['lwr'][side]['ml'][0].p_position]))
mc.delete(mc.aimConstraint(loc,
mDriver.mNode,
maintainOffset = False, weight = 1,
aimVector = _aim,
upVector = [0,1,0],
worldUpVector = [0,1,0],
worldUpObject = self.mNode,
worldUpType = 'objectRotation'))
mc.delete(loc)
else:
                        try:_tar=sideDat['ml'][i+1].mNode
except:_tar=md_anchors[tag]['center']['ml'][0].mNode
mc.delete(mc.aimConstraint(_tar,
mDriver.mNode,
maintainOffset = False, weight = 1,
aimVector = _aim,
upVector = [0,1,0],
worldUpVector = [0,1,0],
worldUpObject = self.mNode,
worldUpType = 'objectRotation' ))
#Make our Lip handles...-------------------------------------------------------------------------
log.debug(cgmGEN.logString_sub('Handles'))
md_prerigDags = {}
md_jointHelpers = {}
_d = {'cgmName':''}
#...get our driverSetup
for section,sectionDat in md_anchors.iteritems():
log.debug(cgmGEN.logString_msg(section))
#md_handles[section] = {}
md_prerigDags[section] = {}
md_jointHelpers[section] = {}
if section == 'upr':
_mainShape = 'loftCircleHalfUp'
else:
_mainShape = 'loftCircleHalfDown'
for side,dat in sectionDat.iteritems():
log.debug(cgmGEN.logString_msg(side))
#md_handles[section][side] = []
md_prerigDags[section][side] = []
            md_jointHelpers[section][side] = []
_ml_shapes = []
_ml_prerigDags = []
_ml_jointShapes = []
_ml_jointHelpers = []
tag = section+'Lip'+STR.capFirst(side)
_ml_anchors = dat['ml']
if side == 'center':
mAnchor = _ml_anchors[0]
p = mAnchor.p_position
d_use = mAnchor.getNameDict(ignore=['cgmType'])
mShape, mDag = BLOCKSHAPES.create_face_handle(self,p,
tag,
None,
side,
mDriver=mAnchor,
mSurface=mLipLoft,
mainShape=_mainShape,
jointShape='locatorForm',
controlType='main',#_controlType,
mode='handle',
depthAttr = 'jointDepthLip',
plugDag= 'preDag',
plugShape= 'preShape',
attachToSurf=True,
orientToDriver = True,
nameDict= d_use,**d_baseHandeKWS)
_ml_shapes.append(mShape)
_ml_prerigDags.append(mDag)
else:
#mCrv = md_resCurves.get(section+'Driver')
#if mCrv:
for i,mAnchor in enumerate(_ml_anchors):
_shapeUse = _mainShape
if section == 'upr' and not i:
if side == 'left':
_shapeUse = 'widePos'
else:
_shapeUse = 'wideNeg'
p = mAnchor.p_position
d_use = mAnchor.getNameDict(ignore=['cgmType'])
mShape, mDag = BLOCKSHAPES.create_face_handle(self,p,
tag,
None,
side,
mDriver=mAnchor,
mSurface=mLipLoft,
mainShape=_shapeUse,
jointShape='locatorForm',
depthAttr = 'jointDepthLip',
controlType='main',#_controlType,
mode='handle',
plugDag= 'preDag',
plugShape= 'preShape',
attachToSurf=True,
orientToDriver = True,
nameDict= d_use,**d_baseHandeKWS)
_ml_shapes.append(mShape)
_ml_prerigDags.append(mDag)
mStateNull.msgList_connect('{0}PrerigShapes'.format(tag),_ml_shapes)
mStateNull.msgList_connect('{0}PrerigHandles'.format(tag),_ml_prerigDags)
md_mirrorDat[side].extend(_ml_shapes + _ml_prerigDags)
md_prerigDags[section][side] = _ml_prerigDags
ml_handles.extend(_ml_shapes + _ml_prerigDags)
#...get control joint handles...-----------------------------------------------------------------
ml_uprCenter = md_prerigDags['upr']['center']
ml_uprLeft = copy.copy(md_prerigDags['upr']['left'])
ml_uprLeft.reverse()
ml_uprRight = md_prerigDags['upr']['right']
ml_lwrCenter = md_prerigDags['lwr']['center']
ml_lwrLeft = copy.copy(md_prerigDags['lwr']['left'])
ml_lwrLeft.reverse()
ml_lwrRight = md_prerigDags['lwr']['right']
md_handleCrvDrivers = {}
md_handleCrvDrivers['upr'] = ml_uprRight + ml_uprCenter + ml_uprLeft
md_handleCrvDrivers['lwr'] = ml_uprRight[:1] + ml_lwrRight + ml_lwrCenter + ml_lwrLeft + ml_uprLeft[-1:]
#pprint.pprint(md_anchors)
#pprint.pprint(d_anchorDat)
#pprint.pprint(md_crvDrivers)
#...make our driver curves...---------------------------------------------------------------
log.debug(cgmGEN.logString_msg('driven curves'))
for section,sectionDat in md_handleCrvDrivers.iteritems():
d_curveCreation[section+'Driven'] = {'ml_handles': sectionDat,
'rebuild':1}
md_res = self.UTILS.create_defineCurve(self, d_curveCreation, {}, mNoTransformNull,'preCurve')
md_resCurves = md_res['md_curves']
ml_resCurves = md_res['ml_curves']
#Joint handles =============================================================================
log.debug(cgmGEN.logString_sub('joints'))
d_lipDrivenDat = {}
d_lipDriverDat = {}
md_lipDrivers = {}
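    #Curve roles: *Driver curves ride the anchors, *Driven curves ride the
    #prerig handles, and *Result curves (built below) ride the joint helpers.
    #Each lip driver null attaches to its Driver curve while the joint shapes
    #attach to the Driven curve.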
    #...get our split data ---------------------------------------------------------------------
log.debug(cgmGEN.logString_msg('joints | split data'))
for tag in 'upr','lwr':
mDriverCrv = md_resCurves[tag+'Driver']
mDrivenCrv = md_resCurves[tag+'Driven']
#_crv = CORERIG.create_at(create='curveLinear',
# l_pos=[mObj.p_position for mObj in md_handleCrvDrivers[tag]])
_count = self.getMayaAttr('numJointsLip'+tag.capitalize())
l_driverPos = CURVES.getUSplitList(mDriverCrv.mNode,_count + 2,rebuild=0)
l_drivenPos = CURVES.getUSplitList(mDrivenCrv.mNode,_count + 2,rebuild=0)
d_split_driven = MATH.get_evenSplitDict(l_drivenPos)
d_split_driver = MATH.get_evenSplitDict(l_driverPos)
d_lipDrivenDat[tag] = {}
d_lipDriverDat[tag] = {}
for t,l in d_split_driven.iteritems():
d_lipDrivenDat[tag][t] = l
#for i,p in enumerate(l):
#LOC.create(position=p,name="{0}_{1}_{2}".format(tag,t,i))
for t,l in d_split_driver.iteritems():
d_lipDriverDat[tag][t] = l
_d = {'cgmName':'lip',
'cgmType':'preAnchor'}
_sizeDirect = _size_sub * .4
md_lipJoints = {}
for section,sectionDat in d_lipDrivenDat.iteritems():
mDriverCrv = md_resCurves[section+'Driver']
mDriverCrv.v = 0
mDrivenCrv = md_resCurves[section+'Driven']
mDrivenCrv.v = 0
md_lipJoints[section] = {}
md_lipDrivers[section] = {}
#_d['cgmPosition'] = section
_base = 0
if section == 'lwr':
_base = 1
for side,sideDat in sectionDat.iteritems():
driverDat = d_lipDriverDat[section][side]
if side == 'start':side='right'
elif side =='end':side = 'left'
_ml_jointShapes = []
_ml_jointHelpers = []
_ml_lipDrivers = []
md_lipJoints[section][side] = []
md_lipDrivers[section][side] = []
l_bridge = md_lipJoints[section][side]
l_tags = ['{0}Lip'.format(section)]
b_more = False
if len(sideDat) > 2:
b_more = True
if side == 'left':
sideDat.reverse()
driverDat.reverse()
if section == 'lwr' and len(sideDat)>1:
sideDat.pop(0)
driverDat.pop(0)
for i,p_driven in enumerate(sideDat):
p_driver = driverDat[i]
_dUse = copy.copy(_d)
if side == 'center':
tag = ''.join(l_tags)
else:
if not i and section == 'upr':
#l_use = copy.copy(l_tags)
#l_use.append('Corner')
#tag = ''.join(l_use)
tag = 'lipCorner'
#_dUse['cgmDirectionModifier'] = 'corner'
else:
l_use = copy.copy(l_tags)
if b_more:l_use.append("_{0}".format(i+_base))
tag = ''.join(l_use)
#_dUse['cgmIterator'] = i+_base
_dUse['cgmName'] = tag#'lip' #+ STR.capFirst(tag)
_dUse['cgmDirection'] = side
#Driver ...
mDriver = self.doCreateAt(setClass=1)#self.doLoc()#
mDriver.rename("{0}_{1}_{2}_{3}_driver".format(section, side,_dUse['cgmName'],i))
mDriver.p_position = p_driver
mDriver.p_parent = mNoTransformNull#mStateNull
_res = RIGCONSTRAINT.attach_toShape(mDriver.mNode,mDriverCrv.mNode,'conPoint')
TRANS.parent_set(_res[0], mNoTransformNull.mNode)
mShape, mDag = BLOCKSHAPES.create_face_handle(self,
p_driven,tag,None,side,
mDriver=mDriver,
mSurface = mLipLoft,
mAttachCrv = mDrivenCrv,
mainShape='semiSphere',
#jointShape='sphere',
size= _sizeDirect,
mode='joint',
controlType='sub',
plugDag= 'jointHelper',
plugShape= 'directShape',
offsetAttr = 'conDirectOffset',
attachToSurf=True,
orientToDriver=True,
nameDict= _dUse,**d_baseHandeKWS)
md_mirrorDat[side].append(mShape)
md_mirrorDat[side].append(mDag)
_ml_jointShapes.append(mShape)
_ml_jointHelpers.append(mDag)
_ml_lipDrivers.append(mDriver)
tag = section+'Lip'+STR.capFirst(side)
mStateNull.msgList_connect('{0}JointHelpers'.format(tag),_ml_jointHelpers)
mStateNull.msgList_connect('{0}JointShapes'.format(tag),_ml_jointShapes)
md_jointHelpers[section][side] = _ml_jointHelpers
ml_handles.extend(_ml_jointShapes)
ml_handles.extend(_ml_jointHelpers)
md_mirrorDat[side].extend(_ml_jointShapes + _ml_jointHelpers)
md_lipDrivers[section][side] = _ml_lipDrivers
#Aim our lip drivers...------------------------------------------------------------------
log.debug(cgmGEN.logString_msg('aim lip drivers'))
for tag,sectionDat in md_lipDrivers.iteritems():
for side,sideDat in sectionDat.iteritems():
ml_check = md_anchorsLists[tag]
l_check = [mObj.mNode for mObj in ml_check]
if side == 'left':
_aim = [-1,0,0]
else:
_aim = [1,0,0]
for i,mDriver in enumerate(sideDat):
_mode = None
if tag == 'upr' and not i:
_mode = 'simple'
if side == 'center':
_mode = 'simple'
_closest = DIST.get_closestTarget(mDriver.mNode,l_check)
if _mode == 'simple':
mc.orientConstraint(_closest, mDriver.mNode, maintainOffset = False)
else:
if mDriver == sideDat[-1]:
_tar = md_lipDrivers[tag]['center'][0].mNode
else:
_tar = sideDat[i+1].mNode
mc.aimConstraint(_tar,
mDriver.mNode,
maintainOffset = False, weight = 1,
aimVector = _aim,
upVector = [0,0,1],
worldUpVector = [0,0,1],
worldUpObject = _closest,
worldUpType = 'objectRotation' )
#Driven Curve
ml_uprCenter = md_jointHelpers['upr']['center']
ml_uprLeft = copy.copy(md_jointHelpers['upr']['left'])
ml_uprLeft.reverse()
ml_uprRight = md_jointHelpers['upr']['right']
ml_lwrCenter = md_jointHelpers['lwr']['center']
ml_lwrLeft = copy.copy(md_jointHelpers['lwr']['left'])
ml_lwrLeft.reverse()
ml_lwrRight = md_jointHelpers['lwr']['right']
md_crvDrivers = {}
md_crvDrivers['upr'] = ml_uprRight + ml_uprCenter + ml_uprLeft
md_crvDrivers['lwr'] = ml_uprRight[:1] + ml_lwrRight + ml_lwrCenter + ml_lwrLeft + ml_uprLeft[-1:]
#pprint.pprint(md_anchors)
#pprint.pprint(d_anchorDat)
#pprint.pprint(md_crvDrivers)
d_driven = {}
#...make our driver curves...---------------------------------------------------------------
log.debug(cgmGEN.logString_msg('driven curves'))
for section,sectionDat in md_crvDrivers.iteritems():
#for side,dat in sectionDat.iteritems():
d_driven[section+'Result'] = {'ml_handles': sectionDat,
'rebuild':1}
md_res = self.UTILS.create_defineCurve(self, d_driven, {}, mNoTransformNull,'preCurve')
md_resCurves.update(md_res['md_curves'])
ml_resCurves.extend(md_res['ml_curves'])
#Mirror setup --------------------------------
log.debug(cgmGEN.logString_sub('mirror'))
idx_ctr = 0
idx_side = 0
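        #Center handles mirror across themselves (mirrorSide 0); left/right
        #pairs share a mirrorIndex and store each other as mirrorHandle.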
log.debug(cgmGEN.logString_msg('mirror | center'))
for mHandle in md_mirrorDat['center']:
mHandle = cgmMeta.validateObjArg(mHandle,'cgmControl')
mHandle._verifyMirrorable()
mHandle.mirrorSide = 0
mHandle.mirrorIndex = idx_ctr
idx_ctr +=1
mHandle.mirrorAxis = "translateX,rotateY,rotateZ"
log.debug(cgmGEN.logString_msg('mirror | sides'))
for i,mHandle in enumerate(md_mirrorDat['left']):
mLeft = cgmMeta.validateObjArg(mHandle,'cgmControl')
mRight = cgmMeta.validateObjArg(md_mirrorDat['right'][i],'cgmControl')
for mObj in mLeft,mRight:
mObj._verifyMirrorable()
mObj.mirrorAxis = "translateX,rotateY,rotateZ"
mObj.mirrorIndex = idx_side
mLeft.mirrorSide = 1
mRight.mirrorSide = 2
mLeft.doStore('mirrorHandle',mRight)
mRight.doStore('mirrorHandle',mLeft)
idx_side +=1
# Connect -------------------------------------------------
self.msgList_connect('prerigHandles', ml_handles)
self.msgList_connect('jointHandles', ml_jointHandles)
self.blockState = 'prerig'
return
except Exception,err:
cgmGEN.cgmExceptCB(Exception,err)
#=============================================================================================================
#>> Skeleton
#=============================================================================================================
def create_jointFromHandle(mHandle=None,mParent = False,cgmType='skinJoint'):
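    """Create a joint at mHandle's position/orientation.

    Copies the handle's name tags (re-tagging cgmType), freezes the joint
    orientation and parents it to mParent. The trailing try/except attempts
    to append to a module-level ml_joints list and silently no-ops when none
    exists; callers append to their own lists explicitly.
    """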
mJnt = mHandle.doCreateAt('joint')
mJnt.doCopyNameTagsFromObject(mHandle.mNode,ignore = ['cgmType'])
mJnt.doStore('cgmType',cgmType)
mJnt.doName()
JOINT.freezeOrientation(mJnt.mNode)
mJnt.p_parent = mParent
try:ml_joints.append(mJnt)
except:pass
return mJnt
def skeleton_check(self):
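    """Stub; the skeleton is always treated as valid for this block."""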
return True
def skeleton_build(self, forceNew = True):
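    """Build the module's skeleton from the prerig joint helpers.

    Deletes any existing moduleJoints when forceNew is True, then creates
    joints for each enabled setup (muzzle, jaw, lips, tongue, teeth, chin,
    nose/nostrils, cheeks, smile, sneer), parenting them under the module's
    attach joint (or the jaw where appropriate) and wiring them back to the
    prerigNull. Returns the list of created joints.
    """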
_short = self.mNode
_str_func = '[{0}] > skeleton_build'.format(_short)
log.debug("|{0}| >> ...".format(_str_func))
_radius = self.atUtils('get_shapeOffset') * .25# or 1
ml_joints = []
mModule = self.atUtils('module_verify')
mRigNull = mModule.rigNull
if not mRigNull:
raise ValueError,"No rigNull connected"
mPrerigNull = self.prerigNull
if not mPrerigNull:
raise ValueError,"No prerig null"
mRoot = self.UTILS.skeleton_getAttachJoint(self)
mLipRoot = mRoot
#>> If skeletons there, delete --------------------------------------------------------------------------
_bfr = mRigNull.msgList_get('moduleJoints',asMeta=True)
if _bfr:
log.debug("|{0}| >> Joints detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
_baseNameAttrs = ATTR.datList_getAttrs(self.mNode,'nameList')
_l_baseNames = ATTR.datList_get(self.mNode, 'nameList')
if self.muzzleSetup == 2:
log.debug("|{0}| >> muzzle joint...".format(_str_func)+ '-'*40)
mObj = mPrerigNull.getMessageAsMeta('muzzle'+'DagHelper')
mJnt = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('muzzleJoint',mJnt)
mJnt.p_parent = mRoot
ml_joints.append(mJnt)
mLipRoot = mJnt
if self.jawSetup:
mObj = mPrerigNull.getMessageAsMeta('jaw'+'DagHelper')
mJaw = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('jawJoint',mJaw)
ml_joints.append(mJaw)
else:
mJaw = mRoot
if self.lipSetup:
str_lipSetup = self.getEnumValueString('lipSetup')
log.debug("|{0}| >> lipSetup...".format(_str_func)+ '-'*40)
_d_lip = {'cgmName':'lip'}
for d in 'upr','lwr':
log.debug("|{0}| >> lip {1}...".format(_str_func,d)+ '-'*20)
d_dir = copy.copy(_d_lip)
d_dir['cgmPosition'] = d
for side in ['right','center','left']:
d_dir['cgmDirection'] = side
key = d+'Lip'+side.capitalize()
mHandles = mPrerigNull.msgList_get('{0}JointHelpers'.format(key))
ml = []
for mHandle in mHandles:
mJnt = create_jointFromHandle(mHandle,mLipRoot)
ml.append(mJnt)
mShape = mHandle.shapeHelper
mShape.connectChildNode(mJnt,'targetJoint')
mPrerigNull.msgList_connect('{0}Joints'.format(key),ml)
ml_joints.extend(ml)
_tongueSetup = self.tongueSetup
if _tongueSetup:#============================================================
log.debug(cgmGEN.logString_sub(_str_func,'tongue'))
if _tongueSetup == 1:
mObj = mPrerigNull.getMessageAsMeta('tongue'+'DagHelper')
mJnt = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('tongueJoint',mJnt)
mJnt.p_parent = mJaw
ml_joints.append(mJnt)
if self.teethUprSetup:
log.debug("|{0}| >> teethUpr...".format(_str_func)+ '-'*40)
mObj = mPrerigNull.getMessageAsMeta('teethUpr'+'DagHelper')
mJnt = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('teethUprJoint',mJnt)
mJnt.p_parent = mRoot
ml_joints.append(mJnt)
if self.teethLwrSetup:
log.debug("|{0}| >> teethLwr...".format(_str_func)+ '-'*40)
mObj = mPrerigNull.getMessageAsMeta('teethLwr'+'DagHelper')
mJnt = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('teethLwrJoint',mJnt)
mJnt.p_parent = mJaw
ml_joints.append(mJnt)
if self.chinSetup:
log.debug("|{0}| >> chinSetup...".format(_str_func)+ '-'*40)
mObj = mPrerigNull.getMessageAsMeta('chin'+'DagHelper')
mJnt = create_jointFromHandle(mObj,mRoot)
mPrerigNull.doStore('chinJoint',mJnt)
mJnt.p_parent = mJaw
ml_joints.append(mJnt)
if self.noseSetup:
log.debug("|{0}| >> noseSetup".format(_str_func)+ '-'*40)
str_noseSetup = self.getEnumValueString('noseSetup')
if str_noseSetup == 'simple':
log.debug("|{0}| >> noseSetup: {1}".format(_str_func,str_noseSetup))
_tag = 'noseBase'
mNoseBase = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mRoot)
mPrerigNull.doStore('{0}Joint'.format(_tag),mNoseBase)
ml_joints.append(mNoseBase)
#NoseTip ----------------------------------------------------------------------
if self.numJointsNoseTip:
log.debug("|{0}| >> {1}...".format(_str_func,'noseTip'))
_tag = 'noseTip'
mNoseTip = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mNoseBase)
mPrerigNull.doStore('{0}Joint'.format(_tag),mNoseTip)
ml_joints.append(mNoseTip)
#Nostrils -------------------------------------------------------------------
if self.numJointsNostril:
for side in ['left','right']:
_tag = 'nostril'+side.capitalize()
log.debug("|{0}| >> {1}...".format(_str_func,_tag))
mJnt = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mNoseBase)
mPrerigNull.doStore('{0}Joint'.format(_tag),mJnt)
ml_joints.append(mJnt)
else:
raise ValueError,"Invalid noseSetup: {0}".format(str_noseSetup)
if self.cheekSetup:
log.debug("|{0}| >> Cheeksetup".format(_str_func)+ '-'*40)
str_cheekSetup = self.getEnumValueString('cheekSetup')
if str_cheekSetup == 'single':
log.debug("|{0}| >> cheekSetup: {1}".format(_str_func,str_cheekSetup))
for side in ['left','right']:
_tag = 'cheek'+side.capitalize()
log.debug("|{0}| >> {1}...".format(_str_func,_tag))
mJnt = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mJaw)
mPrerigNull.doStore('{0}Joint'.format(_tag),mJnt)
ml_joints.append(mJnt)
else:
raise ValueError,"Invalid cheekSetup: {0}".format(str_cheekSetup)
if self.cheekUprSetup:
log.debug("|{0}| >> CheekUpr Setup".format(_str_func)+ '-'*40)
str_cheekUprSetup = self.getEnumValueString('cheekUprSetup')
if str_cheekUprSetup == 'single':
log.debug("|{0}| >> cheekUprSetup: {1}".format(_str_func,str_cheekUprSetup))
for side in ['left','right']:
_tag = 'cheekUpr'+side.capitalize()
log.debug("|{0}| >> {1}...".format(_str_func,_tag))
mJnt = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mJaw)
mPrerigNull.doStore('{0}Joint'.format(_tag),mJnt)
ml_joints.append(mJnt)
else:
raise ValueError,"Invalid cheekUprSetup: {0}".format(str_cheekUprSetup)
if self.smileSetup:
log.debug("|{0}| >> Smile Setup".format(_str_func)+ '-'*40)
str_smileSetup = self.getEnumValueString('smileSetup')
if str_smileSetup == 'single':
log.debug("|{0}| >> smileSetup: {1}".format(_str_func,str_smileSetup))
for side in ['left','right']:
_tag = 'smile'+side.capitalize()
log.debug("|{0}| >> {1}...".format(_str_func,_tag))
mJnt = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mJaw)
mPrerigNull.doStore('{0}Joint'.format(_tag),mJnt)
ml_joints.append(mJnt)
else:
raise ValueError,"Invalid smileSetup: {0}".format(str_smileSetup)
if self.sneerSetup:
log.debug("|{0}| >> sneerSetup".format(_str_func)+ '-'*40)
str_sneerSetup = self.getEnumValueString('sneerSetup')
if str_sneerSetup == 'single':
log.debug("|{0}| >> sneerSetup: {1}".format(_str_func,str_sneerSetup))
for side in ['left','right']:
_tag = 'sneer'+side.capitalize()
log.debug("|{0}| >> {1}...".format(_str_func,_tag))
mJnt = create_jointFromHandle(mPrerigNull.getMessageAsMeta('{0}DagHelper'.format(_tag)),
mJaw)
mPrerigNull.doStore('{0}Joint'.format(_tag),mJnt)
ml_joints.append(mJnt)
else:
raise ValueError,"Invalid cheekSetup: {0}".format(str_sneerSetup)
#>> ===========================================================================
mRigNull.msgList_connect('moduleJoints', ml_joints)
self.msgList_connect('moduleJoints', ml_joints)
#pprint.pprint(ml_joints)
for mJnt in ml_joints:
mJnt.displayLocalAxis = 1
mJnt.radius = _radius
for mJnt in ml_joints:mJnt.rotateOrder = 5
return ml_joints
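    #NOTE: everything below this return is unreachable; it appears to be
    #leftover head/neck skeleton code from another block module.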
#>> Head ===================================================================================
log.debug("|{0}| >> Head...".format(_str_func))
p = POS.get( ml_prerigHandles[-1].jointHelper.mNode )
mHeadHelper = ml_formHandles[0].orientHelper
#...create ---------------------------------------------------------------------------
mHead_jnt = cgmMeta.cgmObject(mc.joint (p=(p[0],p[1],p[2])))
mHead_jnt.parent = False
#self.copyAttrTo(_baseNameAttrs[-1],mHead_jnt.mNode,'cgmName',driven='target')
#...orient ----------------------------------------------------------------------------
#cgmMeta.cgmObject().getAxisVector
CORERIG.match_orientation(mHead_jnt.mNode, mHeadHelper.mNode)
JOINT.freezeOrientation(mHead_jnt.mNode)
#...name ----------------------------------------------------------------------------
#mHead_jnt.doName()
#mHead_jnt.rename(_l_namesToUse[-1])
for k,v in _l_namesToUse[-1].iteritems():
mHead_jnt.doStore(k,v)
mHead_jnt.doName()
if self.neckBuild:#...Neck =====================================================================
log.debug("|{0}| >> neckBuild...".format(_str_func))
if len(ml_prerigHandles) == 2 and self.neckJoints == 1:
log.debug("|{0}| >> Single neck joint...".format(_str_func))
p = POS.get( ml_prerigHandles[0].jointHelper.mNode )
mBaseHelper = ml_prerigHandles[0].orientHelper
#...create ---------------------------------------------------------------------------
mNeck_jnt = cgmMeta.cgmObject(mc.joint (p=(p[0],p[1],p[2])))
#self.copyAttrTo(_baseNameAttrs[0],mNeck_jnt.mNode,'cgmName',driven='target')
#...orient ----------------------------------------------------------------------------
#cgmMeta.cgmObject().getAxisVector
TRANS.aim_atPoint(mNeck_jnt.mNode,
mHead_jnt.p_position,
'z+', 'y+', 'vector',
vectorUp=mHeadHelper.getAxisVector('z-'))
JOINT.freezeOrientation(mNeck_jnt.mNode)
#mNeck_jnt.doName()
mHead_jnt.p_parent = mNeck_jnt
ml_joints.append(mNeck_jnt)
#mNeck_jnt.rename(_l_namesToUse[0])
for k,v in _l_namesToUse[0].iteritems():
mNeck_jnt.doStore(k,v)
mNeck_jnt.doName()
else:
log.debug("|{0}| >> Multiple neck joint...".format(_str_func))
_d = self.atBlockUtils('skeleton_getCreateDict', self.neckJoints +1)
mOrientHelper = ml_prerigHandles[0].orientHelper
ml_joints = JOINT.build_chain(_d['positions'][:-1], parent=True, worldUpAxis= mOrientHelper.getAxisVector('z-'))
for i,mJnt in enumerate(ml_joints):
#mJnt.rename(_l_namesToUse[i])
for k,v in _l_namesToUse[i].iteritems():
mJnt.doStore(k,v)
mJnt.doName()
#self.copyAttrTo(_baseNameAttrs[0],ml_joints[0].mNode,'cgmName',driven='target')
mHead_jnt.p_parent = ml_joints[-1]
ml_joints[0].parent = False
else:
mHead_jnt.parent = False
#mHead_jnt.rename(_l_namesToUse[-1])
ml_joints.append(mHead_jnt)
for mJnt in ml_joints:
mJnt.displayLocalAxis = 1
mJnt.radius = _radius
if len(ml_joints) > 1:
mHead_jnt.radius = ml_joints[-1].radius * 5
mRigNull.msgList_connect('moduleJoints', ml_joints)
self.msgList_connect('moduleJoints', ml_joints)
self.atBlockUtils('skeleton_connectToParent')
return ml_joints
#=============================================================================================================
#>> rig
#=============================================================================================================
#NOTE - self here is a rig Factory....
d_preferredAngles = {}#Preferred angles in aim/up/out order (orientation relative); values are stored for the left side and inverted for the right
d_rotateOrders = {}
#Rig build stuff goes through the rig build factory ------------------------------------------------------
@cgmGEN.Timer
def rig_prechecks(self):
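    """Flag unsupported block options before the rig build.

    Any enum value the build doesn't yet handle appends a message to
    self.l_precheckErrors (self is the rig factory, per the NOTE above).
    """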
_str_func = 'rig_prechecks'
log.debug(cgmGEN.logString_start(_str_func))
mBlock = self.mBlock
str_faceType = mBlock.getEnumValueString('faceType')
if str_faceType not in ['default']:
self.l_precheckErrors.append("faceType setup not completed: {0}".format(str_faceType))
str_jawSetup = mBlock.getEnumValueString('jawSetup')
if str_jawSetup not in ['none','simple']:
self.l_precheckErrors.append("Jaw setup not completed: {0}".format(str_jawSetup))
str_muzzleSetup = mBlock.getEnumValueString('muzzleSetup')
if str_muzzleSetup not in ['none','simple','joint','dag']:
self.l_precheckErrors.append("Muzzle setup not completed: {0}".format(str_muzzleSetup))
str_noseSetup = mBlock.getEnumValueString('noseSetup')
if str_noseSetup not in ['none','simple']:
self.l_precheckErrors.append("Nose setup not completed: {0}".format(str_noseSetup))
str_nostrilSetup = mBlock.getEnumValueString('nostrilSetup')
if str_nostrilSetup not in ['none','default']:
self.l_precheckErrors.append("Nostril setup not completed: {0}".format(str_nostrilSetup))
str_cheekSetup = mBlock.getEnumValueString('cheekSetup')
if str_cheekSetup not in ['none','single']:
self.l_precheckErrors.append("Cheek setup not completed: {0}".format(str_cheekSetup))
str_cheekUprSetup = mBlock.getEnumValueString('cheekUprSetup')
if str_cheekUprSetup not in ['none','single']:
self.l_precheckErrors.append("Cheek upr setup not completed: {0}".format(str_cheekUprSetup))
str_lipSetup = mBlock.getEnumValueString('lipSetup')
if str_lipSetup not in ['none','default']:
self.l_precheckErrors.append("Lip setup not completed: {0}".format(str_lipSetup))
str_chinSetup = mBlock.getEnumValueString('chinSetup')
if str_chinSetup not in ['none','single']:
self.l_precheckErrors.append("Chin setup not completed: {0}".format(str_chinSetup))
@cgmGEN.Timer
def rig_dataBuffer(self):
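    """Cache everything the rig build passes will need on the factory.

    Per-setup enum strings land as self.str_<setup> attributes (False when
    'none'), plus dynParent targets, shape offset, base size/average, the
    settings control (module parent's settings, else the master's
    controlVis) and the base rotate order.
    """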
_short = self.d_block['shortName']
_str_func = 'rig_dataBuffer'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mModule = self.mModule
mRigNull = self.mRigNull
mPrerigNull = mBlock.prerigNull
self.mPrerigNull = mPrerigNull
ml_handleJoints = mPrerigNull.msgList_get('handleJoints')
mMasterNull = self.d_module['mMasterNull']
self.b_scaleSetup = mBlock.scaleSetup
for k in ['jaw','muzzle','nose','nostril','cheek','bridge',
'teethUpr','teethLwr',
'chin','sneer','cheekUpr',
'lip','lipSeal','teeth','tongue','uprJaw','smile']:
_tag = "{0}Setup".format(k)
self.__dict__['str_{0}'.format(_tag)] = False
_v = mBlock.getEnumValueString(_tag)
if _v != 'none':
self.__dict__['str_{0}'.format(_tag)] = _v
log.debug("|{0}| >> self.str_{1} = {2}".format(_str_func,_tag,self.__dict__['str_{0}'.format(_tag)]))
for k in ['buildSDK']:
self.__dict__['str_{0}'.format(k)] = ATTR.get_enumValueString(mBlock.mNode,k)
self.__dict__['v_{0}'.format(k)] = mBlock.getMayaAttr(k)
#DynParents =============================================================================
self.UTILS.get_dynParentTargetsDat(self)
log.debug(cgmGEN._str_subLine)
#Offset ============================================================================
self.v_offset = self.mPuppet.atUtils('get_shapeOffset')
log.debug("|{0}| >> self.v_offset: {1}".format(_str_func,self.v_offset))
log.debug(cgmGEN._str_subLine)
#Size =======================================================================================
self.v_baseSize = [mBlock.blockScale * v for v in mBlock.baseSize]
self.f_sizeAvg = MATH.average(self.v_baseSize)
log.debug("|{0}| >> size | self.v_baseSize: {1} | self.f_sizeAvg: {2}".format(_str_func,
self.v_baseSize,
self.f_sizeAvg ))
#Settings =============================================================================
mModuleParent = self.d_module['mModuleParent']
if mModuleParent:
mSettings = mModuleParent.rigNull.settings
else:
log.debug("|{0}| >> using puppet...".format(_str_func))
mSettings = self.d_module['mMasterControl'].controlVis
log.debug("|{0}| >> mSettings | self.mSettings: {1}".format(_str_func,mSettings))
self.mSettings = mSettings
log.debug("|{0}| >> self.mPlug_visSub_moduleParent: {1}".format(_str_func,
self.mPlug_visSub_moduleParent))
log.debug("|{0}| >> self.mPlug_visDirect_moduleParent: {1}".format(_str_func,
self.mPlug_visDirect_moduleParent))
#rotateOrder =============================================================================
_str_orientation = self.d_orientation['str']
_l_orient = [_str_orientation[0],_str_orientation[1],_str_orientation[2]]
self.ro_base = "{0}{1}{2}".format(_str_orientation[1],_str_orientation[2],_str_orientation[0])
"""
self.ro_head = "{2}{0}{1}".format(_str_orientation[0],_str_orientation[1],_str_orientation[2])
self.ro_headLookAt = "{0}{2}{1}".format(_str_orientation[0],_str_orientation[1],_str_orientation[2])
log.debug("|{0}| >> rotateOrder | self.ro_base: {1}".format(_str_func,self.ro_base))
log.debug("|{0}| >> rotateOrder | self.ro_head: {1}".format(_str_func,self.ro_head))
log.debug("|{0}| >> rotateOrder | self.ro_headLookAt: {1}".format(_str_func,self.ro_headLookAt))"""
log.debug(cgmGEN._str_subLine)
return True
@cgmGEN.Timer
def rig_skeleton(self):
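    """Sort the module joints into rig/driver chains and lookup dicts.

    Duplicates moduleJoints into rig and driver chains (rig parented under
    driver), gathers per-tag skin/rig/driver joints and shape helpers into
    the md_* dicts on the factory, wires left/right mirrorControl links,
    builds handle joints for the lips (with upr/lwr influence joints at the
    corners) and hides the non-control joints.
    """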
_short = self.d_block['shortName']
_str_func = 'rig_skeleton'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
mPrerigNull = self.mPrerigNull
ml_jointsToConnect = []
ml_jointsToHide = []
ml_joints = mRigNull.msgList_get('moduleJoints')
self.d_joints['ml_moduleJoints'] = ml_joints
#---------------------------------------------
BLOCKUTILS.skeleton_pushSettings(ml_joints, self.d_orientation['str'],
self.d_module['mirrorDirection'])
#d_rotateOrders, d_preferredAngles)
#Rig Joints =================================================================================
ml_rigJoints = BLOCKUTILS.skeleton_buildDuplicateChain(mBlock,
ml_joints,
'rig',
self.mRigNull,
'rigJoints',
'rig',
cgmType = False,
blockNames=False)
ml_driverJoints = BLOCKUTILS.skeleton_buildDuplicateChain(mBlock,
ml_joints,
None,
self.mRigNull,
'driverJoints',
'driver',
cgmType = 'driver',
blockNames=False)
for i,mJnt in enumerate(ml_rigJoints):
mJnt.p_parent = ml_driverJoints[i]
ml_jointsToHide.extend(ml_driverJoints)
"""
ml_segmentJoints = BLOCKUTILS.skeleton_buildDuplicateChain(mBlock,ml_joints, None,
mRigNull,'segmentJoints','seg',
cgmType = 'segJnt')
ml_jointsToHide.extend(ml_segmentJoints) """
#Processing joints ================================================================================
log.debug("|{0}| >> Processing Joints...".format(_str_func)+ '-'*40)
#Need to sort our joint lists:
md_skinJoints = {}
md_rigJoints = {}
md_segJoints = {}
md_driverJoints = {}
md_handles = {}
md_handleShapes = {}
md_directShapes = {}
md_directJoints = {}
def doSingleJoint(tag,mParent = None):
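        """Gather the skin/rig/driver joints and shape helper for tag into
        the md_* dicts, re-parenting the driver joint when mParent is given."""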
log.debug("|{0}| >> gathering {1}...".format(_str_func,tag))
mJntSkin = mPrerigNull.getMessageAsMeta('{0}Joint'.format(tag))
mJntRig = mJntSkin.getMessageAsMeta('rigJoint')
mJntDriver = mJntSkin.getMessageAsMeta('driverJoint')
if mParent is not None:
mJntDriver.p_parent = mParent
md_skinJoints[tag] = mJntSkin
md_rigJoints[tag] = mJntRig
md_driverJoints[tag] = mJntDriver
md_handleShapes[tag] = mPrerigNull.getMessageAsMeta('{0}ShapeHelper'.format(tag))
def mirrorConnect(tag1,tag2):
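        """Cross-store mirrorControl links between a left/right tag pair on
        both the rig and driver joints."""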
md_rigJoints[tag1].doStore('mirrorControl',md_rigJoints[tag2])
md_rigJoints[tag2].doStore('mirrorControl', md_rigJoints[tag1])
md_driverJoints[tag1].doStore('mirrorControl',md_driverJoints[tag2])
md_driverJoints[tag2].doStore('mirrorControl', md_driverJoints[tag1])
if self.str_muzzleSetup == 'joint':
doSingleJoint('muzzle')
#Jaw ---------------------------------------------------------------
if self.str_jawSetup:
log.debug("|{0}| >> jaw...".format(_str_func))
mJntSkin = mPrerigNull.getMessageAsMeta('jawJoint')
mJntRig = mJntSkin.getMessageAsMeta('rigJoint')
mJntDriver = mJntSkin.getMessageAsMeta('driverJoint')
md_skinJoints['jaw'] = mJntSkin
md_rigJoints['jaw'] = mJntRig
md_driverJoints['jaw'] = mJntDriver
if self.str_tongueSetup:
doSingleJoint('tongue')
if self.str_teethUprSetup:
doSingleJoint('teethUpr')
if self.str_teethLwrSetup:
doSingleJoint('teethLwr')
if self.str_chinSetup:
log.debug("|{0}| >> chinSetup...".format(_str_func))
mJntSkin = mPrerigNull.getMessageAsMeta('chinJoint')
mJntRig = mJntSkin.getMessageAsMeta('rigJoint')
mJntDriver = mJntSkin.getMessageAsMeta('driverJoint')
md_skinJoints['chin'] = mJntSkin
md_rigJoints['chin'] = mJntRig
md_driverJoints['chin'] = mJntDriver
if self.str_noseSetup:
log.debug("|{0}| >> nose...".format(_str_func)+'-'*40)
_l = ['noseBase']
if mBlock.numJointsNoseTip:
_l.append('noseTip')
if mBlock.numJointsNostril:
_l.extend(['nostrilLeft','nostrilRight'])
for t in _l:
mParent = None
if t == 'noseBase':
mParent = False
doSingleJoint(t,mParent)
if mBlock.numJointsNostril:
mirrorConnect('nostrilLeft','nostrilRight')
if self.str_cheekSetup:
log.debug("|{0}| >> cheek...".format(_str_func))
for t in ['cheekLeft','cheekRight']:
doSingleJoint(t,False)
mirrorConnect('cheekLeft','cheekRight')
if self.str_cheekUprSetup:
log.debug("|{0}| >> cheekUpr...".format(_str_func))
for t in ['cheekUprLeft','cheekUprRight']:
doSingleJoint(t,False)
mirrorConnect('cheekUprLeft','cheekUprRight')
if self.str_sneerSetup:
log.debug("|{0}| >> sneer...".format(_str_func))
for t in ['sneerLeft','sneerRight']:
doSingleJoint(t,False)
mirrorConnect('sneerLeft','sneerRight')
if self.str_smileSetup:
log.debug("|{0}| >> smile...".format(_str_func))
for t in ['smileLeft','smileRight']:
doSingleJoint(t,False)
mirrorConnect('smileLeft','smileRight')
#Processing Handles ================================================================================
log.debug("|{0}| >> Processing...".format(_str_func)+ '-'*40)
if self.str_lipSetup:
log.debug("|{0}| >> lip ".format(_str_func)+ '-'*20)
for d in 'upr','lwr':
log.debug("|{0}| >> lip {1}...".format(_str_func,d)+ '-'*5)
_k = 'lip'+d.capitalize()
md_directShapes[_k] = {}
md_directJoints[_k] = {}
for _d in md_skinJoints,md_handles,md_handleShapes,md_rigJoints,md_segJoints:
if not _d.get(_k):
_d[_k] = {}
for side in ['right','center','left']:
#key = 'lip'+d.capitalize()+side.capitalize()
key = d+'Lip'+STR.capFirst(side)
md_directShapes[_k][side] = mPrerigNull.msgList_get('{0}JointShapes'.format(key))
ml_skin = mPrerigNull.msgList_get('{0}Joints'.format(key))
ml_rig = []
ml_driver = []
for mJnt in ml_skin:
mRigJoint = mJnt.getMessageAsMeta('rigJoint')
ml_rig.append(mRigJoint)
mDriver = mJnt.getMessageAsMeta('driverJoint')
ml_driver.append(mDriver)
mDriver.p_parent = False
mRigJoint.doStore('driverJoint',mDriver)
mRigJoint.p_parent = mDriver
md_rigJoints[_k][side] = ml_rig
md_skinJoints[_k][side] = ml_skin
md_segJoints[_k][side] = ml_driver
md_directJoints[_k][side] = ml_rig
mHandles = mPrerigNull.msgList_get('{0}PrerigHandles'.format(key))
mHelpers = mPrerigNull.msgList_get('{0}PrerigShapes'.format(key))
ml = []
for ii,mHandle in enumerate(mHandles):
mJnt = create_jointFromHandle(mHandle,False,'handle')
ml.append(mJnt)
if d == 'upr' and side in ['right','left'] and ii == 0:
log.debug("|{0}| >> influenceJoints for {1}...".format(_str_func,mHandle))
for k in 'upr','lwr':
mSub = create_jointFromHandle(mHandle,False,'{0}Influence'.format(k))
mSub.doStore('mClass','cgmObject')
mSub.p_parent = mJnt
mJnt.doStore('{0}Influence'.format(k),mSub)
ml_jointsToConnect.append(mSub)
ml_jointsToHide.append(mSub)
ml_jointsToHide.extend(ml)
md_handles[_k][side] = ml
md_handleShapes[_k][side] = mHelpers
for i,mObj in enumerate(md_directJoints[_k]['right']):
mObj.doStore('mirrorControl',md_directJoints[_k]['left'][i])
md_directJoints[_k]['left'][i].doStore('mirrorControl', mObj)
for i,mObj in enumerate(md_handles[_k]['right']):
mObj.doStore('mirrorControl',md_handles[_k]['left'][i])
md_handles[_k]['left'][i].doStore('mirrorControl', mObj)
log.debug(cgmGEN._str_subLine)
self.md_rigJoints = md_rigJoints
self.md_skinJoints = md_skinJoints
self.md_segJoints = md_segJoints
self.md_handles = md_handles
self.md_handleShapes = md_handleShapes
self.md_driverJoints = md_driverJoints
self.md_directShapes = md_directShapes
self.md_directJoints = md_directJoints
#...joint hide -----------------------------------------------------------------------------------
for mJnt in ml_jointsToHide:
try:mJnt.drawStyle =2
except:mJnt.radius = .00001
#pprint.pprint(vars())
#...connect...
self.fnc_connect_toRigGutsVis( ml_jointsToConnect )
return
@cgmGEN.Timer
def rig_shapes(self):
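    """Parent the stored helper shapes onto the rig transforms.

    Driver joints (jaw, teeth, tongue, chin, muzzle, cheeks, smile, sneer,
    nose) receive their prerig shape helpers; lip handles get their prerig
    shapes; any direct rig joint without a stored shape gets a small cube
    curve sized from jointRadius. Control message links are stored on the
    rigNull along the way.
    """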
try:
_short = self.d_block['shortName']
_str_func = 'rig_shapes'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
#_baseNameAttrs = ATTR.datList_getAttrs(mBlock.mNode,'nameList')
mHandleFactory = mBlock.asHandleFactory()
mRigNull = self.mRigNull
mPrerigNull = self.mPrerigNull
ml_rigJoints = mRigNull.msgList_get('rigJoints')
if self.md_rigJoints.get('jaw'):
log.debug("|{0}| >> Jaw setup...".format(_str_func)+ '-'*40)
mJaw_fk = self.md_driverJoints.get('jaw')
CORERIG.shapeParent_in_place(mJaw_fk.mNode, mPrerigNull.getMessageAsMeta('jawShapeHelper').mNode)
mRigNull.doStore('controlJaw',mJaw_fk)
#if not self.mParentSettings:
# log.debug("|{0}| >> Jaw settings!...".format(_str_func))
mRigNull.doStore('settings',mJaw_fk)
#else:
# mRigNull.doStore('settings',self.mParentSettings)
log.debug(cgmGEN._str_subLine)
for k in 'teethUpr','teethLwr','tongue','chin':
mDag = self.md_driverJoints.get(k)
if mDag:
log.debug("|{0}| >> {1} setup...".format(_str_func,k)+ '-'*40)
mShapeHelper = mPrerigNull.getMessageAsMeta('{0}ShapeHelper'.format(k))
CORERIG.shapeParent_in_place(mDag.mNode, mShapeHelper.mNode)
mRigNull.doStore('control{0}'.format(STR.capFirst(k)),mDag)
log.debug(cgmGEN._str_subLine)
"""
if self.md_rigJoints.get('chin'):
log.debug("|{0}| >> chin setup...".format(_str_func)+ '-'*40)
mChin = self.md_driverJoints.get('chin')
CORERIG.shapeParent_in_place(mChin.mNode, mPrerigNull.getMessageAsMeta('chinShapeHelper').mNode)
mRigNull.doStore('controlChin',mChin)
log.debug(cgmGEN._str_subLine)"""
if self.str_muzzleSetup:
log.debug("|{0}| >> Muzzle setup...".format(_str_func)+ '-'*40)
mMuzzleDagHelper = mPrerigNull.getMessageAsMeta('muzzleDagHelper')
if self.md_driverJoints.get('muzzle'):
mMuzzleDag = self.md_driverJoints.get('muzzle')
else:
mMuzzleDag = mMuzzleDagHelper.doCreateAt()
mMuzzleDag.doCopyNameTagsFromObject(mMuzzleDagHelper.mNode,'cgmType')
mMuzzleDag.doName()
CORERIG.shapeParent_in_place(mMuzzleDag.mNode,
mMuzzleDagHelper.getMessageAsMeta('shapeHelper').mNode)
mRigNull.doStore('controlMuzzle',mMuzzleDag)
log.debug(cgmGEN._str_subLine)
if self.str_cheekSetup:
log.debug("|{0}| >> cheek setup...".format(_str_func)+ '-'*40)
for k in ['cheekLeft','cheekRight']:
mDriver = self.md_driverJoints.get(k)
CORERIG.shapeParent_in_place(mDriver.mNode, self.md_handleShapes[k].mNode)
log.debug(cgmGEN._str_subLine)
if self.str_cheekUprSetup:
log.debug("|{0}| >> cheek upr setup...".format(_str_func)+ '-'*40)
for k in ['cheekUprLeft','cheekUprRight']:
mDriver = self.md_driverJoints.get(k)
CORERIG.shapeParent_in_place(mDriver.mNode, self.md_handleShapes[k].mNode)
log.debug(cgmGEN._str_subLine)
if self.str_smileSetup:
log.debug("|{0}| >> smile setup...".format(_str_func)+ '-'*40)
for k in ['smileLeft','smileRight']:
mDriver = self.md_driverJoints.get(k)
CORERIG.shapeParent_in_place(mDriver.mNode, self.md_handleShapes[k].mNode)
log.debug(cgmGEN._str_subLine)
if self.str_sneerSetup:
log.debug("|{0}| >> sneer setup...".format(_str_func)+ '-'*40)
for k in ['sneerLeft','sneerRight']:
mDriver = self.md_driverJoints.get(k)
CORERIG.shapeParent_in_place(mDriver.mNode, self.md_handleShapes[k].mNode)
log.debug(cgmGEN._str_subLine)
if self.str_noseSetup:
log.debug("|{0}| >> nose setup...".format(_str_func)+ '-'*40)
_l = ['noseBase']
if mBlock.numJointsNoseTip:
_l.append('noseTip')
if mBlock.numJointsNostril:
_l.extend(['nostrilLeft','nostrilRight'])
for k in _l:
mDriver = self.md_driverJoints.get(k)
if mDriver:
log.debug("|{0}| >> found: {1}".format(_str_func,k))
CORERIG.shapeParent_in_place(mDriver.mNode, self.md_handleShapes[k].mNode)
log.debug(cgmGEN._str_subLine)
if self.str_lipSetup:
log.debug("|{0}| >> Lip setup...".format(_str_func)+ '-'*40)
mDagHelper = mPrerigNull.getMessageAsMeta('mouthMoveDag')
mMouthMove = mDagHelper.doCreateAt()
mMouthMove.doCopyNameTagsFromObject(mDagHelper.mNode,'cgmType')
mMouthMove.doName()
CORERIG.shapeParent_in_place(mMouthMove.mNode,
mDagHelper.getMessageAsMeta('shapeHelper').mNode)
mRigNull.doStore('controlMouth',mMouthMove)
#Handles ================================================================================
log.debug("|{0}| >> Handles...".format(_str_func)+ '-'*80)
for k in 'lipLwr','lipUpr':
log.debug("|{0}| >> {1}...".format(_str_func,k)+ '-'*40)
for side,ml in self.md_handles[k].iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,side)+ '-'*10)
for i,mHandle in enumerate(ml):
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
CORERIG.shapeParent_in_place(mHandle.mNode,
self.md_handleShapes[k][side][i].mNode)
#if side == 'center':
#mHandleFactory.color(mHandle.mNode,side='center',controlType='sub')
log.debug(cgmGEN._str_subLine)
#Direct ================================================================================
log.debug("|{0}| >> Direct...".format(_str_func)+ '-'*80)
#Lip direct shapes
ml_processed = []
for k,d in self.md_directJoints.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,k)+ '-'*40)
for side,ml in d.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,side)+ '-'*10)
for i,mHandle in enumerate(ml):
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
CORERIG.shapeParent_in_place(mHandle.mNode,
self.md_directShapes[k][side][i].mNode)
ml_processed.append(mHandle)
_radius = mBlock.jointRadius
for k,d in self.md_rigJoints.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,k)+ '-'*40)
if VALID.isListArg(d):
for i,mHandle in enumerate(d):
if mHandle in ml_processed:continue
else:ml_processed.append(mHandle)
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
side = mHandle.getMayaAttr('cgmDirection') or False
crv = CURVES.create_fromName(name='cube',
direction = 'z+',
size = _radius)
SNAP.go(crv,mHandle.mNode)
mHandleFactory.color(crv,side=side,controlType='sub')
CORERIG.shapeParent_in_place(mHandle.mNode,
crv,keepSource=False)
elif issubclass(type(d),dict):
for side,ml in d.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,side)+ '-'*10)
for i,mHandle in enumerate(ml):
if mHandle in ml_processed:continue
else:ml_processed.append(mHandle)
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
crv = CURVES.create_fromName(name='cube',
direction = 'z+',
size = _radius)
SNAP.go(crv,mHandle.mNode)
mHandleFactory.color(crv,side=side,controlType='sub')
CORERIG.shapeParent_in_place(mHandle.mNode,
crv,keepSource=False)
else:
log.debug("|{0}| >> {1}...".format(_str_func,d))
side = d.getMayaAttr('cgmDirection') or 'center'
crv = CURVES.create_fromName(name='cube',
direction = 'z+',
size = _radius)
SNAP.go(crv,d.mNode)
mHandleFactory.color(crv,side=side,controlType='sub')
CORERIG.shapeParent_in_place(d.mNode,
crv,keepSource=False)
log.debug(cgmGEN._str_subLine)
for mJnt in ml_rigJoints:
try:
mJnt.drawStyle =2
except:
mJnt.radius = .00001
return
except Exception,error:
cgmGEN.cgmExceptCB(Exception,error,msg=vars())
@cgmGEN.Timer
def rig_controls(self):
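    """Register the shaped transforms as controls.

    Each control is run through MODULECONTROL.register with the module's
    mirror side and "translateX,rotateY,rotateZ" mirror axis (plus an SDK
    group when buildSDK is 'dag'), collecting results in ml_controlsAll.
    """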
try:
_short = self.d_block['shortName']
_str_func = 'rig_controls'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mRigNull = self.mRigNull
mBlock = self.mBlock
ml_controlsAll = []#we'll append to this list and connect them all at the end
mRootParent = self.mDeformNull
ml_segmentHandles = []
ml_rigJoints = mRigNull.msgList_get('rigJoints')
mSettings = self.mSettings
if not mSettings:
raise ValueError,"Should have settings"
#mPlug_visSub = self.atBuilderUtils('build_visSub')
mPlug_visDirect = self.mPlug_visDirect_moduleParent
mPlug_visSub = self.mPlug_visSub_moduleParent
self.mDeformNull.overrideEnabled = 1
ATTR.connect(self.mPlug_visModule.p_combinedShortName,
"{0}.overrideVisibility".format(self.mDeformNull.mNode))
b_sdk=False
if self.str_buildSDK in ['dag']:
b_sdk = True
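#Local helper: registers a driver joint as a module control, resolving the
#cgm mirror side tag ('Left'/'Right'/'Centre') from its cgmDirection.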
def simpleRegister(mObj):
_dir = mObj.getMayaAttr('cgmDirection')
if not _dir:
_dir = self.d_module['mirrorDirection']
else:
if _dir in ['left','right']:
_dir = STR.capFirst(_dir)
else:
_dir = 'Centre'
_d = MODULECONTROL.register(mObj,
addSDKGroup=b_sdk,
mirrorSide= _dir,
mirrorAxis="translateX,rotateY,rotateZ",
makeAimable = False)
ml_controlsAll.append(_d['mObj'])
return _d['mObj']
for k in 'teethUpr','teethLwr','tongue','jaw','muzzle','mouth','chin':
link = 'control{0}'.format(STR.capFirst(k))
log.debug("|{0}| >> {1} setup...".format(_str_func,link)+ '-'*40)
mLink = mRigNull.getMessageAsMeta(link)
if mLink:
log.debug("|{0}| >> {1}...".format(_str_func,link))
_d = MODULECONTROL.register(mLink,
addSDKGroup=b_sdk,
mirrorSide= self.d_module['mirrorDirection'],
mirrorAxis="translateX,rotateY,rotateZ",
makeAimable = False)
ml_controlsAll.append(_d['mObj'])
log.debug(cgmGEN._str_subLine)
"""
for link in ['controlJaw','controlMuzzle','controlMouth','controlChin']:
mLink = mRigNull.getMessageAsMeta(link)
if mLink:
log.debug("|{0}| >> {1}...".format(_str_func,link))
_d = MODULECONTROL.register(mLink,
mirrorSide= self.d_module['mirrorDirection'],
mirrorAxis="translateX,rotateY,rotateZ",
makeAimable = False)
ml_controlsAll.append(_d['mObj']) """
#ml_segmentHandles.append(_d['mObj'])
log.debug(cgmGEN._str_subLine)
if self.str_cheekSetup:
log.debug("|{0}| >> cheek setup...".format(_str_func)+ '-'*40)
for k in ['cheekLeft','cheekRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
simpleRegister(self.md_driverJoints.get(k))
if self.str_cheekUprSetup:
log.debug("|{0}| >> cheek upr setup...".format(_str_func)+ '-'*40)
for k in ['cheekUprLeft','cheekUprRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
simpleRegister(self.md_driverJoints.get(k))
if self.str_sneerSetup:
log.debug("|{0}| >> sneer setup...".format(_str_func)+ '-'*40)
for k in ['sneerLeft','sneerRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
simpleRegister(self.md_driverJoints.get(k))
if self.str_smileSetup:
log.debug("|{0}| >> smile setup...".format(_str_func)+ '-'*40)
for k in ['smileLeft','smileRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
simpleRegister(self.md_driverJoints.get(k))
if self.str_noseSetup:
log.debug("|{0}| >> nose setup...".format(_str_func)+ '-'*40)
_l = ['noseBase']
if mBlock.numJointsNoseTip:
_l.append('noseTip')
if mBlock.numJointsNostril:
_l.extend(['nostrilLeft','nostrilRight'])
for k in _l:
log.debug("|{0}| >> {1}...".format(_str_func,k))
simpleRegister(self.md_driverJoints.get(k))
#Handles ================================================================================
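#Register each handle per side; right-side handles get a 'mirrorControl'
#message link to their left counterpart for pose mirroring.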
log.debug("|{0}| >> Handles...".format(_str_func)+ '-'*80)
for k,d in self.md_handles.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,k)+ '-'*40)
for side,ml in d.iteritems():
log.debug("|{0}| >> {1}...".format(_str_func,side)+ '-'*10)
for i,mHandle in enumerate(ml):
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
_d = MODULECONTROL.register(mHandle,
addSDKGroup=b_sdk,
mirrorSide= side,
mirrorAxis="translateX,rotateY,rotateZ",
makeAimable = False)
ml_controlsAll.append(_d['mObj'])
ml_segmentHandles.append(_d['mObj'])
if side == 'right':
mTarget = d['left'][i]
log.debug("|{0}| >> mirrorControl connect | {1} <<>> {2}".format(_str_func, mHandle.mNode, mTarget.mNode))
mHandle.doStore('mirrorControl',mTarget)
mTarget.doStore('mirrorControl',mHandle)
log.debug(cgmGEN._str_subLine)
#Direct ================================================================================
log.debug("|{0}| >> Direct...".format(_str_func)+ '-'*80)
for mHandle in ml_rigJoints:
log.debug("|{0}| >> {1}...".format(_str_func,mHandle))
side = mHandle.getMayaAttr('cgmDirection') or 'center'
_d = MODULECONTROL.register(mHandle,
typeModifier='direct',
mirrorSide= side,
mirrorAxis="translateX,rotateY,rotateZ",
makeAimable = False)
mObj = _d['mObj']
ml_controlsAll.append(_d['mObj'])
if mObj.hasAttr('cgmIterator'):
ATTR.set_hidden(mObj.mNode,'cgmIterator',True)
for mShape in mObj.getShapes(asMeta=True):
ATTR.connect(mPlug_visDirect.p_combinedShortName, "{0}.overrideVisibility".format(mShape.mNode))
log.debug(cgmGEN._str_subLine)
#Close out...
mHandleFactory = mBlock.asHandleFactory()
for mCtrl in ml_controlsAll:
ATTR.set(mCtrl.mNode,'rotateOrder',self.ro_base)
if mCtrl.hasAttr('radius'):
ATTR.set(mCtrl.mNode,'radius',0)
ATTR.set_hidden(mCtrl.mNode,'radius',True)
ml_pivots = mCtrl.msgList_get('spacePivots')
if ml_pivots:
log.debug("|{0}| >> Coloring spacePivots for: {1}".format(_str_func,mCtrl))
for mPivot in ml_pivots:
mHandleFactory.color(mPivot.mNode, controlType = 'sub')
"""
if mHeadIK:
ATTR.set(mHeadIK.mNode,'rotateOrder',self.ro_head)
if mHeadLookAt:
ATTR.set(mHeadLookAt.mNode,'rotateOrder',self.ro_headLookAt)
"""
mRigNull.msgList_connect('handleJoints',ml_segmentHandles)
mRigNull.msgList_connect('controlsFace',ml_controlsAll)
mRigNull.msgList_connect('controlsAll',ml_controlsAll,'rigNull')
mRigNull.moduleSet.extend(ml_controlsAll)
mRigNull.faceSet.extend(ml_controlsAll)
except Exception,error:
cgmGEN.cgmExceptCB(Exception,error,msg=vars())
@cgmGEN.Timer
def rig_frame(self):
try:
_short = self.d_block['shortName']
_str_func = 'rig_frame'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
mRootParent = self.mDeformNull
mModule = self.mModule
mDeformNull = self.mDeformNull
mFollowParent = self.mDeformNull
mFollowBase = self.mDeformNull
mdD = self.md_driverJoints
#Process our main controls ==============================================================
mMuzzle = mRigNull.getMessageAsMeta('controlMuzzle')
mJaw = mRigNull.getMessageAsMeta('controlJaw')
_str_rigNull = mRigNull.mNode
if mMuzzle:
log.debug("|{0}| >> Muzzle setup...".format(_str_func))
mMuzzle.masterGroup.p_parent = self.mDeformNull
mFollowParent = mMuzzle
mFollowBase = mMuzzle.doCreateAt('null',setClass=True)
mFollowBase.rename('{0}_followBase'.format(self.d_module['partName']))
mFollowBase.p_parent = self.mDeformNull
if mJaw:
log.debug("|{0}| >> Jaw setup...".format(_str_func))
mJaw.masterGroup.p_parent = mFollowParent
if not mMuzzle:
mFollowParent = mJaw
mUprTeeth = mRigNull.getMessageAsMeta('controlUprTeeth')
if mUprTeeth:
log.debug("|{0}| >> uprTeeth setup...".format(_str_func))
mUprTeeth.masterGroup.p_parent = mFollowParent
mTongue = mRigNull.getMessageAsMeta('controlTongue')
if mTongue:
log.debug("|{0}| >> tongue setup...".format(_str_func))
mTongue.masterGroup.p_parent = mJaw
if self.str_lipSetup:
log.debug("|{0}| >> lip setup...".format(_str_func)+ '-'*40)
log.debug("|{0}| >> mouth move...".format(_str_func))
mMouth = mRigNull.getMessageAsMeta('controlMouth')
log.debug("|{0}| >> mMouth: {1}".format(_str_func,mMouth))
mMouth.masterGroup.p_parent = mFollowParent
_str_mouth = mMouth.mNode
if mJaw:
mJawSpaceMouth = mMouth.doCreateAt(setClass=1)
mJawSpaceMouth.p_parent = mJaw
mJawSpaceMouth.rename('{0}_mouthJawSpace'.format(self.d_module['partName']))
mJawSpaceMouth.doGroup(True,asMeta=True,typeModifier = 'zero')
_str_mouthJawSpace = mJawSpaceMouth.mNode
#Wire our jaw space mouth move
for a in 'translate','rotate','scale':
ATTR.connect("{0}.{1}".format(_str_mouth,a), "{0}.{1}".format(_str_mouthJawSpace,a))
#mMouth.doConnectOut(a,mJawSpaceMouth.mNode)
#Lip handles ------------------------------------------------------
log.debug("|{0}| >> lip handles...".format(_str_func)+ '-'*20)
log.debug("|{0}| >> sort handles".format(_str_func)+ '-'*20)
mLeftCorner = self.md_handles['lipUpr']['left'][0]
mRightCorner = self.md_handles['lipUpr']['right'][0]
mUprCenter = self.md_handles['lipUpr']['center'][0]
mLwrCenter = self.md_handles['lipLwr']['center'][0]
ml_uprLeft = self.md_handles['lipUpr']['left'][1:]
ml_lwrLeft = self.md_handles['lipLwr']['left']
for ml in ml_uprLeft,ml_lwrLeft:
ml.reverse()
ml_uprLip = self.md_handles['lipUpr']['right'][1:] + ml_uprLeft#self.md_handles['lipUpr']['left'][1:]
ml_lwrLip = self.md_handles['lipLwr']['right'] + ml_lwrLeft#self.md_handles['lipLwr']['left']
ml_uprChain = self.md_handles['lipUpr']['right'][1:] + [mUprCenter] + ml_uprLeft#self.md_handles['lipUpr']['left'][1:]
ml_lwrChain = self.md_handles['lipLwr']['right'] + [mLwrCenter] + ml_lwrLeft#self.md_handles['lipLwr']['left']
for mHandle in mLeftCorner,mRightCorner:
log.debug("|{0}| >> lip handles | {1}".format(_str_func,mHandle))
if mJaw:
mHandle.masterGroup.p_parent = mFollowBase
mMainTrack = mHandle.doCreateAt(setClass=1)
mMainTrack.doStore('cgmName',mHandle)
mMainTrack.doStore('cgmType','mainTrack')
mMainTrack.doName()
mMainTrack.p_parent = mFollowParent
mJawTrack = mHandle.doCreateAt(setClass=1)
mJawTrack.doStore('cgmName',mHandle)
mJawTrack.doStore('cgmType','jawTrack')
mJawTrack.doName()
mJawTrack.p_parent = mJawSpaceMouth
mc.parentConstraint([mMainTrack.mNode,mJawTrack.mNode],
mHandle.masterGroup.mNode,
maintainOffset=True)
else:
mHandle.masterGroup.p_parent = mMouth
mUprCenter.masterGroup.p_parent = mMouth
if mJaw:
mLwrCenter.masterGroup.p_parent = mJawSpaceMouth
else:
mLwrCenter.masterGroup.p_parent = mMouth
#side handles ---------------------------
#First we're going to attach our handles to a surface to get general placement.
d_lipSetup = {'upr':{'ml_chain':[mRightCorner] + ml_uprChain + [mLeftCorner],
'mInfluences':[mRightCorner.uprInfluence,mUprCenter,mLeftCorner.uprInfluence],
'mHandles':ml_uprLip},
'lwr':{'ml_chain':[mRightCorner] + ml_lwrChain + [mLeftCorner],
'mInfluences':[mRightCorner.lwrInfluence,mLwrCenter,mLeftCorner.lwrInfluence],
'mHandles':ml_lwrLip}}
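#Per lip: loft a ribbon surface through the handle chain, skin it to the
#corner/center influences, then attach the in-between handles' masterGroups
#to that surface so the sparse controls drive the full handle row.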
for k,d in d_lipSetup.iteritems():
#need our handle chain to make a ribbon
ml_chain = d['ml_chain']
mInfluences = d['mInfluences']
l_surfaceReturn = IK.ribbon_createSurface([mJnt.mNode for mJnt in ml_chain],
'z+')
mControlSurface = cgmMeta.validateObjArg( l_surfaceReturn[0],'cgmObject',setClass = True )
mControlSurface.addAttr('cgmName',"{0}HandlesFollow_lip".format(k),attrType='string',lock=True)
mControlSurface.addAttr('cgmType','controlSurface',attrType='string',lock=True)
mControlSurface.doName()
mControlSurface.p_parent = _str_rigNull
log.debug("|{0}| >> Skinning surface: {1}".format(_str_func,mControlSurface))
mSkinCluster = cgmMeta.validateObjArg(mc.skinCluster ([mObj.mNode for mObj in mInfluences],
mControlSurface.mNode,
tsb=True,nurbsSamples=4,
maximumInfluences = 3,
normalizeWeights = 1,dropoffRate=10.0),
'cgmNode',
setClass=True)
mSkinCluster.doStore('cgmName', mControlSurface)
mSkinCluster.doName()
for mHandle in d['mHandles']:
mHandle.masterGroup.p_parent = mFollowParent
_resAttach = RIGCONSTRAINT.attach_toShape(mHandle.masterGroup.mNode,
mControlSurface.mNode,
'conParent')
TRANS.parent_set(_resAttach[0],_str_rigNull)
for mObj in [mControlSurface]:
mObj.overrideEnabled = 1
cgmMeta.cgmAttr(_str_rigNull,'gutsVis',lock=False).doConnectOut("%s.%s"%(mObj.mNode,'overrideVisibility'))
cgmMeta.cgmAttr(_str_rigNull,'gutsLock',lock=False).doConnectOut("%s.%s"%(mObj.mNode,'overrideDisplayType'))
mObj.parent = mRigNull
ml_lwrLeft = self.md_handles['lipLwr']['left']
ml_lwrRight = self.md_handles['lipLwr']['right']
d_lipAim = {'upr':{'left':self.md_handles['lipUpr']['left'][1:],
'right':self.md_handles['lipUpr']['right'][1:]},
'lwr':{'left':self.md_handles['lipLwr']['left'],
'right':self.md_handles['lipLwr']['right']}}
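#Aim each in-between handle at its neighbor toward the corner so the
#handles bank naturally when the corners move.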
for tag,sectionDat in d_lipAim.iteritems():
for side,sideDat in sectionDat.iteritems():
if side == 'left':
_aim = [-1,0,0]
_corner = mLeftCorner.mNode
else:
_aim = [1,0,0]
_corner = mRightCorner.mNode
for i,mJnt in enumerate(sideDat):
_mode = None
if not i:
_tar = _corner
else:
_tar=sideDat[i-1].mNode
mAimGroup = mJnt.doGroup(True,True,
asMeta=True,
typeModifier = 'aim',
setClass='cgmObject')
mc.aimConstraint(_tar,
mAimGroup.mNode,
maintainOffset = True, weight = 1,
aimVector = _aim,
upVector = [0,1,0],
worldUpVector = [0,1,0],
worldUpObject = mJnt.masterGroup.mNode,
worldUpType = 'objectRotation' )
#Lip Corner influences ------------------------------------------------------
log.debug("|{0}| >> lip corner influences...".format(_str_func)+ '-'*20)
for i,mHandle in enumerate([mRightCorner,mLeftCorner]):
mPlug_upr = cgmMeta.cgmAttr(mHandle,'twistUpper',value = 0,
attrType='float',defaultValue = 0.0,keyable = True,hidden = False)
mPlug_lwr = cgmMeta.cgmAttr(mHandle,'twistLower',value = 0,
attrType='float',defaultValue = 0.0,keyable = True,hidden = False)
if not i:
_aim = [-1,0,0]
else:
_aim = [1,0,0]
mUprInfluence = mHandle.uprInfluence
mLwrInfluence = mHandle.lwrInfluence
for ii,mInfl in enumerate([mUprInfluence,mLwrInfluence]):
if not ii:
_tar = mUprCenter.mNode
else:
_tar = mLwrCenter.mNode
mAimGroup = mInfl.doGroup(True,True,
asMeta=True,
typeModifier = 'aim',
setClass='cgmObject')
mc.aimConstraint(_tar,
mAimGroup.mNode,
maintainOffset = True, weight = 1,
aimVector = _aim,
upVector = [0,1,0],
worldUpVector = [0,1,0],
worldUpObject = mHandle.mNode,
worldUpType = 'objectRotation' )
if not i:# ['right']:# and k not in ['inner','outer']:
mPlug_upr.doConnectOut("{0}.rz".format(mHandle.uprInfluence.mNode))
mPlug_lwr.doConnectOut("{0}.rz".format(mHandle.lwrInfluence.mNode))
else:
str_arg1 = "{0}.rz = -{1}".format(mHandle.uprInfluence.mNode,
mPlug_upr.p_combinedShortName)
str_arg2 = "{0}.rz = -{1}".format(mHandle.lwrInfluence.mNode,
mPlug_lwr.p_combinedShortName)
for a in str_arg1,str_arg2:
NODEFACTORY.argsToNodes(a).doBuild()
if self.str_cheekSetup:
log.debug("|{0}| >> cheek setup...".format(_str_func)+ '-'*40)
_kws_attr = {'hidden':False, 'lock':False}
mPlug_jawRXPos = cgmMeta.cgmAttr(mJaw.mNode,
"cheek_rxPos",attrType = 'float',
value = 30,
defaultValue=30,minValue= 0,
**_kws_attr)
mPlug_jawTYPos = cgmMeta.cgmAttr(mJaw.mNode,
"cheek_tyPos",attrType = 'float',
value = 1.0,
defaultValue=1.0,minValue= 0,
**_kws_attr)
mPlug_jawTYNeg = cgmMeta.cgmAttr(mJaw.mNode,
"cheek_tyNeg",attrType = 'float',
value = 3.0,
defaultValue=3.0,
**_kws_attr)
mRemap_jawRX_pos = cgmMeta.cgmNode(name = "jawRX_pos_remap", nodeType = 'remapValue')
#mRemap_jawRX_neg = cgmMeta.cgmNode(name = "jawRX_neg_remap", nodeType = 'remapValue')
mRemap_jawTY_pos = cgmMeta.cgmNode(name = "jawTY_pos_remap", nodeType = 'remapValue')
mRemap_jawTY_neg = cgmMeta.cgmNode(name = "jawTY_neg_remap", nodeType = 'remapValue')
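#Remap nodes normalize jaw rx/ty into 0-1 push values; each remap's
#inputMax is wired to an attr on the jaw control so the ranges stay tunable.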
mRemap_jawRX_pos.doConnectIn('inputValue',"{0}.rx".format(mJaw.mNode))
#mRemap_jawRX_neg.doConnectIn('inputValue',"{0}.rx".format(mJaw.mNode))
mRemap_jawTY_pos.doConnectIn('inputValue',"{0}.ty".format(mJaw.mNode))
#mRemap_jawTY_neg.doConnectIn('inputValue',"{0}.ty".format(mJaw.mNode))
mRemap_jawRX_pos.doConnectIn('inputMax',mPlug_jawRXPos.p_combinedShortName)
mRemap_jawTY_pos.doConnectIn('inputMax',mPlug_jawTYPos.p_combinedShortName)
mRemap_jawTY_neg.doConnectIn('inputMax',mPlug_jawTYNeg.p_combinedShortName)
#mRemap_jawTY_neg.inputMax = 0
#"%s.condResult = if %s.ty == 3:5 else 1
l_argBuild = []
mPlug_jawTYNegInv = cgmMeta.cgmAttr(mJaw.mNode,
"jaw_tyNegInverse",attrType = 'float',
**_kws_attr)
l_argBuild.append("{0} = -{1}.ty".format(mPlug_jawTYNegInv.p_combinedName,
mJaw.mNode))
l_argBuild.append("{0}.inputValue = if {1}.ty < 0:{2} else 0".format(mRemap_jawTY_neg.mNode,
mJaw.mNode,
mPlug_jawTYNegInv.p_combinedName))
for arg in l_argBuild:
log.debug("|{0}| >> Building arg: {1}".format(_str_func,arg))
NODEFACTORY.argsToNodes(arg).doBuild()
"""
mRemap_jawRX_neg.inputMin = -180
mRemap_jawRX_neg.outputMax = -180
mRemap_jawRX_neg.inputMin = -180
mRemap_jawRX_neg.outputMax = -180"""
for k in ['cheekLeft','cheekRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
mHandle = mdD[k]
mdD[k].masterGroup.p_parent = self.mDeformNull
mc.parentConstraint([mFollowParent.mNode, mJaw.mNode],
mdD[k].masterGroup.mNode,maintainOffset=True)
mOffsetGroup = mdD[k].doGroup(True,asMeta=True,typeModifier = 'offset')
#offsetAmount, offsetThreshold
mHandle.addAttr('offsetOut',value = 1.0, attrType='float',visible=True)
mHandle.addAttr('offsetIn',value = -.5, attrType='float',visible=True)
l_outDrivers = []
l_inDrivers = []
for mRemap in mRemap_jawTY_neg,mRemap_jawRX_pos:
l_inDrivers.append("{0}.outValue".format(mRemap.mNode))
for mRemap in [mRemap_jawTY_pos]:
l_outDrivers.append("{0}.outValue".format(mRemap.mNode))
#Make our side rotate push...
#Wire up... -------------------------------------------------------------
mPlug_resOut = cgmMeta.cgmAttr(mHandle.mNode,
"res_Out",attrType = 'float',
lock=True)
mPlug_resIn = cgmMeta.cgmAttr(mHandle.mNode,
"res_In",attrType = 'float',
lock=True)
mPlug_blendOut = cgmMeta.cgmAttr(mHandle.mNode,
"blend_Out",attrType = 'float',
lock=True)
mPlug_blendIn = cgmMeta.cgmAttr(mHandle.mNode,
"blend_In",attrType = 'float',
lock=True)
mPlug_resBlend = cgmMeta.cgmAttr(mHandle.mNode,
"res_blend",attrType = 'float',
lock=True)
l_argBuild = []
#distance by master
l_argBuild.append("{0} = {1}".format(mPlug_blendOut.p_combinedName,
' + '.join(l_outDrivers)))
l_argBuild.append("{0} = {1} * {2}".format(mPlug_resOut.p_combinedName,
mPlug_blendOut.p_combinedName,
'{0}.offsetOut'.format(mHandle.mNode)))
l_argBuild.append("{0} = {1}".format(mPlug_blendIn.p_combinedName,
' + '.join(l_inDrivers)))
l_argBuild.append("{0} = {1} * {2}".format(mPlug_resIn.p_combinedName,
mPlug_blendIn.p_combinedName,
'{0}.offsetIn'.format(mHandle.mNode)))
l_argBuild.append("{0} = {1} + {2}".format(mPlug_resBlend.p_combinedName,
mPlug_resOut.p_combinedName,
mPlug_resIn.p_combinedName))
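#Net result: offsetGroup.tz = blendOut * offsetOut + blendIn * offsetIn,
#built as live math nodes via NODEFACTORY.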
for arg in l_argBuild:
log.debug("|{0}| >> Building arg: {1}".format(_str_func,arg))
NODEFACTORY.argsToNodes(arg).doBuild()
mOffsetGroup.doConnectIn('tz',mPlug_resBlend.p_combinedShortName)
"""
#Offset sdks ------------------------
inTangent='linear'
outTangent='linear'
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.rx".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 0,value = 0.0)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.rz".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 0,value = 0.0)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.ty".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 0,value = 0.0)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.tx".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 0,value = 0.0)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.rx".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 30,value = -1)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.ty".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = -4,value = -1)
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.ty".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 3,value = 1)
if k == 'cheekLeft':
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.rz".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = 20,value = 1)
else:
mc.setDrivenKeyframe("{0}.tz".format(mOffsetGroup.mNode),
currentDriver = "{0}.rz".format(mJaw.mNode),
itt=inTangent,ott=outTangent,
driverValue = -20,value = 1)"""
if self.str_sneerSetup:
log.debug("|{0}| >> sneer setup...".format(_str_func)+ '-'*40)
for k in ['sneerLeft','sneerRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
mdD[k].masterGroup.p_parent = self.mDeformNull
mTrack = mdD[k].masterGroup.doCreateAt(setClass=1)
mTrack.p_parent = mFollowParent
mTrack.rename("{0}_baseTrack".format(mdD[k].p_nameBase))
_c = mc.parentConstraint([mFollowBase.mNode, mTrack.mNode],
mdD[k].masterGroup.mNode,maintainOffset=True)[0]
targetWeights = mc.parentConstraint(_c,q=True,
weightAliasList=True)#maintainOffset is create-only, not queryable
ATTR.set(_c,targetWeights[0],.8)
ATTR.set(_c,targetWeights[1],.2)
if self.str_smileSetup:
log.debug("|{0}| >> smile setup...".format(_str_func)+ '-'*40)
for s in ['Left','Right']:
#k in ['smileLeft','smileRight']:
k = 'smile'+s
log.debug("|{0}| >> {1}...".format(_str_func,k))
mdD[k].masterGroup.p_parent = self.mDeformNull
mTrack = mdD[k].masterGroup.doCreateAt(setClass=1)
mTrack.p_parent = mFollowParent
mTrack.rename("{0}_baseTrack".format(mdD[k].p_nameBase))
l_targets = []
l_targets.append(self.md_handles['lipUpr'][s.lower()][0].mNode)
for k2 in ['nostril','cheek','noseBase','sneer','cheekUpr']:
if k2 not in ['noseBase']:
_k2 = k2+s
else:
_k2 = k2
if mdD.get(_k2):
l_targets.append(mdD.get(_k2).mNode)
_c = mc.pointConstraint(l_targets,
mTrack.mNode,maintainOffset=True)[0]
targetWeights = mc.pointConstraint(_c,q=True,
weightAliasList=True)#maintainOffset is create-only, not queryable
ATTR.set(_c,targetWeights[0],1.25)
#ATTR.set(_c,targetWeights[1],.9)
mc.parentConstraint(mTrack.mNode,
mdD[k].masterGroup.mNode,maintainOffset=True)
if self.str_cheekUprSetup:
log.debug("|{0}| >> cheekUpr setup...".format(_str_func)+ '-'*40)
for k in ['cheekUprLeft','cheekUprRight']:
log.debug("|{0}| >> {1}...".format(_str_func,k))
mdD[k].masterGroup.p_parent = self.mDeformNull
mTrack = mdD[k].masterGroup.doCreateAt(setClass=1)
mTrack.p_parent = mFollowParent
_c = mc.parentConstraint([mFollowBase.mNode, mTrack.mNode],
mdD[k].masterGroup.mNode,maintainOffset=True)[0]
targetWeights = mc.parentConstraint(_c,q=True,
weightAliasList=True)#maintainOffset is create-only, not queryable
ATTR.set(_c,targetWeights[0],.9)
ATTR.set(_c,targetWeights[1],.1)
if self.str_noseSetup:
log.debug("|{0}| >> nose setup...".format(_str_func)+ '-'*40)
mdD['noseBase'].masterGroup.p_parent = mDeformNull
mdD['noseBase'].addAttr('followMuzzle',value = .9, attrType='float',
minValue = 0, maxValue = 1.0, defaultValue = .9,
keyable = False)
mTrack = mdD['noseBase'].masterGroup.doCreateAt(setClass=1)
mTrack.p_parent = mFollowParent
#_c = mc.parentConstraint([mFollowBase.mNode, mTrack.mNode],
#mdD['noseBase'].masterGroup.mNode,maintainOffset=True)[0]
#targetWeights = mc.parentConstraint(_c,q=True,
# weightAliasList=True,
# maintainOffset=True)
RIGCONSTRAINT.blendChainsBy([mFollowBase.mNode],
[mTrack.mNode],[mdD['noseBase'].masterGroup.mNode],
driver = "{0}.followMuzzle".format(mdD['noseBase'].mNode),
l_constraints=['parent'],maintainOffset=True)
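#blendChainsBy sets up a parentConstraint blend between the stable follow
#base and the muzzle-space track, weighted by the followMuzzle attr.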
#ATTR.set(_c,targetWeights[0],.25)
#ATTR.set(_c,targetWeights[1],.75)
"""
mc.pointConstraint([mFollowBase.mNode, mTrack.mNode],
mdD['noseBase'].masterGroup.mNode,maintainOffset=True)
mc.aimConstraint(mFollowBase.mNode, mdD['noseBase'].masterGroup.mNode, maintainOffset = True,
aimVector = [0,1,0], upVector = [0,0,1],
worldUpObject = mFollowBase.mNode,
worldUpType = 'objectrotation',
worldUpVector = [0,0,1])"""
for k in ['noseBase','noseTip','nostrilLeft','nostrilRight']:
pass
return
except Exception,error:
cgmGEN.cgmExceptCB(Exception,error)
@cgmGEN.Timer
def rig_lipSegments(self):
_short = self.d_block['shortName']
_str_func = 'rig_lipSegments'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
mRootParent = self.mDeformNull
mModule = self.mModule
mDeformNull = self.mDeformNull
mFollowParent = self.mDeformNull
mFollowBase = self.mDeformNull
mMouth = mRigNull.getMessageAsMeta('controlMouth')
log.debug("|{0}| >> mMouth: {1}".format(_str_func,mMouth))
mdD = self.md_driverJoints
if not self.str_lipSetup:
log.debug("|{0}| >> No lip setup...".format(_str_func))
return False
log.debug("|{0}| >> sort influences".format(_str_func))
mLeftCorner = self.md_handles['lipUpr']['left'][0]
mRightCorner = self.md_handles['lipUpr']['right'][0]
mUprCenter = self.md_handles['lipUpr']['center'][0]
mLwrCenter = self.md_handles['lipLwr']['center'][0]
ml_uprLipInfluences = [mRightCorner.uprInfluence] + self.md_handles['lipUpr']['right'][1:] + self.md_handles['lipUpr']['center']+ self.md_handles['lipUpr']['left'][1:] + [mLeftCorner.uprInfluence]
ml_lwrLipInfluences = [mRightCorner.lwrInfluence] + self.md_handles['lipLwr']['right'] + self.md_handles['lipLwr']['center']+ self.md_handles['lipLwr']['left'] + [mLeftCorner.lwrInfluence]
log.debug("|{0}| >> sort driven".format(_str_func))
dUpr = self.md_rigJoints['lipUpr']
dLwr = self.md_rigJoints['lipLwr']
_revUprLeft = copy.copy(dUpr['left'])
_revLwrLeft = copy.copy(dLwr['left'])
for l in _revLwrLeft,_revUprLeft:
l.reverse()
ml_uprRig = dUpr['right'] + dUpr['center']+ _revUprLeft
ml_lwrRig = dLwr['right'] + dLwr['center']+ _revLwrLeft
mMidDag = cgmMeta.cgmObject(name='midSealMarker')
mMidDag.p_position = DIST.get_average_position([mUprCenter.p_position,
mLwrCenter.p_position])
mMidDag.p_parent = mDeformNull
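#midSealMarker sits halfway between the upr/lwr center handles and acts as
#the mid seal driver for the ribbon seal below.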
d_lips = {'driven1':ml_uprRig,
'driven2':ml_lwrRig,
'influences1':ml_uprLipInfluences,
'influences2':ml_lwrLipInfluences,
'baseName':'lipRibbons',
'settingsControl':mMouth,
'baseName1' :"uprLip",
'baseName2':"lwrLip",
'extendEnds':False,
'sealDriver1':mLeftCorner,
'sealDriver2':mRightCorner,
'sealDriverMid':mMidDag,#mUprCenter
'sealSplit':True,
'specialMode':'noStartEnd',#'endsToInfluences',
'moduleInstance':mModule,
'msgDriver':'driverJoint'}
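#ribbon_seal builds paired upr/lwr ribbons whose driven joints can blend
#('seal') toward a shared edge, using the corner and mid drivers above.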
#pprint.pprint(d_test)
reload(IK)
IK.ribbon_seal(**d_lips)
mc.parentConstraint(mLeftCorner.mNode, ml_uprRig[-1].masterGroup.mNode, maintainOffset = True)
mc.parentConstraint(mRightCorner.mNode, ml_uprRig[0].masterGroup.mNode, maintainOffset = True)
for mObj in ml_uprRig + ml_lwrRig:
mObj.driverJoint.p_parent = mDeformNull
return
"""
driven1 = [u'L_lip_corner_rig',u'L_lip_upr_rig',u'CTR_lip_upr_rig',u'R_lip_upr_rig',u'R_lip_corner_rig']
driven2 = [u'L_lip_corner_rig',u'L_lip_lwr_rig',u'CTR_lip_lwr_rig',u'R_lip_lwr_rig',u'R_lip_corner_rig']
influences1 =[u'L_lip_corner_anim',u'L_lip_upr_anim',u'CTR_lip_upr_anim',u'R_lip_upr_anim',u'R_lip_corner_anim']
influences2 =[u'L_lip_corner_anim',u'L_lip_lwr_anim',u'CTR_lip_lwr_anim',u'R_lip_lwr_anim',u'R_lip_corner_anim']
d_test = {'driven1':driven1,
'driven2':driven2,
'influences1':influences1,
'influences2':influences2,
'baseName':'lipRibbons',
'baseName1' :"uprLip",
'baseName2':"lwrLip",
'extendEnds':True,
'msgDriver':'driverGroup'}
reload(MORPHYUTILS)
MORPHYUTILS.ribbon_seal(**d_test) """
#Process our main controls ==============================================================
mMuzzle = mRigNull.getMessageAsMeta('controlMuzzle')
mJaw = mRigNull.getMessageAsMeta('controlJaw')
_str_rigNull = mRigNull.mNode
if mMuzzle:
log.debug("|{0}| >> Muzzle setup...".format(_str_func))
mMuzzle.masterGroup.p_parent = self.mDeformNull
mFollowParent = mMuzzle
mFollowBase = mMuzzle.doCreateAt('null',setClass=True)
mFollowBase.rename('{0}_followBase'.format(self.d_module['partName']))
mFollowBase.p_parent = self.mDeformNull
if mJaw:
log.debug("|{0}| >> Jaw setup...".format(_str_func))
mJaw.masterGroup.p_parent = mFollowParent
if not mMuzzle:
mFollowParent = mJaw
if self.str_lipSetup:
log.debug("|{0}| >> lip setup...".format(_str_func)+ '-'*40)
log.debug("|{0}| >> mouth move...".format(_str_func))
mMouth = mRigNull.getMessageAsMeta('controlMouth')
log.debug("|{0}| >> mMouth: {1}".format(_str_func,mMouth))
mMouth.masterGroup.p_parent = mFollowParent
mJawSpaceMouth = mMouth.doCreateAt(setClass=1)
mJawSpaceMouth.p_parent = mJaw
mJawSpaceMouth.rename('{0}_mouthJawSpace'.format(self.d_module['partName']))
mJawSpaceMouth.doGroup(True,asMeta=True,typeModifier = 'zero')
_str_mouth = mMouth.mNode
_str_mouthJawSpace = mJawSpaceMouth.mNode
#Wire our jaw space mouth move
for a in 'translate','rotate','scale':
ATTR.connect("{0}.{1}".format(_str_mouth,a), "{0}.{1}".format(_str_mouthJawSpace,a))
#mMouth.doConnectOut(a,mJawSpaceMouth.mNode)
#Lip handles ------------------------------------------------------
log.debug("|{0}| >> lip handles...".format(_str_func)+ '-'*20)
log.debug("|{0}| >> sort handles".format(_str_func)+ '-'*20)
mLeftCorner = self.md_handles['lipUpr']['left'][0]
mRightCorner = self.md_handles['lipUpr']['right'][0]
mUprCenter = self.md_handles['lipUpr']['center'][0]
mLwrCenter = self.md_handles['lipLwr']['center'][0]
ml_uprLip = self.md_handles['lipUpr']['right'][1:] + self.md_handles['lipUpr']['left'][1:]
ml_lwrLip = self.md_handles['lipLwr']['right'] + self.md_handles['lipLwr']['left']
for mHandle in mLeftCorner,mRightCorner:
log.debug("|{0}| >> lip handles | {1}".format(_str_func,mHandle))
mHandle.masterGroup.p_parent = mFollowBase
mMainTrack = mHandle.doCreateAt(setClass=1)
mMainTrack.doStore('cgmName',mHandle)
mMainTrack.doStore('cgmType','mainTrack')
mMainTrack.doName()
mMainTrack.p_parent = mFollowBase
mJawTrack = mHandle.doCreateAt(setClass=1)
mJawTrack.doStore('cgmName',mHandle)
mJawTrack.doStore('cgmType','jawTrack')
mJawTrack.doName()
mJawTrack.p_parent = mJawSpaceMouth
mc.parentConstraint([mMainTrack.mNode,mJawTrack.mNode],
mHandle.masterGroup.mNode,
maintainOffset=True)
mUprCenter.masterGroup.p_parent = mMouth
mLwrCenter.masterGroup.p_parent = mJawSpaceMouth
#side handles ---------------------------
d_lipSetup = {'upr':{'ml_chain':[mRightCorner] + ml_uprLip + [mLeftCorner],
'mInfluences':[mRightCorner,mUprCenter,mLeftCorner],
'mHandles':ml_uprLip},
'lwr':{'ml_chain':[mRightCorner] + ml_lwrLip + [mLeftCorner],
'mInfluences':[mRightCorner,mLwrCenter,mLeftCorner],
'mHandles':ml_lwrLip} }
for k,d in d_lipSetup.iteritems():
#need our handle chain to make a ribbon
ml_chain = d['ml_chain']
mInfluences = d['mInfluences']
l_surfaceReturn = IK.ribbon_createSurface([mJnt.mNode for mJnt in ml_chain],
'z+')
mControlSurface = cgmMeta.validateObjArg( l_surfaceReturn[0],'cgmObject',setClass = True )
mControlSurface.addAttr('cgmName',"{0}HandlesFollow_lip".format(k),attrType='string',lock=True)
mControlSurface.addAttr('cgmType','controlSurface',attrType='string',lock=True)
mControlSurface.doName()
mControlSurface.p_parent = _str_rigNull
log.debug("|{0}| >> Skinning surface: {1}".format(_str_func,mControlSurface))
mSkinCluster = cgmMeta.validateObjArg(mc.skinCluster ([mObj.mNode for mObj in mInfluences],
mControlSurface.mNode,
tsb=True,
maximumInfluences = 2,
normalizeWeights = 1,dropoffRate=5.0),
'cgmNode',
setClass=True)
mSkinCluster.doStore('cgmName', mControlSurface)
mSkinCluster.doName()
for mHandle in d['mHandles']:
mHandle.masterGroup.p_parent = mFollowParent
_resAttach = RIGCONSTRAINT.attach_toShape(mHandle.masterGroup.mNode,
mControlSurface.mNode,
'conParent')
TRANS.parent_set(_resAttach[0],_str_rigNull)
for mObj in [mControlSurface]:
mObj.overrideEnabled = 1
cgmMeta.cgmAttr(_str_rigNull,'gutsVis',lock=False).doConnectOut("%s.%s"%(mObj.mNode,'overrideVisibility'))
cgmMeta.cgmAttr(_str_rigNull,'gutsLock',lock=False).doConnectOut("%s.%s"%(mObj.mNode,'overrideDisplayType'))
mObj.parent = mRigNull
@cgmGEN.Timer
def rig_cleanUp(self):
_short = self.d_block['shortName']
_str_func = 'rig_cleanUp'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
mMasterControl= self.d_module['mMasterControl']
mMasterDeformGroup= self.d_module['mMasterDeformGroup']
mMasterNull = self.d_module['mMasterNull']
mModuleParent = self.d_module['mModuleParent']
mPlug_globalScale = self.d_module['mPlug_globalScale']
#Settings =================================================================================
#log.debug("|{0}| >> Settings...".format(_str_func))
#mSettings.visDirect = 0
#mPlug_FKIK = cgmMeta.cgmAttr(mSettings,'FKIK')
#mPlug_FKIK.p_defaultValue = 1
#mPlug_FKIK.value = 1
mTongue = mRigNull.getMessageAsMeta('controlTongue')
mUprTeeth = mRigNull.getMessageAsMeta('controlTeethUpr')
mMuzzle = mRigNull.getMessageAsMeta('controlMuzzle')
mJaw = mRigNull.getMessageAsMeta('controlJaw')
if mTongue:
mChild = mTongue
#Get space stuff
ml_targetDynParents = []#self.ml_dynParentsAbove + self.ml_dynEndParents
ml_targetDynParents.append(self.md_dynTargetsParent['world'])
#ml_targetDynParents.extend(mControlIK.msgList_get('spacePivots',asMeta = True))
if mJaw:
ml_targetDynParents.insert(0,mJaw)
if mUprTeeth:
ml_targetDynParents.insert(1,mUprTeeth)
if mMuzzle:
ml_targetDynParents.insert(1,mMuzzle)
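#Space switch targets for the tongue: jaw is inserted first, then muzzle
#and upper teeth, with world last.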
mDynGroup = cgmRIGMETA.cgmDynParentGroup(dynChild=mChild,dynMode=0)
for mTar in ml_targetDynParents:
mDynGroup.addDynParent(mTar)
mDynGroup.rebuild()
log.debug("|{0}| >> IK targets...".format(_str_func))
#pprint.pprint(ml_targetDynParents)
log.debug(cgmGEN._str_subLine)
#Lock and hide =================================================================================
ml_controls = mRigNull.msgList_get('controlsAll')
self.UTILS.controls_lockDown(ml_controls)
if not mBlock.scaleSetup:
log.debug("|{0}| >> No scale".format(_str_func))
ml_controlsToLock = copy.copy(ml_controls)
for mCtrl in ml_controlsToLock:
ATTR.set_standardFlags(mCtrl.mNode, ['scale'])
else:
log.debug("|{0}| >> scale setup...".format(_str_func))
self.mDeformNull.dagLock(True)
#Close out ===============================================================================================
mRigNull.version = self.d_block['buildVersion']
mBlock.blockState = 'rig'
mBlock.UTILS.set_blockNullFormState(mBlock)
self.UTILS.rigNodes_store(self)
def create_simpleMesh(self, deleteHistory = True, cap=True, **kws):
_str_func = 'create_simpleMesh'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
#>> Head ===================================================================================
log.debug("|{0}| >> Head...".format(_str_func))
mGroup = self.msgList_get('headMeshProxy')[0].getParent(asMeta=True)
l_headGeo = mGroup.getChildren(asMeta=False)
ml_headStuff = []
for i,o in enumerate(l_headGeo):
log.debug("|{0}| >> geo: {1}...".format(_str_func,o))
if ATTR.get(o,'v'):
log.debug("|{0}| >> visible head: {1}...".format(_str_func,o))
mObj = cgmMeta.validateObjArg(mc.duplicate(o, po=False, ic = False)[0])
ml_headStuff.append( mObj )
mObj.p_parent = False
if self.neckBuild:#...Neck =====================================================================
log.debug("|{0}| >> neckBuild...".format(_str_func))
ml_neckMesh = self.UTILS.create_simpleLoftMesh(self,deleteHistory,cap)
ml_headStuff.extend(ml_neckMesh)
_mesh = mc.polyUnite([mObj.mNode for mObj in ml_headStuff],ch=False)
_mesh = mc.rename(_mesh,'{0}_0_geo'.format(self.p_nameBase))
return cgmMeta.validateObjListArg(_mesh)
def asdfasdfasdf(self, forceNew = True, skin = False):
"""
Unfinished scratch variant of build_proxyMesh
"""
_short = self.d_block['shortName']
_str_func = 'build_proxyMesh'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
mHeadIK = mRigNull.headIK
mSettings = mRigNull.settings
mPuppetSettings = self.d_module['mMasterControl'].controlSettings
ml_rigJoints = mRigNull.msgList_get('rigJoints',asMeta = True)
if not ml_rigJoints:
raise ValueError,"No rigJoints connected"
#>> If proxyMesh there, delete ---------------------------------------------------------------------------
_bfr = mRigNull.msgList_get('proxyMesh',asMeta=True)
if _bfr:
log.debug("|{0}| >> proxyMesh detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
#>> Head ===================================================================================
log.debug("|{0}| >> Head...".format(_str_func))
directProxy = mBlock.getMayaAttr('proxyDirect')#assumed block attr; see build_proxyMeshBAK
if directProxy:
log.debug("|{0}| >> directProxy... ".format(_str_func))
_settings = self.mRigNull.settings.mNode
mGroup = mBlock.msgList_get('headMeshProxy')[0].getParent(asMeta=True)
l_headGeo = mGroup.getChildren(asMeta=False)
l_vis = mc.ls(l_headGeo, visible = True)
ml_headStuff = []
for i,o in enumerate(l_vis):
log.debug("|{0}| >> visible head: {1}...".format(_str_func,o))
mObj = cgmMeta.validateObjArg(mc.duplicate(o, po=False, ic = False)[0])
ml_headStuff.append( mObj )
mObj.parent = ml_rigJoints[-1]
ATTR.copy_to(ml_rigJoints[-1].mNode,'cgmName',mObj.mNode,driven = 'target')
mObj.addAttr('cgmIterator',i)
mObj.addAttr('cgmType','proxyGeo')
mObj.doName()
if directProxy:
CORERIG.shapeParent_in_place(ml_rigJoints[-1].mNode,mObj.mNode,True,False)
CORERIG.colorControl(ml_rigJoints[-1].mNode,_side,'main',directProxy=True)
if mBlock.neckBuild:#...Neck =====================================================================
log.debug("|{0}| >> neckBuild...".format(_str_func))
def build_proxyMesh(self, forceNew = True, puppetMeshMode = False):
"""
Build our proxyMesh
"""
_str_func = 'build_proxyMesh'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self
mModule = self.moduleTarget
mRigNull = mModule.rigNull
mDeformNull = mModule.deformNull
#mSettings = mRigNull.settings
mPuppet = self.atUtils('get_puppet')
mMaster = mPuppet.masterControl
mPuppetSettings = mMaster.controlSettings
str_partName = mModule.get_partNameBase()
mPrerigNull = mBlock.prerigNull
_side = BLOCKUTILS.get_side(self)
ml_rigJoints = mRigNull.msgList_get('rigJoints',asMeta = True)
if not ml_rigJoints:
raise ValueError,"No rigJoints connected"
self.v_baseSize = [mBlock.blockScale * v for v in mBlock.baseSize]
#>> If proxyMesh there, delete ---------------------------------------------------------------------------
if puppetMeshMode:
_bfr = mRigNull.msgList_get('puppetProxyMesh',asMeta=True)
if _bfr:
log.debug("|{0}| >> puppetProxyMesh detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
else:
_bfr = mRigNull.msgList_get('proxyMesh',asMeta=True)
if _bfr:
log.debug("|{0}| >> proxyMesh detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
ml_proxy = []
ml_curves = []
ml_rigJoints = mRigNull.msgList_get('rigJoints')
ml_use = []
for mObj in ml_rigJoints:
if 'teeth' in mObj.mNode:
pass
elif 'tongue' in mObj.mNode:
pass
else:
ml_use.append(mObj)
ml_rigJoints = ml_use
ml_new = []
mTeethUpr = mPrerigNull.getMessageAsMeta('teethUprJoint')
mTeethLwr = mPrerigNull.getMessageAsMeta('teethLwrJoint')
mTongue = mPrerigNull.getMessageAsMeta('tongueJoint')
if mTeethUpr:
log.debug("|{0}| >> mTeethUpr".format(_str_func)+'-'*40)
#Let's gather our proxy mesh
for lnk in ['jaw','nose','uprLip','lwrLip','overLip','underLip','noseToCheekLeft','noseToCheekRight',
'lipToChin','uprJoinLeft','uprJoinRight']:
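#For each form loft region: duplicate the loft, tessellate it to a poly
#proxy, and skin it to the face rig joints (excluding the opposite lip's
#joints for the lip lofts).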
mBase = mBlock.getMessageAsMeta(lnk+'FormLoft')
if mBase:
log.debug("|{0}| >> On: {1}".format(_str_func,lnk)+'-'*40)
mLoftSurface = mBase.doDuplicate(po=False,ic=False)
_surf = mc.nurbsToPoly(mLoftSurface.mNode, mnd=1, f=0,
pt = 1,ch=0, pc=200, chr = .9,
ft=.01, mel = .001, d = .1, ut=1, un = 3,
vt=1, vn=3, uch = 0, cht = .01, ntr = 0, mrt = 0, uss = 1 )
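#Tessellate the duplicated loft to polys (history off), then delete the
#leftover NURBS surface and keep the poly result.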
#mLoftSurface.p_parent=False
mLoftSurface.delete()
mNew = cgmMeta.asMeta(_surf[0])
ml_new.append(mNew)
mNew.p_parent = False
mNew.doStore('cgmName',lnk)
mNew.doName()
ml_use = copy.copy(ml_rigJoints)
ml_remove = []
if lnk in ['uprLip','overLip']:
for mObj in ml_use:
#if 'LWR_lip' in mObj.mNode:
if 'lwrLip' in mObj.mNode:
log.debug("|{0}| >> removing: {1}".format(_str_func,mObj))
ml_remove.append(mObj)
#if mObj.getMayaAttr('cgmPosition') == 'lwr' and mObj.cgmName == 'lip':
# log.debug("|{0}| >> removing: {1}".format(_str_func,mObj))
# ml_remove.append(mObj)
if lnk in ['lwrLip','underLip']:
for mObj in ml_use:
if 'uprLip' in mObj.mNode:
log.debug("|{0}| >> removing: {1}".format(_str_func,mObj))
ml_remove.append(mObj)
#if 'UPR_lip' in mObj.mNode:
#if mObj.getMayaAttr('cgmPosition') == 'upr' and mObj.cgmName == 'lip':
#ml_remove.append(mObj)
#log.debug("|{0}| >> removing: {1}".format(_str_func,mObj))
for mObj in ml_remove:
ml_use.remove(mObj)
log.debug("|{0}| >> Skinning surface: {1}".format(_str_func,mNew))
mSkinCluster = cgmMeta.validateObjArg(mc.skinCluster ([mObj.mNode for mObj in ml_use],
mNew.mNode,
tsb=True,
maximumInfluences = 3,
heatmapFalloff = 1.0,
bindMethod = 0,
normalizeWeights = 1,dropoffRate=10.0),
'cgmNode',
setClass=True)
mSkinCluster.doStore('cgmName', mNew)
mSkinCluster.doName()
ml_proxy.append(mNew)
for mProxy in ml_proxy:
CORERIG.colorControl(mProxy.mNode,_side,'main',transparent=False,proxy=True)
#mc.makeIdentity(mProxy.mNode, apply = True, t=1, r=1,s=1,n=0,pn=1)
#Vis connect -----------------------------------------------------------------------
mProxy.overrideEnabled = 1
ATTR.connect("{0}.proxyVis".format(mPuppetSettings.mNode),"{0}.visibility".format(mProxy.mNode) )
ATTR.connect("{0}.proxyLock".format(mPuppetSettings.mNode),"{0}.overrideDisplayType".format(mProxy.mNode) )
for mShape in mProxy.getShapes(asMeta=1):
str_shape = mShape.mNode
mShape.overrideEnabled = 0
#ATTR.connect("{0}.proxyVis".format(mPuppetSettings.mNode),"{0}.visibility".format(str_shape) )
ATTR.connect("{0}.proxyLock".format(mPuppetSettings.mNode),"{0}.overrideDisplayTypes".format(str_shape) )
#if directProxy:
# for mObj in ml_rigJoints:
# for mShape in mObj.getShapes(asMeta=True):
#mShape.overrideEnabled = 0
# mShape.overrideDisplayType = 0
# ATTR.connect("{0}.visDirect".format(_settings), "{0}.overrideVisibility".format(mShape.mNode))
mRigNull.msgList_connect('proxyMesh', ml_proxy + ml_curves)
def build_proxyMeshBAK(self, forceNew = True, puppetMeshMode = False):
"""
Build our proxyMesh
"""
_short = self.d_block['shortName']
_str_func = 'build_proxyMesh'
log.debug("|{0}| >> ".format(_str_func)+ '-'*80)
log.debug("{0}".format(self))
mBlock = self.mBlock
mRigNull = self.mRigNull
#mSettings = mRigNull.settings
mPuppetSettings = self.d_module['mMasterControl'].controlSettings
mPrerigNull = mBlock.prerigNull
#directProxy = mBlock.proxyDirect
_side = BLOCKUTILS.get_side(self.mBlock)
ml_rigJoints = mRigNull.msgList_get('rigJoints',asMeta = True)
if not ml_rigJoints:
raise ValueError,"No rigJoints connected"
self.v_baseSize = [mBlock.blockScale * v for v in mBlock.baseSize]
#>> If proxyMesh there, delete ---------------------------------------------------------------------------
if puppetMeshMode:
_bfr = mRigNull.msgList_get('puppetProxyMesh',asMeta=True)
if _bfr:
log.debug("|{0}| >> puppetProxyMesh detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
else:
_bfr = mRigNull.msgList_get('proxyMesh',asMeta=True)
if _bfr:
log.debug("|{0}| >> proxyMesh detected...".format(_str_func))
if forceNew:
log.debug("|{0}| >> force new...".format(_str_func))
mc.delete([mObj.mNode for mObj in _bfr])
else:
return _bfr
ml_proxy = []
ml_curves = []
#Jaw -------------
mJaw = mRigNull.getMessageAsMeta('controlJaw')
if mJaw:
log.debug("|{0}| >> jaw...".format(_str_func))
mLoftSurface = mBlock.jawFormLoft.doDuplicate(po=False,ic=False)
#nurbsToPoly -mnd 1 -ch 1 -f 1 -pt 1 -pc 200 -chr 0.9 -ft 0.01 -mel 0.001 -d 0.1 -ut 1 -un 3 -vt 1 -vn 3 -uch 0 -ucr 0 -cht 0.01 -es 0 -ntr 0 -mrt 0 -uss 1 "jaw_fk_anim_Transform";
_surf = mc.nurbsToPoly(mLoftSurface.mNode, mnd=1, f=1, pt = 1,ch=0, pc=200, chr = .9, ft=.01, mel = .001, d = .1, ut=1, un = 3, vt=1, vn=3, uch = 0, cht = .01, ntr = 0, mrt = 0, uss = 1 )
mDag = mJaw.doCreateAt()
CORERIG.shapeParent_in_place(mDag.mNode,_surf,False)
ml_proxy.append(mDag)
#mLoftSurface.p_parent = False
mDag.p_parent = mJaw
ml_drivers = mRigNull.msgList_get('driverJoints')
for mObj in ml_drivers:
if mObj.getMayaAttr('cgmName')=='noseBase':
log.debug("|{0}| >> noseBase...".format(_str_func))
mLoftSurface = mBlock.noseFormLoft.doDuplicate(po=False,ic=False)
_surf = mc.nurbsToPoly(mLoftSurface.mNode, mnd=1, f=1, pt = 1,ch=0, pc=200, chr = .9, ft=.01, mel = .001, d = .1, ut=1, un = 3, vt=1, vn=3, uch = 0, cht = .01, ntr = 0, mrt = 0, uss = 1 )
mDag = mObj.doCreateAt()
CORERIG.shapeParent_in_place(mDag.mNode,_surf,False)
ml_proxy.append(mDag)
#mLoftSurface.p_parent = False
mDag.p_parent = mObj
for mProxy in ml_proxy:
CORERIG.colorControl(mProxy.mNode,_side,'main',transparent=False,proxy=True)
mc.makeIdentity(mProxy.mNode, apply = True, t=1, r=1,s=1,n=0,pn=1)
#Vis connect -----------------------------------------------------------------------
mProxy.overrideEnabled = 1
ATTR.connect("{0}.proxyVis".format(mPuppetSettings.mNode),"{0}.visibility".format(mProxy.mNode) )
ATTR.connect("{0}.proxyLock".format(mPuppetSettings.mNode),"{0}.overrideDisplayType".format(mProxy.mNode) )
for mShape in mProxy.getShapes(asMeta=1):
str_shape = mShape.mNode
mShape.overrideEnabled = 0
#ATTR.connect("{0}.proxyVis".format(mPuppetSettings.mNode),"{0}.visibility".format(str_shape) )
ATTR.connect("{0}.proxyLock".format(mPuppetSettings.mNode),"{0}.overrideDisplayTypes".format(str_shape) )
#if directProxy:
# for mObj in ml_rigJoints:
# for mShape in mObj.getShapes(asMeta=True):
#mShape.overrideEnabled = 0
# mShape.overrideDisplayType = 0
# ATTR.connect("{0}.visDirect".format(_settings), "{0}.overrideVisibility".format(mShape.mNode))
mRigNull.msgList_connect('proxyMesh', ml_proxy + ml_curves)
#UI ================================================================================================
def uiFunc_getDefineScaleSpace(self):
ml_handles = self.msgList_get('defineHandles')
#Filter in one pass; removing while iterating skips elements
ml_handles = [mObj for mObj in ml_handles if 'Left' not in mObj.handleTag]
self.atUtils('get_handleScaleSpace',ml_handles)
_handleKey = {'define':'defineSubHandles',
'form':'formHandles',
'prerig':'prerigHandles'}
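#Maps the block's current state to the msgList holding that state's handles.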
def uiFunc_sealFix(self, ml = None, reset = False):
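#Stores each selected handle's world pose onto its mTrackSeal target and
#zeroes the handle (or, with reset=True, snaps the seal back to mTrackBase).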
if not ml:
ml = cgmMeta.asMeta(mc.ls(sl=1))
if not ml:
log.warning("Nothing Selected")
return False
md = {}
for mObj in ml:
mSeal = mObj.getMessageAsMeta('mTrackSeal')
mBase = mObj.getMessageAsMeta('mTrackBase')
if not mSeal:
log.warning("Lacks seal: {0}".format(mObj))
continue
if reset:
mSeal.p_position = mBase.p_position
mSeal.p_orient = mBase.p_orient
mObj.resetAttrs()
continue
#if reset:
# pass
#else:
#TRANS.relativePos_get(mBase.mNode,mObj.mNode)
md[mObj] = {'pos':mObj.p_position,
'orient':mObj.p_orient,
'mSeal':mSeal}
mObj.resetAttrs()
if reset:
log.warning("LipSeal Reset.")
return
for mObj in ml:
if not md.get(mObj):
continue
d = md[mObj]
mSeal = d['mSeal']
mSeal.p_position = d['pos']
mSeal.p_orient = d['orient']
#LOC.create(position = d['pos'],name="{0}_sealLoc".format(mObj.p_nameShort))
pprint.pprint(md)
log.warning("LipSeal Set.")
def uiFunc_snapStateHandles(self,ml=None):
if not ml:
ml = cgmMeta.asMeta(mc.ls(sl=1))
if not ml:
log.warning("Nothing Selected")
return False
_state = self.p_blockState
ml_handles = self.msgList_get(_handleKey.get(_state))
for mObj in ml_handles:
try:mObj.p_position = DIST.get_closest_point(mObj.mNode, ml[0].mNode)[0]
except Exception,err:
log.warning("Failed to snap: {0} | {1}".format(mObj.mNode,err))
def uiBuilderMenu(self,parent = None):
#uiMenu = mc.menuItem( parent = parent, l='Head:', subMenu=True)
_short = self.p_nameShort
mc.menuItem(en=False,divider=True,
label = "|| Muzzle")
mc.menuItem(ann = '[{0}] Get Define scale space values'.format(_short),
c = cgmGEN.Callback(uiFunc_getDefineScaleSpace,self),
label = "Get Define Scale Space Dat")
mc.menuItem(ann = '[{0}] Snap state handles'.format(_short),
c = cgmGEN.Callback(uiFunc_snapStateHandles,self),
label = "Snap the state handles to selected")
_sub = mc.menuItem(en=True,subMenu = True,tearOff=True,
label = "Seal Fix")
mc.menuItem(ann = '[{0}] Seal Fix Set'.format(_short),
c = cgmGEN.Callback(uiFunc_sealFix,self,reset=False),
label = "Set")
mc.menuItem(ann = '[{0}] Seal Fix Reset'.format(_short),
c = cgmGEN.Callback(uiFunc_sealFix,self,reset=True),
label = "Reset")
"""
mc.menuItem(en=True,divider = True,
label = "Utilities")
_sub = mc.menuItem(en=True,subMenu = True,tearOff=True,
label = "State Picker")
"""
#self.atUtils('uiStatePickerMenu',parent)
return
| [
"[email protected]"
] | |
0b7c562f6f37bd3f506e5bbdc00055ef7da3bb3b | ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca | /phase4/260.py | 651673aea432957ccfbb450b4c912f91b85e1222 | [] | no_license | GavinPHR/code | 8a319e1223a307e755211b7e9b34c5abb00b556b | b1d8d49633db362bbab246c0cd4bd28305964b57 | refs/heads/master | 2020-05-16T04:09:19.026207 | 2020-04-30T10:00:06 | 2020-04-30T10:00:06 | 182,766,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # Single Number III
from typing import List
class Solution:
def singleNumber(self, nums: List[int]) -> List[int]:
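# The set toggles membership per value, leaving only the two numbers that
# appear once; an O(1)-space alternative would XOR all values to get a^b,
# isolate a set bit, and XOR each partition to recover a and b.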
s = set()
for n in nums:
if n in s:
s.remove(n)
else:
s.add(n)
return list(s) | [
"[email protected]"
] | |
b7bc73e1f99352427bf7b271d7a94b320b253ffb | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /executale_binaries/register-variants/vmovapd_ymm_ymm.gen.vex.py | 2c67bcd6beddc36d418754a140f4a9d4de0e869a | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import angr
proj = angr.Project('vmovapd_ymm_ymm.exe')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
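# pp() below pretty-prints the lifted VEX IR statements for the entry block.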
irsb.pp()
| [
"[email protected]"
] | |
4bde09ff5b4dcd8af235f043ea5b05674c5e581d | 35b6013c1943f37d1428afd2663c8aba0a02628d | /functions/v2/log/helloworld/main_test.py | 4a2633ea2fdedff862401585737661a6830f6308 | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 715 | py | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import main
def test_hello_world(capsys):
main.hello_world(None)
out, _ = capsys.readouterr()
assert "Hello, stdout!" in out
| [
"[email protected]"
] | |
b3ed0d89e201a0a78da0131558224b973b5b5960 | 80b2700b6f9940ee672f42124b2cb8a81836426e | /exception/test1.py | a4c837a4d50be53be15bca32d9979f2a5b1b85fd | [
"Apache-2.0"
] | permissive | Vayne-Lover/Python | 6c1ac5c0d62ecdf9e3cf68d3e659d49907bb29d4 | 79cfe3d6971a7901d420ba5a7f52bf4c68f6a1c1 | refs/heads/master | 2020-04-12T08:46:13.128989 | 2017-04-21T06:36:40 | 2017-04-21T06:36:40 | 63,305,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | #!/usr/local/bin/python
#class MuffledCalculator:
# muffled=False
# def calc(self,expr):
# try:
# return eval(expr)
# except (ZeroDivisionError,TypeError):
# if self.muffled:
# print "There are errors."
# else:
# raise
#a=MuffledCalculator()
#print a.calc('2/1')
##print a.calc('2/"dsf"')
##print a.calc('1/0')
#a.muffled=True
#print a.calc('1/0')
#class Test:
# def init(self):
# try:
# x=1
# y='sg'
# print x/y
# except (ZeroDivisionError,TypeError),e:
# print e
#a=Test()
#a.init()
#class Test1:
# def init(self):
# try:
# x=1
# y='sg'
# print x/y
# except Exception,e:
# print e
#a=Test1()
#a.init()
#try:
# print 'Go!'
#except Exception,e:
# print e
#else:
# print 'Planned.'
x=1
try:
x=2
print x
x=1/0
except Exception,e:
x=3
print x
print e
finally:
x=4
print x
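#The except block catches the ZeroDivisionError from 1/0 and finally still
#runs, so this prints 2, then 3 and the error message, then 4.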
| [
"[email protected]"
] | |
11ad2db84b11bef1ebb51299256895b7dd1552f4 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/periodicities/Semi_Month/Cycle_Semi_Month_400_SM_15.py | 1477a3a264d5e12e25a8d1f21b4d5ceb61a8a02c | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 83 | py | import tests.periodicities.period_test as per
per.buildModel((15 , 'SM' , 400));
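# The tuple appears to be (cycle length 15, pandas 'SM' semi-month
# frequency, 400 data points) for this generated periodicity test.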
| [
"[email protected]"
] | |
af969107fa1b60317809a206e8afae54a0ac999b | 25e989e986522cf91365a6cc51e3c68b3d29351b | /app/http/controllers/TeamController.py | 1f26e8f10cb79cc093d37b55e0170ed7b77e05c1 | [
"MIT"
] | permissive | josephmancuso/gbaleague-masonite2 | ff7a3865927705649deea07f68d89829b2132d31 | b3dd5ec3f20c07eaabcc3129b0c50379a946a82b | refs/heads/master | 2022-05-06T10:47:21.809432 | 2019-03-31T22:01:04 | 2019-03-31T22:01:04 | 136,680,885 | 0 | 1 | MIT | 2022-03-21T22:16:43 | 2018-06-09T01:33:01 | Python | UTF-8 | Python | false | false | 898 | py | ''' A Module Description '''
from app.Team import Team
from masonite import Upload
class TeamController:
''' Controller for creating teams '''
def show(self):
if request().has('back'):
request().session.flash('back', request().input('back'))
return view('create/team')
def store(self, upload: Upload):
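# 'logo' is an optional file input: with no upload the input has no
# .filename attribute, so the AttributeError falls back to an empty string.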
try:
logo = request().input('logo').filename
except AttributeError:
logo = ''
create_team = Team.create(
name=request().input('name'),
owner_id=auth().id,
picture=logo
)
# upload logo
if logo:
upload.store(request().input('logo'))
if create_team:
return request().back(default='create/team?message=Created Successfully')
return request().redirect('/create/team?message=Could Not Create Team')
| [
"[email protected]"
] | |
188bb4f99769e74fbcea03d37051f0bf96544b9b | 51885da54b320351bfea42c7dd629f41985454cd | /abc181/d.py | 9a094d524253759e71cbe9da0075af9308c3e941 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | #
# abc181 d
#
import sys
from io import StringIO
import unittest
from collections import Counter
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """1234"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """1333"""
output = """No"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """8"""
output = """Yes"""
self.assertIO(input, output)
def resolve():
n = input()
if len(n) <= 2:
if int(n) % 8 == 0 or int(n[::-1]) % 8 == 0:
print("Yes")
else:
print("No")
exit()
cnt = Counter(n)
for i in range(112, 1000, 8):
tmp = Counter(str(i)) - cnt
print(tmp)
if not tmp:
print("Yes")
break
if __name__ == "__main__":
unittest.main()
# resolve()
| [
"[email protected]"
] | |
cbeb14309190629a8358f2fec79c0b07079b6bd8 | 1f620140538728b25fd0181e493975534aa0e1fb | /project/basis/test/test_admin.py | b83678eb911cbb41cdc1dfa79cf4df613d45e9b4 | [] | no_license | YukiUmetsu/recipe-app-api-python-django | 2a22f63871489cd073d5c312e20fd9fe49eee5a5 | abaf4a0826e840e990781b20aaa5d7f0577c54c5 | refs/heads/master | 2022-11-30T03:11:16.129881 | 2020-03-03T20:04:00 | 2020-03-03T20:04:00 | 244,045,701 | 0 | 0 | null | 2022-11-22T05:21:23 | 2020-02-29T21:41:25 | Python | UTF-8 | Python | false | false | 1,344 | py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
"""set up admin user (logged in) and normal user"""
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password1234'
)
self.client.force_login(self.admin_user)
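# force_login bypasses the authentication backends so admin views can be
# requested directly in each test.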
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password1234',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:basis_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse('admin:basis_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:basis_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| [
"[email protected]"
] | |
da9bbbbba39f5dc6085d921ab3cf3fe4c283bf0e | f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b | /controller/src/xmpp/test/SConscript | 8358c3272343acd70e14c431f4e7437de65829ec | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pan2za/ctrl | 8f808fb4da117fce346ff3d54f80b4e3d6b86b52 | 1d49df03ec4577b014b7d7ef2557d76e795f6a1c | refs/heads/master | 2021-01-22T23:16:48.002959 | 2015-06-17T06:13:36 | 2015-06-17T06:13:36 | 37,454,161 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,075 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
# -*- mode: python; -*-
Import('BuildEnv')
import sys
env = BuildEnv.Clone()
env.Append(LIBPATH = ['#/' + Dir('..').path,
'../../base',
'../../base/test',
'../../bgp',
'../../control-node',
'../../db',
'../../ifmap',
'../../io',
'../../xml',
'../../schema',
'.'
])
libxmpptest = env.Library('xmpptest',
['xmpp_sample_peer.cc'])
env.Prepend(LIBS = ['task_test', 'gunit', 'xmpp', 'xml', 'pugixml', 'sandesh',
'http', 'http_parser', 'curl', 'process_info',
'io', 'ssl', 'sandeshvns', 'base', 'peer_sandesh',
'boost_regex', 'xmpptest', 'control_node'])
if sys.platform != 'darwin':
env.Append(LIBS = ['rt'])
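
# Each test binary is built via env.UnitTest and registered under a scons
# alias so individual tests can be built and run on their own.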
xmpp_server_test = env.UnitTest('xmpp_server_test', ['xmpp_server_test.cc'])
env.Alias('controller/xmpp:xmpp_server_test', xmpp_server_test)
xmpp_regex_test = env.UnitTest('xmpp_regex_test', ['xmpp_regex_test.cc'])
env.Alias('controller/xmpp:xmpp_regex_test', xmpp_regex_test)
xmpp_pubsub_test = env.UnitTest('xmpp_pubsub_test', ['xmpp_pubsub_test.cc'])
env.Alias('controller/xmpp:xmpp_pubsub_test', xmpp_pubsub_test)
xmpp_pubsub_client = env.UnitTest('xmpp_pubsub_client', ['xmpp_pubsub_client.cc'])
env.Alias('controller/xmpp:xmpp_pubsub_client', xmpp_pubsub_client)
xmpp_session_test = env.UnitTest('xmpp_session_test', ['xmpp_session_test.cc'])
env.Alias('controller/xmpp:xmpp_session_test', xmpp_session_test)
xmpp_client_standalone_test = env.UnitTest('xmpp_client_standalone_test',
['xmpp_client_standalone.cc'])
env.Alias('controller/xmpp:xmpp_client_standalone_test', xmpp_client_standalone_test)
xmpp_server_standalone_test = env.UnitTest('xmpp_server_standalone_test',
['xmpp_server_standalone.cc'])
env.Alias('controller/xmpp:xmpp_server_standalone_test', xmpp_server_standalone_test)
xmpp_server_sm_test = env.UnitTest('xmpp_server_sm_test',['xmpp_server_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_server_sm_test', xmpp_server_sm_test)
xmpp_server_auth_sm_test = env.UnitTest('xmpp_server_auth_sm_test',
['xmpp_server_auth_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_server_auth_sm_test', xmpp_server_auth_sm_test)
xmpp_client_sm_test = env.UnitTest('xmpp_client_sm_test', ['xmpp_client_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_client_sm_test', xmpp_client_sm_test)
xmpp_client_auth_sm_test = env.UnitTest('xmpp_client_auth_sm_test',
['xmpp_client_auth_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_client_auth_sm_test', xmpp_client_auth_sm_test)
xmpp_stream_message_client_test = env.UnitTest('xmpp_stream_message_client_test',
['xmpp_stream_message_client.cc'])
env.Alias('controller/xmpp:xmpp_stream_message_client_test', xmpp_stream_message_client_test)
xmpp_stream_message_server_test = env.UnitTest('xmpp_stream_message_server_test',
['xmpp_stream_message_server.cc'])
env.Alias('controller/xmpp:xmpp_stream_message_server_test', xmpp_stream_message_server_test)
test_suite = [
xmpp_client_sm_test,
xmpp_pubsub_test,
xmpp_regex_test,
xmpp_server_sm_test,
xmpp_server_test,
xmpp_session_test,
xmpp_server_auth_sm_test,
xmpp_client_auth_sm_test
]
flaky_test_suite = [
xmpp_stream_message_client_test,
xmpp_stream_message_server_test,
]
test = env.TestSuite('xmpp-test', test_suite)
env.Alias('controller/src/xmpp:test', test)
flaky_test = env.TestSuite('xmpp-flaky-test', flaky_test_suite)
env.Alias('controller/src/xmpp:flaky-test', flaky_test)
env.Alias('controller/src/xmpp:all-test', [test, flaky_test])
Return('test_suite')
| [
"[email protected]"
] | ||
c7ff84276f965396f8ecbc2d09c2957ac60f3715 | 832cc2dd023bcb338cdb4da4a914b077448f6adb | /src/nrega/models.py | 2e084ecd64e3c961d122d43c265b7c2563b8bb68 | [] | no_license | rajesh241/libtechDjango | 2a233edbd1f715ac2ee22ce6ceb027cc4258eac3 | 498d31fd5f069fd467c9b0a83ad2a1ffd59c50e5 | refs/heads/master | 2022-12-08T06:34:08.059624 | 2019-07-19T05:52:46 | 2019-07-19T05:52:46 | 154,110,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,611 | py | from django.db import models
from django.conf import settings
# Create your models here.
from django.db.models.signals import pre_save,post_save
from django.utils.text import slugify
from django.core.serializers import serialize
import datetime
import json
telanganaStateCode='36'
class LibtechTag(models.Model):
'''
This Model is for generic Tags, we can define Tags, for example Survey2019 and tag any object, panchayat, jobcard etc with the tag. Then the objects can be retrieved by using the tag
'''
name=models.CharField(max_length=256,default='NONE')
description=models.CharField(max_length=256,blank=True,null=True)
slug=models.SlugField(blank=True)
class Meta:
db_table = 'libtechTag'
def __str__(self):
return self.name
def serialize(self):
data={
"id" : self.id,
"name": self.name,
"description": self.description
}
json_data = json.dumps(data)
return json_data
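
# Hypothetical usage of LibtechTag (object names below are illustrative):
#   tag, _ = LibtechTag.objects.get_or_create(name="Survey2019")
#   some_panchayat.libtechTag.add(tag)      # tag a panchayat
#   tagged = tag.panchayatTag.all()         # reverse lookup via related_name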
class PDSLocation(models.Model):
name=models.CharField(max_length=256)
displayName=models.CharField(max_length=2048)
locationType=models.CharField(max_length=64)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=255,unique=True,db_index=True)
parentLocation=models.ForeignKey('self',on_delete=models.SET_NULL,blank=True,null=True)
slug=models.SlugField(blank=True)
stateCode=models.CharField(max_length=2,null=True,blank=True)
districtCode=models.CharField(max_length=4,null=True,blank=True)
blockCode=models.CharField(max_length=7,null=True,blank=True)
panchayatCode=models.CharField(max_length=10,null=True,blank=True)
filepath=models.CharField(max_length=2048,null=True,blank=True)
remarks=models.TextField(blank=True,null=True)
priority=models.PositiveSmallIntegerField(default=0)
class Meta:
db_table = 'pdslocation'
def __str__(self):
return self.code
class PDSStat(models.Model):
pdsLocation=models.ForeignKey('PDSLocation',on_delete=models.CASCADE,null=True,blank=True)
downloadDate=models.DateTimeField(null=True,blank=True)
class Meta:
db_table = 'pdsstat'
def __str__(self):
return self.pdsLocation.name
class Location(models.Model):
name=models.CharField(max_length=256)
displayName=models.CharField(max_length=2048)
locationType=models.CharField(max_length=64)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=20,unique=True,db_index=True)
parentLocation=models.ForeignKey('self',on_delete=models.SET_NULL,blank=True,null=True)
slug=models.SlugField(blank=True)
crawlIP=models.CharField(max_length=256,null=True,blank=True)
stateShortCode=models.CharField(max_length=2,null=True,blank=True)
stateCode=models.CharField(max_length=2,null=True,blank=True)
districtCode=models.CharField(max_length=4,null=True,blank=True)
blockCode=models.CharField(max_length=7,null=True,blank=True)
panchayatCode=models.CharField(max_length=10,null=True,blank=True)
filepath=models.CharField(max_length=2048,null=True,blank=True)
isNIC=models.BooleanField(default=True)
remarks=models.TextField(blank=True,null=True)
priority=models.PositiveSmallIntegerField(default=0)
class Meta:
db_table = 'location'
def __str__(self):
return self.code
class Info(models.Model):
name=models.CharField(max_length=256)
slug=models.SlugField(blank=True)
location=models.ForeignKey('Location',on_delete=models.CASCADE)
finyear=models.CharField(max_length=2,null=True,blank=True)
value=models.DecimalField(max_digits=16,decimal_places=4,null=True,blank=True)
textValue=models.CharField(max_length=2048,null=True,blank=True)
class Meta:
db_table = 'info'
def __str__(self):
if self.finyear is not None:
return "%s-%s-%s-%s" % (self.name,self.location,self.finyear,str(self.value))
else:
return "%s-%s-%s" % (self.name,self.location,str(self.value))
class State(models.Model):
'''
    Model for states. Each state is identified by a unique code taken from
    the NREGA website. Each NIC-hosted state is served from a different
    server, identified by crawlIP; isNIC is True for states whose NREGA
    site is hosted on the central NIC infrastructure.
'''
name=models.CharField(max_length=256)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=2,unique=True,db_index=True)
slug=models.SlugField(blank=True)
crawlIP=models.CharField(max_length=256,null=True,blank=True)
stateShortCode=models.CharField(max_length=2)
isNIC=models.BooleanField(default=True)
class Meta:
db_table = 'state'
def __str__(self):
return self.name
class District(models.Model):
state=models.ForeignKey('state',on_delete=models.CASCADE)
name=models.CharField(max_length=256)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=4,db_index=True,unique=True)
slug=models.SlugField(blank=True)
tcode=models.CharField(max_length=8,blank=True,null=True)
isEnumerated=models.BooleanField(default=False)
class Meta:
db_table = 'district'
def __str__(self):
return self.name
class Block(models.Model):
district=models.ForeignKey('district',on_delete=models.CASCADE)
name=models.CharField(max_length=256)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=7,db_index=True,unique=True)
libtechTag=models.ManyToManyField('LibtechTag',related_name="blockTag",blank=True)
slug=models.SlugField(blank=True)
nicStatURL=models.URLField(max_length=2048,blank=True,null=True)
tcode=models.CharField(max_length=7,unique=True,null=True,blank=True)
class Meta:
db_table = 'block'
def __str__(self):
return self.name
class Panchayat(models.Model):
block=models.ForeignKey('block',on_delete=models.CASCADE)
name=models.CharField(max_length=256)
nameInLocalLanguage=models.BooleanField(default=False)
englishName=models.CharField(max_length=256,null=True,blank=True)
code=models.CharField(max_length=10,db_index=True,unique=True)
slug=models.SlugField(blank=True)
tcode=models.CharField(max_length=10,blank=True,null=True)
libtechTag=models.ManyToManyField('LibtechTag',related_name="panchayatTag",blank=True)
nicStatURL=models.URLField(max_length=2048,blank=True,null=True)
remarks=models.CharField(max_length=256,blank=True,null=True)
lastCrawlDate=models.DateTimeField(null=True,blank=True)
lastCrawlDuration=models.IntegerField(blank=True,null=True) #This is Duration that last Crawl took in Minutes
accuracyIndex=models.IntegerField(blank=True,null=True) #This is Accuracy Index of Last Financial Year
accuracyIndexAverage=models.IntegerField(blank=True,null=True)
isDataAccurate=models.BooleanField(default=False)
class Meta:
db_table = 'panchayat'
def __str__(self):
return "%s-%s-%s-%s-%s" % (self.code,self.block.district.state.name,self.block.district.name,self.block.name,self.name)
class Village(models.Model):
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,null=True,blank=True)
name=models.CharField(max_length=256,null=True,blank=True)
    code=models.CharField(max_length=256,db_index=True,null=True,blank=True) # Only kept for compatibility with other locations; not used for Telangana
slug=models.SlugField(blank=True)
tcode=models.CharField(max_length=12,null=True,blank=True)
class Meta:
db_table = 'village'
def __str__(self):
return self.name
class MISReportURL(models.Model):
state=models.ForeignKey('state',on_delete=models.CASCADE)
finyear=models.CharField(max_length=2)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
demandRegisterURL=models.URLField(max_length=2048,blank=True,null=True)
delayPaymentURL=models.URLField(max_length=2048,blank=True,null=True)
ftoURL=models.URLField(max_length=2048,blank=True,null=True)
class Meta:
db_table = 'misReportURL'
def __str__(self):
return "%s-%s" % (self.state.slug,self.finyear)
class PanchayatCrawlInfo(models.Model):
panchayat=models.ForeignKey('panchayat',on_delete=models.CASCADE)
code=models.CharField(max_length=256,db_index=True,null=True,blank=True)
accuracy=models.PositiveSmallIntegerField(default=0)
instanceJSONURL=models.URLField(max_length=2048,blank=True,null=True)
workDetailJSONURL=models.URLField(max_length=2048,blank=True,null=True)
dataDownloadDate=models.DateTimeField(null=True,blank=True)
missingJSONURL=models.URLField(max_length=2048,blank=True,null=True)
crawlDuration=models.IntegerField(blank=True,null=True,default=0)
class Meta:
db_table = 'panchayatCrawlInfo'
def __str__(self):
return "%s-%s-%s-%s" % (self.panchayat.block.district.state.name,self.panchayat.block.district.name,self.panchayat.block.name,self.panchayat.name)
#Models for Reports
#The model below stores generic reports, e.g. on-the-fly zips of existing reports
class GenericReport(models.Model):
name=models.CharField(max_length=256,default='NONE',null=True,blank=True)
description=models.CharField(max_length=256,blank=True,null=True)
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,null=True,blank=True)
libtechTag=models.ForeignKey('LibtechTag',on_delete=models.CASCADE,null=True,blank=True)
updateDate=models.DateTimeField(auto_now=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
class Meta:
db_table = 'genericReport'
def __str__(self):
if (self.panchayat is not None) and (self.libtechTag is not None):
return "%s-%s" % (self.panchayat.name,self.libtechTag.name)
else:
return str(self.id)
#The model below stores block- and panchayat-level reports
class Report(models.Model):
location=models.ForeignKey('Location',on_delete=models.CASCADE,null=True,blank=True)
district=models.ForeignKey('District',on_delete=models.CASCADE,null=True,blank=True)
block=models.ForeignKey('Block',on_delete=models.CASCADE,null=True,blank=True)
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,null=True,blank=True)
reportType=models.CharField(max_length=256)
reportURL=models.URLField(max_length=2048,blank=True,null=True)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
finyear=models.CharField(max_length=2,blank=True,null=True)
created=models.DateTimeField(auto_now_add=True)
modified=models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('location','district','block', 'panchayat','reportType','finyear')
db_table = 'report'
def __str__(self):
if self.location is not None:
return self.location.code+"_"+self.location.name+"-"+self.reportType
elif self.district is not None:
return self.district.name+"-"+self.reportType
elif self.block is not None:
return self.block.name+"-"+self.reportType
elif self.panchayat is not None:
return self.panchayat.name+"-"+self.reportType
else:
            return self.reportType
class CrawlState(models.Model):
name=models.CharField(max_length=256)
sequence=models.PositiveSmallIntegerField(default=0)
minhour=models.PositiveSmallIntegerField(default=6)
maxhour=models.PositiveSmallIntegerField(default=21)
runChildLevel=models.BooleanField(default=True)
isBlockLevel=models.BooleanField(default=False)
isDistrictLevel=models.BooleanField(default=False)
needFullBlockData=models.BooleanField(default=False)
iterateFinYear=models.BooleanField(default=True)
class Meta:
db_table = 'crawlState'
def __str__(self):
return self.name
class CrawlRequest(models.Model):
libtechTag=models.ManyToManyField('LibtechTag',related_name="crawlReqeustTag",blank=True)
location=models.ForeignKey('Location',on_delete=models.CASCADE,null=True,blank=True)
panchayat=models.ForeignKey('panchayat',on_delete=models.CASCADE,null=True,blank=True)
block=models.ForeignKey('block',on_delete=models.CASCADE,null=True,blank=True)
district=models.ForeignKey('district',on_delete=models.CASCADE,null=True,blank=True)
crawlState=models.ForeignKey('CrawlState',on_delete=models.SET_NULL,null=True,blank=True)
source=models.CharField(max_length=256,default="test")
sequenceType=models.CharField(max_length=256,default="default")
processName=models.CharField(max_length=256,blank=True,null=True)
priority=models.PositiveSmallIntegerField(default=0)
startFinYear=models.CharField(max_length=2,default='18')
endFinYear=models.CharField(max_length=2,blank=True,null=True)
progress=models.PositiveSmallIntegerField(default=0)
attemptCount=models.PositiveSmallIntegerField(default=0)
stepError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
crawlStartDate=models.DateTimeField(null=True,blank=True)
crawlAttemptDate=models.DateTimeField(null=True,blank=True)
isComplete=models.BooleanField(default=False)
inProgress=models.BooleanField(default=False)
isError=models.BooleanField(default=False)
error=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
remarks=models.TextField(blank=True,null=True)
class Meta:
db_table = 'crawlRequest'
def __str__(self):
if self.location is not None:
return "%s-%s" % (self.location.code,self.location.displayName)
elif self.panchayat is not None:
return "%s-%s-%s-%s-%s" % (self.panchayat.code,self.panchayat.block.district.state.name,self.panchayat.block.district.name,self.panchayat.block.name,self.panchayat.name)
elif self.block is not None:
return "%s-%s-%s-%s" % (self.block.code,self.block.district.state.name,self.block.district.name,self.block.name)
elif self.district is not None:
return "%s-%s-%s" % (self.district.code,self.district.state.name,self.district.name)
else:
            return str(self.id)
class CrawlQueue(models.Model):
CRAWL_MODE_OPTIONS = (
('FULL', 'FULL'),
('PARTIAL', 'PARTIAL'),
('ONLYSTATS','ONLYSTATS')
)
user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,default=1)
libtechTag=models.ManyToManyField('LibtechTag',related_name="crawlTag",blank=True)
panchayat=models.ForeignKey('panchayat',on_delete=models.CASCADE,null=True,blank=True)
block=models.ForeignKey('block',on_delete=models.CASCADE,null=True,blank=True)
musterDownloadAccuracy=models.PositiveSmallIntegerField(null=True,blank=True)
accuracy=models.PositiveSmallIntegerField(default=0)
crawlMode=models.CharField(max_length=32,choices=CRAWL_MODE_OPTIONS,default='FULL')
downloadStage=models.CharField(max_length=256,default="A1_init")
source=models.CharField(max_length=256,default="test")
processName=models.CharField(max_length=256,blank=True,null=True)
instanceJSONURL=models.URLField(max_length=2048,blank=True,null=True)
workDetailJSONURL=models.URLField(max_length=2048,blank=True,null=True)
priority=models.PositiveSmallIntegerField(default=0)
downloadStep=models.PositiveSmallIntegerField(default=0)
startFinYear=models.CharField(max_length=2,default='18')
progress=models.PositiveSmallIntegerField(default=0)
attemptCount=models.PositiveSmallIntegerField(default=0)
stepError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
crawlStartDate=models.DateTimeField(null=True,blank=True)
crawlAttemptDate=models.DateTimeField(null=True,blank=True)
dataDownloadDate=models.DateTimeField(null=True,blank=True)
pending=models.IntegerField(blank=True,null=True,default=0)
crawlDuration=models.IntegerField(blank=True,null=True,default=0)
isComplete=models.BooleanField(default=False)
stepStarted=models.BooleanField(default=False)
stepCompleted=models.BooleanField(default=False)
inProgress=models.BooleanField(default=False)
isError=models.BooleanField(default=False)
isProcessDriven=models.BooleanField(default=False)
error=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
remarks=models.TextField(blank=True,null=True)
class Meta:
db_table = 'crawlQueue'
def __str__(self):
if self.panchayat is not None:
return "%s-%s-%s-%s-%s" % (self.panchayat.code,self.panchayat.block.district.state.name,self.panchayat.block.district.name,self.panchayat.block.name,self.panchayat.name)
elif self.block is not None:
return "%s-%s-%s" % (self.block.district.state.name,self.block.district.name,self.block.name)
else:
            return str(self.id)
@property
def owner(self):
return self.user
class WorkerStat(models.Model):
worker=models.ForeignKey('Worker',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
finyear=models.CharField(max_length=2)
workDays=models.IntegerField(blank=True,null=True)
nicDaysProvided=models.IntegerField(blank=True,null=True)
totalCredited=models.IntegerField(blank=True,null=True)
totalPending=models.IntegerField(blank=True,null=True)
totalRejected=models.IntegerField(blank=True,null=True)
totalWages=models.IntegerField(blank=True,null=True)
class Meta:
unique_together = ( 'worker','finyear')
db_table = 'workerStat'
def __str__(self):
if self.worker.jobcard.tjobcard is not None:
displayJobcard=self.worker.jobcard.tjobcard
else:
            displayJobcard=self.worker.jobcard.jobcard
        return displayJobcard+"-"+self.worker.name+"-"+self.finyear
class JobcardStat(models.Model):
jobcard=models.ForeignKey('Jobcard',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
finyear=models.CharField(max_length=2)
nicDaysProvided=models.IntegerField(blank=True,null=True,default=0) # As per period wise work provided report
nicDaysDemanded=models.IntegerField(blank=True,null=True,default=0) # As per demand report on panchayat page
jobcardDaysDemanded=models.IntegerField(blank=True,null=True,default=0) # as per jobcard page
jobcardDaysWorked=models.IntegerField(blank=True,null=True,default=0) # as per jobcard page
musterDaysProvided=models.IntegerField(blank=True,null=True,default=0) # as per muster
musterDaysWorked=models.IntegerField(blank=True,null=True,default=0) # as per muster
totalCredited=models.IntegerField(blank=True,null=True)
totalPending=models.IntegerField(blank=True,null=True)
totalRejected=models.IntegerField(blank=True,null=True)
totalWages=models.IntegerField(blank=True,null=True)
class Meta:
unique_together = ( 'jobcard','finyear')
db_table = 'jobcardStat'
def __str__(self):
if self.jobcard.tjobcard is not None:
displayJobcard=self.jobcard.tjobcard
else:
displayJobcard=self.jobcard.jobcard
return displayJobcard+"-"+self.finyear
class BlockStat(models.Model):
block=models.ForeignKey('Block',on_delete=models.CASCADE)
finyear=models.CharField(max_length=2)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
    bankTotalTransactions=models.IntegerField(blank=True,null=True,default=0)
bankTotalRejected=models.IntegerField(blank=True,null=True,default=0)
bankTotalInvalid=models.IntegerField(blank=True,null=True,default=0)
bankTotalProcessed=models.IntegerField(blank=True,null=True,default=0)
bankRejectedURL=models.URLField(max_length=2048,blank=True,null=True)
bankInvalidURL=models.URLField(max_length=2048,blank=True,null=True)
    postTotalTransactions=models.IntegerField(blank=True,null=True,default=0)
postTotalRejected=models.IntegerField(blank=True,null=True,default=0)
postTotalInvalid=models.IntegerField(blank=True,null=True,default=0)
postTotalProcessed=models.IntegerField(blank=True,null=True,default=0)
postRejectedURL=models.URLField(max_length=2048,blank=True,null=True)
postInvalidURL=models.URLField(max_length=2048,blank=True,null=True)
    coBankTotalTransactions=models.IntegerField(blank=True,null=True,default=0)
coBankTotalRejected=models.IntegerField(blank=True,null=True,default=0)
coBankTotalInvalid=models.IntegerField(blank=True,null=True,default=0)
coBankTotalProcessed=models.IntegerField(blank=True,null=True,default=0)
coBankRejectedURL=models.URLField(max_length=2048,blank=True,null=True)
coBankInvalidURL=models.URLField(max_length=2048,blank=True,null=True)
class Meta:
unique_together = ( 'block','finyear')
db_table = 'blockStat'
def __str__(self):
return self.block.name+"-"+self.block.district.name
class PanchayatStat(models.Model):
panchayat=models.ForeignKey('panchayat',on_delete=models.CASCADE)
finyear=models.CharField(max_length=2)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
zeroMusters=models.BooleanField(default=False)
isActive=models.BooleanField(default=True)
isDataAvailable=models.BooleanField(default=False)
accuracy=models.PositiveSmallIntegerField(blank=True,null=True)
jobcardsTotal=models.IntegerField(blank=True,null=True)
workersTotal=models.IntegerField(blank=True,null=True)
nicEmploymentProvided=models.IntegerField(blank=True,null=True) #This is from report 5.1
nicJobcardsTotal=models.IntegerField(blank=True,null=True)
nicWorkersTotal=models.IntegerField(blank=True,null=True)
    nicDaysDemanded=models.IntegerField(blank=True,null=True) # As per the demand report on the panchayat page
nicDaysProvided=models.IntegerField(blank=True,null=True,default=0) #This is as per period wise workdays report on panchayat page
jobcardDaysDemanded=models.IntegerField(blank=True,null=True) # As per Jobcard Page
jobcardDaysWorked=models.IntegerField(blank=True,null=True) # As per jobcard page
musterDaysProvided=models.IntegerField(blank=True,null=True) # As per Musters
musterDaysWorked=models.IntegerField(blank=True,null=True) # As per Muster
libtechTotalWages=models.IntegerField(blank=True,null=True)
libtechPendingWages=models.IntegerField(default=0)
libtechRejectedWages=models.IntegerField(default=0)
libtechCreditedWages=models.IntegerField(default=0)
libtechPendingWagesPCT=models.PositiveSmallIntegerField(default=100)
libtechRejectedWagesPCT=models.PositiveSmallIntegerField(default=0)
libtechCreditedWagesPCT=models.PositiveSmallIntegerField(default=0)
mustersTotal=models.IntegerField(blank=True,null=True)
mustersPending=models.IntegerField(blank=True,null=True)
mustersDownloaded=models.IntegerField(blank=True,null=True)
mustersPendingDownloadPCT=models.PositiveSmallIntegerField(default=100)
mustersInComplete=models.IntegerField(blank=True,null=True)
musterMissingApplicants=models.IntegerField(blank=True,null=True)
wagelistTotal=models.IntegerField(blank=True,null=True)
wagelistPending=models.IntegerField(blank=True,null=True)
wagelistDownloaded=models.IntegerField(blank=True,null=True)
wagelistInComplete=models.IntegerField(blank=True,null=True)
wagelistPendingDownloadPCT=models.PositiveSmallIntegerField(default=100)
musterDownloadAccuracy=models.PositiveSmallIntegerField(default=0)
jobcardDownloadAccuracy=models.PositiveSmallIntegerField(default=0)
wagelistDownloadAccuracy=models.PositiveSmallIntegerField(default=0)
musterTransactionCSV=models.URLField(max_length=2048,blank=True,null=True)
class Meta:
unique_together = ( 'panchayat','finyear')
db_table = 'panchayatStat'
def __str__(self):
return self.panchayat.name+"-"+self.panchayat.block.name
class Jobcard(models.Model):
panchayat=models.ForeignKey('Panchayat',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
location=models.ForeignKey('Location',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
village=models.ForeignKey('Village',on_delete=models.CASCADE,blank=True,null=True)
libtechTag=models.ManyToManyField('LibtechTag',related_name="jobcardTag",blank=True)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
tjobcard=models.CharField(max_length=24,null=True,blank=True,db_index=True)
jobcard=models.CharField(max_length=256,db_index=True,null=True,blank=True)
jcNo=models.BigIntegerField(blank=True,null=True)
headOfHousehold=models.CharField(max_length=512,blank=True,null=True)
surname=models.CharField(max_length=512,blank=True,null=True)
caste=models.CharField(max_length=64,blank=True,null=True)
applicationDate=models.DateField(null=True,blank=True,auto_now_add=True)
isVillageInfoMissing=models.BooleanField(default=False)
isWorkerTableMissing=models.BooleanField(default=False)
isPaymentTableMissing=models.BooleanField(default=False)
allApplicantFound=models.BooleanField(default=False)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
isDownloaded=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
errorDate=models.DateTimeField(null=True,blank=True)
isError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ( 'jobcard','panchayat')
db_table = 'jobcard'
def __str__(self):
return self.jobcard
# def serialize(self):
# json_data = serialize('json',[self])
# return json_data
class Worker(models.Model):
jobcard=models.ForeignKey('Jobcard',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
name=models.CharField(max_length=512)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
libtechTag=models.ManyToManyField('LibtechTag',related_name="workerTag",blank=True)
applicantNo=models.PositiveSmallIntegerField()
fatherHusbandName=models.CharField(max_length=512,blank=True,null=True)
relationship=models.CharField(max_length=64,blank=True,null=True)
gender=models.CharField(max_length=256,blank=True,null=True)
age=models.PositiveIntegerField(blank=True,null=True)
isDeleted=models.BooleanField(default=False)
isDisabled=models.BooleanField(default=False)
isMinority=models.BooleanField(default=False)
remarks=models.CharField(max_length=512,blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
is15Days=models.BooleanField(default=False)
isSample30=models.BooleanField(default=False)
isSample=models.BooleanField(default=False)
isExtraSample30=models.BooleanField(default=False)
isExtraSample=models.BooleanField(default=False)
    oldID=models.IntegerField(blank=True,null=True) # Identifier carried over from an earlier version of the database
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
isDownloaded=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
errorDate=models.DateTimeField(null=True,blank=True)
isError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
class Meta:
unique_together = ('jobcard', 'name','applicantNo')
db_table = 'worker'
def __str__(self):
return self.code
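
# Hypothetical traversal of the Jobcard/Worker relations above (codes are illustrative):
#   w = Worker.objects.filter(jobcard__panchayat__code="1520015001").first()
#   if w is not None:
#       details = w.workdetail_set.filter(finyear="18")   # default reverse name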
class Muster(models.Model):
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
location=models.ForeignKey('Location',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
block=models.ForeignKey('block',on_delete=models.CASCADE)
finyear=models.CharField(max_length=2)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
musterNo=models.CharField(max_length=64,db_index=True)
musterType=models.CharField(max_length=4)
workCode=models.CharField(max_length=128)
workName=models.CharField(max_length=2046)
dateFrom=models.DateField(default=datetime.date.today)
dateTo=models.DateField(default=datetime.date.today)
paymentDate=models.DateField(blank=True,null=True)
musterURL=models.CharField(max_length=4096)
newMusterURL=models.CharField(max_length=4096,blank=True,null=True)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
allApplicantFound=models.BooleanField(default=False)
allWorkerFound=models.BooleanField(default=False)
allWDFound=models.BooleanField(default=False)
isComplete=models.BooleanField(default=False)
isJSONProcessed=models.BooleanField(default=False)
newMusterFormat=models.BooleanField(default=True)
isDownloaded=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
errorDate=models.DateTimeField(null=True,blank=True)
isError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
remarks=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('musterNo', 'block', 'finyear')
db_table="muster"
def __str__(self):
return self.musterNo
class Wagelist(models.Model):
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
block=models.ForeignKey('block',on_delete=models.CASCADE,db_index=True)
wagelistNo=models.CharField(max_length=256)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
wagelistURL=models.URLField(max_length=2048,blank=True,null=True)
finyear=models.CharField(max_length=2,db_index=True)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
generateDate=models.DateField(blank=True,null=True)
isComplete=models.BooleanField(default=False)
isDownloaded=models.BooleanField(default=False)
allFTOFound=models.BooleanField(default=False)
allDWDFound=models.BooleanField(default=False)
allWDFound=models.BooleanField(default=False)
allWorkerFound=models.BooleanField(default=False)
multiplePanchayats=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
errorDate=models.DateTimeField(null=True,blank=True)
isRequired=models.BooleanField(default=False)
isError=models.BooleanField(default=False)
remarks=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('wagelistNo', 'block', 'finyear')
db_table="wagelist"
def __str__(self):
return self.wagelistNo
class FTO(models.Model):
panchayat=models.ForeignKey('Panchayat',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
block=models.ForeignKey('block',on_delete=models.CASCADE,db_index=True)
ftoNo=models.CharField(max_length=256,db_index=True)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
ftoURL=models.URLField(max_length=2048,blank=True,null=True)
paymentMode=models.CharField(max_length=64,blank=True,null=True)
finyear=models.CharField(max_length=2,db_index=True)
firstSignatoryDate=models.DateField(null=True,blank=True)
secondSignatoryDate=models.DateField(null=True,blank=True)
ftofinyear=models.CharField(max_length=2,blank=True,null=True)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
allJobcardFound=models.BooleanField(default=False)
allWagelistFound=models.BooleanField(default=False)
allWorkerFound=models.BooleanField(default=False)
allWDFound=models.BooleanField(default=False)
isRequired=models.BooleanField(default=False)
isComplete=models.BooleanField(default=False)
isDownloaded=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
errorDate=models.DateTimeField(null=True,blank=True)
isError=models.BooleanField(default=False)
remarks=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('ftoNo', 'block', 'finyear')
db_table = "fto"
def __str__(self):
return self.ftoNo
class DPTransaction(models.Model):
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
muster=models.ForeignKey('Muster',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
jobcard=models.CharField(max_length=256,db_index=True,null=True,blank=True)
name=models.CharField(max_length=512)
finyear=models.CharField(max_length=2)
musterNo=models.CharField(max_length=64,db_index=True)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
paymentDate=models.DateField(null=True,blank=True)
delayDays=models.PositiveSmallIntegerField(null=True,blank=True)
delayCompensation=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
status=models.CharField(max_length=4096,null=True,blank=True)
class Meta:
db_table="DPTransaction"
def __str__(self):
return self.name+" "+str(self.musterNo)
class RejectionDetail(models.Model):
referenceNo=models.CharField(max_length=256,null=True,blank=True)
finyear=models.CharField(max_length=2)
block=models.ForeignKey('block',on_delete=models.CASCADE)
wagelist=models.ForeignKey('Wagelist',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
worker=models.ForeignKey('Worker',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
muster=models.ForeignKey('Muster',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
fto=models.ForeignKey('FTO',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
wd=models.ForeignKey('WorkDetail',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
wp=models.ForeignKey('WorkPayment',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
status=models.CharField(max_length=64,null=True,blank=True)
processDate=models.DateField(null=True,blank=True)
rejectionReason=models.CharField(max_length=4096,null=True,blank=True)
class Meta:
unique_together = ('referenceNo', 'block', 'finyear')
db_table="rejectedDetail"
def __str__(self):
return self.referenceNo
class RejectedPayment(models.Model):
referenceNo=models.CharField(max_length=256)
finyear=models.CharField(max_length=2)
block=models.ForeignKey('block',on_delete=models.CASCADE)
code=models.CharField(max_length=256,db_index=True,blank=True,null=True)
url=models.CharField(max_length=4096,null=True,blank=True)
contentFileURL=models.URLField(max_length=2048,blank=True,null=True)
isComplete=models.BooleanField(default=False)
ftoString=models.CharField(max_length=256,blank=True,null=True)
wagelistString=models.CharField(max_length=256,blank=True,null=True)
primaryAccountHolder=models.CharField(max_length=256,blank=True,null=True)
bankCode=models.CharField(max_length=256,blank=True,null=True)
ifscCode=models.CharField(max_length=256,blank=True,null=True)
amount=models.DecimalField(max_digits=10,decimal_places=2,null=True,blank=True)
rejectionReason=models.CharField(max_length=2048,blank=True,null=True)
rejectionDate=models.DateField(null=True,blank=True)
wagelist=models.ForeignKey('Wagelist',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
worker=models.ForeignKey('Worker',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
muster=models.ForeignKey('Muster',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
fto=models.ForeignKey('FTO',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
workDetail=models.ForeignKey('WorkDetail',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
workPayment=models.ForeignKey('WorkPayment',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
status=models.CharField(max_length=64,null=True,blank=True)
isDownloaded=models.BooleanField(default=False)
downloadDate=models.DateTimeField(null=True,blank=True)
errorDate=models.DateTimeField(null=True,blank=True)
isError=models.BooleanField(default=False)
downloadAttemptCount=models.PositiveSmallIntegerField(default=0)
remarks=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('referenceNo', 'block', 'finyear')
db_table="rejectedPayment"
def __str__(self):
return self.referenceNo
class PaymentTransaction(models.Model):
fto=models.ForeignKey('FTO',on_delete=models.CASCADE,db_index=True)
ftoIndex=models.PositiveSmallIntegerField(db_index=True)
wagelist=models.ForeignKey('Wagelist',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
jobcard=models.ForeignKey('Jobcard',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
creditedAmount=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
status=models.CharField(max_length=64,null=True,blank=True)
referenceNo=models.CharField(max_length=256,null=True,blank=True)
transactionDate=models.DateField(null=True,blank=True)
processDate=models.DateField(null=True,blank=True)
rejectionReason=models.CharField(max_length=4096,null=True,blank=True)
primaryAccountHolder=models.CharField(max_length=4096,null=True,blank=True)
favorBankAPB=models.CharField(max_length=256,null=True,blank=True)
IINBankAPB=models.CharField(max_length=256,null=True,blank=True)
jobcardRaw=models.CharField(max_length=256,db_index=True,null=True,blank=True)
class Meta:
db_table="paymentTransaction"
unique_together = ('fto','ftoIndex')
def __str__(self):
return self.fto.ftoNo+" "+str(self.ftoIndex)
class FTOTransaction(models.Model):
fto=models.ForeignKey('FTO',on_delete=models.CASCADE,db_index=True)
ftoIndex=models.PositiveSmallIntegerField(null=True,blank=True)
wagelist=models.ForeignKey('Wagelist',on_delete=models.CASCADE,db_index=True,blank=True,null=True)
jobcard=models.ForeignKey('Jobcard',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
creditedAmount=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
status=models.CharField(max_length=64,null=True,blank=True)
referenceNo=models.CharField(max_length=256,null=True,blank=True)
transactionDate=models.DateField(null=True,blank=True)
processDate=models.DateField(null=True,blank=True)
rejectionReason=models.CharField(max_length=4096,null=True,blank=True)
primaryAccountHolder=models.CharField(max_length=4096,null=True,blank=True)
favorBankAPB=models.CharField(max_length=256,null=True,blank=True)
IINBankAPB=models.CharField(max_length=256,null=True,blank=True)
jobcardRaw=models.CharField(max_length=256,db_index=True,null=True,blank=True)
class Meta:
db_table="ftoTransaction"
unique_together = ('fto','referenceNo')
def __str__(self):
return self.fto.ftoNo+" "+str(self.ftoIndex)
class WagelistTransaction(models.Model):
wagelist=models.ForeignKey('Wagelist',on_delete=models.CASCADE,db_index=True)
wagelistIndex=models.PositiveSmallIntegerField(null=True,blank=True)
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
fto=models.ForeignKey('FTO',on_delete=models.CASCADE,null=True,blank=True)
workDetail=models.ForeignKey('WorkDetail',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
workPayment=models.ForeignKey('WorkPayment',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
rejectedPayment=models.ForeignKey('RejectedPayment',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
daysWorked=models.PositiveSmallIntegerField(null=True,blank=True)
totalWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
isRegenerated=models.BooleanField(default=False)
isWagelistFTOAbsent=models.BooleanField(default=False)
status=models.CharField(max_length=64,null=True,blank=True)
referenceNo=models.CharField(max_length=256,null=True,blank=True)
processDate=models.DateField(null=True,blank=True)
rejectionReason=models.CharField(max_length=4096,null=True,blank=True)
class Meta:
db_table="wagelistTransaction"
unique_together = ('wagelist','wagelistIndex')
def __str__(self):
if self.worker is not None:
return self.worker.name+" "+str(self.wagelist.wagelistNo)
else:
return self.wagelist.wagelistNo
class WorkPayment(models.Model):
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True)
workDemandDate=models.DateField(db_index=True,null=True,blank=True)
workAllocatedDate=models.DateField(db_index=True,null=True,blank=True)
daysDemanded=models.PositiveSmallIntegerField(null=True,blank=True)
demandID=models.CharField(max_length=256,null=True,blank=True)
daysAllocated=models.PositiveSmallIntegerField(default=0)
demandExists=models.BooleanField(default=True)
finyear=models.CharField(max_length=2,null=True,blank=True)
amountDue=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
code=models.CharField(max_length=256,blank=True,null=True)
muster=models.ForeignKey('Muster',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
daysProvided=models.PositiveSmallIntegerField(null=True,blank=True)
daysWorked=models.PositiveSmallIntegerField(null=True,blank=True)
dayWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
totalWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
musterStatus=models.CharField(max_length=64,null=True,blank=True)
accountNo=models.CharField(max_length=256,null=True,blank=True)
bankName=models.CharField(max_length=256,null=True,blank=True)
branchName=models.CharField(max_length=256,null=True,blank=True)
branchCode=models.CharField(max_length=256,null=True,blank=True)
creditedDate=models.DateField(null=True,blank=True)
curWagelist=models.ForeignKey('Wagelist',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
wagelist=models.ManyToManyField('Wagelist',related_name="wpWagelist",blank=True)
wagelistTransaction=models.ManyToManyField('WagelistTransaction',related_name="wpWglTrn",blank=True)
rejectedPayment=models.ManyToManyField('RejectedPayment',related_name="wpRejPay",blank=True)
dpTransaction=models.ForeignKey('DPTransaction',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
isNICDelayAccounted=models.BooleanField(default=False)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('worker', 'workDemandDate','workAllocatedDate')
db_table="workPayment"
def __str__(self):
return str(self.worker)+" "+str(self.workDemandDate)
class WorkDetail(models.Model):
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True)
workDemandDate=models.DateField(db_index=True,null=True,blank=True)
workAllocatedDate=models.DateField(db_index=True,null=True,blank=True)
daysDemanded=models.PositiveSmallIntegerField(null=True,blank=True)
demandID=models.CharField(max_length=256,null=True,blank=True)
daysAllocated=models.PositiveSmallIntegerField(default=0)
demandExists=models.BooleanField(default=True)
finyear=models.CharField(max_length=2,null=True,blank=True)
amountDue=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
code=models.CharField(max_length=256,blank=True,null=True)
muster=models.ForeignKey('Muster',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
daysProvided=models.PositiveSmallIntegerField(null=True,blank=True)
daysWorked=models.PositiveSmallIntegerField(null=True,blank=True)
dayWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
totalWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
musterStatus=models.CharField(max_length=64,null=True,blank=True)
accountNo=models.CharField(max_length=256,null=True,blank=True)
bankName=models.CharField(max_length=256,null=True,blank=True)
branchName=models.CharField(max_length=256,null=True,blank=True)
branchCode=models.CharField(max_length=256,null=True,blank=True)
creditedDate=models.DateField(null=True,blank=True)
curWagelist=models.ForeignKey('Wagelist',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
wagelist=models.ManyToManyField('Wagelist',related_name="wdWagelist",blank=True)
wagelistTransaction=models.ManyToManyField('WagelistTransaction',related_name="wdWglTrn",blank=True)
rejectedPayment=models.ManyToManyField('RejectedPayment',related_name="wdRejPay",blank=True)
dpTransaction=models.ForeignKey('DPTransaction',on_delete=models.SET_NULL,db_index=True,null=True,blank=True)
isNICDelayAccounted=models.BooleanField(default=False)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('worker', 'workDemandDate')
db_table="workDetail"
def __str__(self):
return str(self.worker)+" "+str(self.workDemandDate)
class DemandWorkDetail(models.Model):
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
workDemandDate=models.DateField()
daysDemanded=models.PositiveSmallIntegerField(null=True,blank=True)
demandID=models.CharField(max_length=256,null=True,blank=True)
workAllocatedDate=models.DateField(null=True,blank=True)
daysAllocated=models.PositiveSmallIntegerField(default=0)
demandExists=models.BooleanField(default=True)
finyear=models.CharField(max_length=2,null=True,blank=True)
amountDue=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
code=models.CharField(max_length=256,blank=True,null=True)
muster=models.ForeignKey('Muster',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
# wagelist=models.ManyToManyField('Wagelist',related_name="dwdWagelist",blank=True)
musterIndex=models.PositiveSmallIntegerField(null=True,blank=True)
daysProvided=models.PositiveSmallIntegerField(null=True,blank=True)
daysWorked=models.PositiveSmallIntegerField(null=True,blank=True)
dayWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
totalWage=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
musterStatus=models.CharField(max_length=64,null=True,blank=True)
accountNo=models.CharField(max_length=256,null=True,blank=True)
bankName=models.CharField(max_length=256,null=True,blank=True)
branchName=models.CharField(max_length=256,null=True,blank=True)
branchCode=models.CharField(max_length=256,null=True,blank=True)
creditedDate=models.DateField(null=True,blank=True)
curWagelist=models.ForeignKey('Wagelist',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
dpTransaction=models.ForeignKey('DPTransaction',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
status=models.CharField(max_length=64,null=True,blank=True)
rejectedFlag=models.BooleanField(default=False)
inRejectionTable=models.BooleanField(default=False)
isCredited=models.BooleanField(default=False)
isNICDelayedPayment=models.BooleanField(default=False)
isNICDelayAccounted=models.BooleanField(default=False)
isDelayedPayment=models.BooleanField(default=False)
attemptCount=models.PositiveSmallIntegerField(null=True,blank=True)
nicDelayDays=models.PositiveSmallIntegerField(null=True,blank=True)
delayDays=models.PositiveSmallIntegerField(null=True,blank=True)
paymentDelay=models.PositiveSmallIntegerField(null=True,blank=True)
ftoDelay=models.PositiveSmallIntegerField(null=True,blank=True)
rejectionReason=models.TextField(blank=True,null=True)
remarks=models.TextField(blank=True,null=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('worker', 'workDemandDate')
db_table="demandWorkDetail"
def __str__(self):
return str(self.worker)+" "+str(self.workDemandDate)
class APWorkPayment(models.Model):
jobcard=models.ForeignKey('Jobcard',db_index=True,on_delete=models.CASCADE,blank=True,null=True)
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
name=models.CharField(max_length=512,null=True,blank=True)
finyear=models.CharField(max_length=2,blank=True,null=True)
ftofinyear=models.CharField(max_length=2,blank=True,null=True)
applicantNo=models.PositiveSmallIntegerField(db_index=True,null=True,blank=True)
musterNo=models.CharField(max_length=64,db_index=True,null=True,blank=True)
workCode=models.CharField(max_length=128,null=True,blank=True)
workName=models.CharField(max_length=2046,null=True,blank=True)
dateFrom=models.DateField(null=True,blank=True)
dateTo=models.DateField(null=True,blank=True)
daysWorked=models.PositiveSmallIntegerField(null=True,blank=True)
accountNo=models.CharField(max_length=256,blank=True,null=True)
modeOfPayment=models.CharField(max_length=256,blank=True,null=True)
payorderAmount=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
payorderNo=models.CharField(max_length=256,null=True,blank=True)
payorderDate=models.DateField(null=True,blank=True)
epayorderNo=models.CharField(db_index=True,max_length=256,null=True,blank=True)
epayorderDate=models.DateField(null=True,blank=True)
payingAgencyDate=models.DateField(null=True,blank=True)
creditedDate=models.DateField(null=True,blank=True)
disbursedAmount=models.DecimalField(max_digits=10,decimal_places=4,null=True,blank=True)
disbursedDate=models.DateField(null=True,blank=True)
isDelayedPayment=models.BooleanField(default=False)
isMusterRecordPresent=models.BooleanField(default=False)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('jobcard', 'epayorderNo')
db_table="apWorkPayment"
def __str__(self):
return str(self.id)
class RN6TransactionDetail(models.Model):
worker=models.ForeignKey('Worker',on_delete=models.CASCADE,db_index=True,null=True,blank=True)
name=models.CharField(max_length=256,null=True,blank=True)
transactionDate=models.DateField(null=True,blank=True)
transactionReference=models.CharField(max_length=256,null=True,blank=True)
withdrawnAt=models.CharField(max_length=256,null=True,blank=True)
deposit=models.DecimalField(max_digits=10,decimal_places=2,null=True,blank=True)
withdrawal=models.DecimalField(max_digits=10,decimal_places=2,null=True,blank=True)
balance=models.DecimalField(max_digits=10,decimal_places=2,null=True,blank=True)
created=models.DateTimeField(null=True,blank=True,auto_now_add=True)
modified=models.DateTimeField(null=True,blank=True,auto_now=True)
class Meta:
unique_together = ('worker', 'transactionDate','transactionReference')
db_table="rn6TransactionDetail"
def __str__(self):
return self.transactionReference
class LanguageDict(models.Model):
phrase1=models.CharField(max_length=1024)
lang1=models.CharField(max_length=1024)
phrase2=models.CharField(max_length=1024,null=True,blank=True)
lang2=models.CharField(max_length=1024,null=True,blank=True)
class Meta:
db_table="languageDict"
def __str__(self):
return "%s-%s" % (self.phrase1+self.lang1)
def createslug(instance):
    # Prefer the English name when the model defines one and it is set;
    # otherwise fall back to the primary name.
    englishName=getattr(instance,'englishName',None)
    if englishName:
        myslug=slugify(englishName)[:50]
    else:
        myslug=slugify(instance.name)[:50]
if myslug == '':
if hasattr(instance, 'code'):
myslug="%s-%s" % (instance.__class__.__name__ , str(instance.code))
else:
myslug="%s-%s" % (instance.__class__.__name__ , str(instance.id))
return myslug
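
# The post_save receivers below denormalise derived fields (slug / code)
# after every save. Each one reassigns and saves only when the value has
# actually changed, which also prevents infinite save() recursion.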
def location_post_save_receiver(sender,instance,*args,**kwargs):
myslug=createslug(instance)
if instance.slug != myslug:
instance.slug = myslug
instance.save()
def village_post_save_receiver(sender,instance,*args,**kwargs):
modified=False
myslug=slugify(instance.name)[:50]
if myslug == '':
myslug="%s-%s" % (instance.__class__.__name__ , str(instance.id))
if instance.slug != myslug:
instance.slug = myslug
modified=True
code="%s_%s" % (instance.panchayat.code,instance.name)
if instance.code != code:
instance.code=code
modified=True
if modified == True:
instance.save()
def blockStat_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s_%s" % (instance.block.district.state.name,instance.block.district.name,instance.block.code)
if instance.code != code:
instance.code=code
instance.save()
def panchayatStat_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s" % (instance.panchayat.code,instance.finyear)
if instance.code != code:
instance.code=code
instance.save()
def jobcard_post_save_receiver(sender,instance,*args,**kwargs):
modified=False
if instance.panchayat.block.district.state.code == telanganaStateCode:
code=instance.tjobcard
else:
code=instance.jobcard
if instance.code != code:
instance.code=code
modified=True
if modified==True:
instance.save()
def wd_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s_%s_%s" % (instance.muster.block.code,instance.muster.finyear,instance.muster.musterNo,instance.musterIndex)
if instance.code != code:
instance.code=code
instance.save()
def muster_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s_%s" % (instance.block.code,instance.finyear,instance.musterNo)
if instance.code != code:
instance.code=code
instance.save()
def rejectedPayment_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s_%s" % (instance.block.code,instance.finyear,instance.referenceNo)
if instance.code != code:
instance.code=code
instance.save()
def wagelist_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s_%s" % (instance.block.code,instance.finyear,instance.wagelistNo)
if instance.code != code:
instance.code=code
instance.save()
def fto_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s" % (instance.block.code,instance.ftoNo)
if instance.code != code:
instance.code=code
instance.save()
def panchayatCrawlInfo_post_save_receiver(sender,instance,*args,**kwargs):
code="%s" % (instance.panchayat.code)
if instance.code != code:
instance.code=code
instance.save()
def workDemand_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s" % (instance.worker.code,str(instance.workDemandDate))
if instance.code != code:
instance.code=code
instance.save()
def worker_post_save_receiver(sender,instance,*args,**kwargs):
code="%s_%s" % (instance.jobcard.jobcard,instance.name)
if instance.code != code:
instance.code=code
instance.save()
def crawlRequest_post_save_receiver(sender,instance,*args,**kwargs):
if instance.panchayat is not None:
sc=instance.panchayat.block.district.state.code
elif instance.block is not None:
sc=instance.block.district.state.code
else:
sc=None
if instance.crawlState is None:
if instance.sequenceType == 'pension':
cs=CrawlState.objects.filter(sequence=300).first()
elif instance.sequenceType == 'dd':
cs=CrawlState.objects.filter(sequence=200).first()
else:
if sc == telanganaStateCode:
cs=CrawlState.objects.filter(sequence=100).first()
else:
cs=CrawlState.objects.filter(sequence=1).first()
instance.crawlState=cs
instance.save()
def report_post_save_receiver(sender,instance,*args,**kwargs):
if instance.location is not None:
code="%s_%s_%s" % (instance.location.code,instance.finyear,instance.reportType)
elif instance.panchayat is not None:
code="%s_%s_%s" % (instance.panchayat.code,instance.finyear,instance.reportType)
elif instance.block is not None:
code="%s_%s_%s" % (instance.block.code,instance.finyear,instance.reportType)
else:
code="%s_%s_%s" % (instance.district.code,instance.finyear,instance.reportType)
if instance.code != code:
instance.code=code
instance.save()
# print(instance.__class__.__name__)
post_save.connect(report_post_save_receiver,sender=Report)
post_save.connect(panchayatStat_post_save_receiver,sender=PanchayatStat)
post_save.connect(blockStat_post_save_receiver,sender=BlockStat)
post_save.connect(worker_post_save_receiver,sender=Worker)
post_save.connect(jobcard_post_save_receiver,sender=Jobcard)
post_save.connect(muster_post_save_receiver,sender=Muster)
post_save.connect(crawlRequest_post_save_receiver,sender=CrawlRequest)
post_save.connect(rejectedPayment_post_save_receiver,sender=RejectedPayment)
post_save.connect(wagelist_post_save_receiver,sender=Wagelist)
post_save.connect(fto_post_save_receiver,sender=FTO)
post_save.connect(location_post_save_receiver,sender=Location)
post_save.connect(location_post_save_receiver,sender=PDSLocation)
post_save.connect(location_post_save_receiver,sender=State)
post_save.connect(location_post_save_receiver,sender=District)
post_save.connect(location_post_save_receiver,sender=Block)
post_save.connect(location_post_save_receiver,sender=Panchayat)
post_save.connect(village_post_save_receiver,sender=Village)
post_save.connect(location_post_save_receiver,sender=LibtechTag)
post_save.connect(location_post_save_receiver,sender=Info)
post_save.connect(panchayatCrawlInfo_post_save_receiver,sender=PanchayatCrawlInfo)
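# Note: every receiver above calls instance.save() from inside a post_save
# handler, which re-fires the signal; the guards (only saving when a value
# actually changed or was unset) are what keep this from recursing forever.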
| [
"[email protected]"
] | |
afa9598a1738a2e20d6ccb9b9211a51c66351017 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_325/ch36_2020_04_06_19_33_03_105361.py | bf9146896d304b6113439ff62f63713215623c6c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | def factorial(n):
    s = 1
    for i in range(1, n + 1):
        s *= i
    return s
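# Quick sanity check (hypothetical): factorial(5) == 120, factorial(0) == 1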
"[email protected]"
] | |
cad3020fe5ed05dd5f2adb766e25e7be8f6dbf96 | 9d41f4df737dc2e6fd3fcf4c6f50028fd483cdd0 | /python_basic/section08.py | c3e3a9de8ba928d719982f41ef6e0dcd6599505e | [] | no_license | Ha-Young/byte_degree_python | 33a730f4c1f4a99fea03fb923ad73edee2dd1d48 | 7fcbfed832dec3d7cb8503b86d9457e1f2ae0ccf | refs/heads/master | 2022-11-16T16:54:52.978443 | 2020-07-04T14:32:16 | 2020-07-04T14:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | # Section08
# Python modules and packages
# Package examples
# Relative paths
# .. : parent directory
# . : current directory
# Usage 1 (class)
# from package import class
from pkg.fibonacci import Fibonacci
Fibonacci.fib(300)
print("ex2 : ", Fibonacci.fib2(400))
print("ex2 : ", Fibonacci("hoho").title)
# Usage 2 (class) -> imports everything, so it uses more memory; not recommended
from pkg.fibonacci import *  # brings in every name from the module
Fibonacci.fib(600)
print("ex2 : ", Fibonacci.fib2(600))
print("ex2 : ", Fibonacci("use2").title)
# Usage 3 (class) -> recommended; uses an alias
from pkg.fibonacci import Fibonacci as Fib
Fib.fib(1000)
print("ex3 : ", Fib.fib2(1400))
print("ex3 : ", Fib("use3").title)
# Usage 4 (functions)
# import module -> brings in everything defined in that module
import pkg.calculations as c
print("ex4 : ", c.add(10, 100))
print("ex4 : ", c.mul(10, 100))
# Usage 5 (functions) -> recommended.
# from module import function as alias
from pkg.calculations import div as d
print("ex5 : ", int(d(100,10)))
# 사용6
import pkg.prints as p
import builtins  # built-in names that are always available; imported by default
p.prt1()
p.prt2()
print(dir(builtins))
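# For reference, a minimal sketch of what pkg/calculations.py could contain
# (hypothetical -- the actual package files are not shown here):
#
#   def add(a, b):
#       return a + b
#
#   def mul(a, b):
#       return a * b
#
#   def div(a, b):
#       return a / b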
| [
"[email protected]"
] | |
096024ea08a007418474f21229662d03091ef468 | 5253ecc76e493afea8935be6ed7926a431f1721d | /sovrin_client/test/cli/test_pool_upgrade.py | a088aed75e7690e92fb0e012c68ff4dd722c0fd2 | [
"Apache-2.0"
] | permissive | peacekeeper/sovrin-client | a34f7d1edc1722e4805cbe36e61d031dc6574dc4 | 4d408d16ee2d8aca2d3065c0302431cc5c5386c1 | refs/heads/master | 2021-01-11T14:43:46.240473 | 2017-02-16T10:02:21 | 2017-02-16T10:02:21 | 80,198,885 | 2 | 1 | null | 2017-01-27T10:31:04 | 2017-01-27T10:31:03 | null | UTF-8 | Python | false | false | 1,916 | py | from copy import copy
import pytest
from plenum.common.eventually import eventually
from plenum.common.txn import VERSION
from sovrin_common.txn import ACTION, CANCEL, JUSTIFICATION
from sovrin_node.test.upgrade.helper import checkUpgradeScheduled, \
checkNoUpgradeScheduled
from sovrin_node.test.upgrade.conftest import validUpgrade
@pytest.fixture(scope='module')
def nodeIds(poolNodesStarted):
return next(iter(poolNodesStarted.nodes.values())).poolManager.nodeIds
@pytest.fixture(scope="module")
def poolUpgradeSubmitted(be, do, trusteeCli, validUpgrade, trusteeMap):
do('send POOL_UPGRADE name={name} version={version} sha256={sha256} '
'action={action} schedule={schedule} timeout={timeout}',
within=10,
expect=['Pool upgrade successful'], mapper=validUpgrade)
@pytest.fixture(scope="module")
def poolUpgradeScheduled(poolUpgradeSubmitted, poolNodesStarted, validUpgrade):
nodes = poolNodesStarted.nodes.values()
poolNodesStarted.looper.run(
eventually(checkUpgradeScheduled, nodes,
validUpgrade[VERSION], retryWait=1, timeout=10))
@pytest.fixture(scope="module")
def poolUpgradeCancelled(poolUpgradeScheduled, be, do, trusteeCli,
validUpgrade, trusteeMap):
validUpgrade = copy(validUpgrade)
validUpgrade[ACTION] = CANCEL
validUpgrade[JUSTIFICATION] = '"not gonna give you one"'
do('send POOL_UPGRADE name={name} version={version} sha256={sha256} '
'action={action} justification={justification}',
within=10,
expect=['Pool upgrade successful'], mapper=validUpgrade)
def testPoolUpgradeSent(poolUpgradeScheduled):
pass
def testPoolUpgradeCancelled(poolUpgradeCancelled, poolNodesStarted):
nodes = poolNodesStarted.nodes.values()
poolNodesStarted.looper.run(
eventually(checkNoUpgradeScheduled,
nodes, retryWait=1, timeout=10))
| [
"[email protected]"
] | |
c3122503563ac8940d8246dba442f246f956d3bb | 6f560247d031db5ab0bbf5d1d3ad6bd7b12f6e14 | /aiormq/tools.py | 42ec7dd8b7d84c995a3b6330edebcf611edb79ae | [
"Apache-2.0"
] | permissive | tchalupnik/aiormq | c7482a224447aabac734e5963e8ffba9c80872c0 | 8aea0ecd15f695ae74fdafe0dfb1626a56412130 | refs/heads/master | 2023-09-04T11:42:16.178616 | 2021-10-03T19:23:12 | 2021-10-03T19:23:12 | 422,150,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,634 | py | import asyncio
from functools import wraps
from typing import AsyncContextManager, Awaitable, TypeVar
from yarl import URL
from aiormq.abc import TimeoutType
T = TypeVar("T")
def censor_url(url: URL):
if url.password is not None:
return url.with_password("******")
return url
def shield(func):
@wraps(func)
def wrap(*args, **kwargs):
return asyncio.shield(awaitable(func)(*args, **kwargs))
return wrap
def awaitable(func):
# Avoid python 3.8+ warning
if asyncio.iscoroutinefunction(func):
return func
@wraps(func)
async def wrap(*args, **kwargs):
result = func(*args, **kwargs)
if hasattr(result, "__await__"):
return await result
if asyncio.iscoroutine(result) or asyncio.isfuture(result):
return await result
return result
return wrap
def _inspect_await_method():
async def _test():
pass
coro = _test()
method_await = getattr(coro, "__await__", None)
method_iter = getattr(coro, "__iter__", None)
for _ in (method_await or method_iter)():
pass
return bool(method_await)
HAS_AWAIT_METHOD = _inspect_await_method()
class Countdown:
def __init__(self, timeout: TimeoutType = None):
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
self.deadline: TimeoutType = None
if timeout is not None:
self.deadline = self.loop.time() + timeout
def get_timeout(self) -> TimeoutType:
if self.deadline is None:
return None
current = self.loop.time()
if current >= self.deadline:
raise asyncio.TimeoutError
return self.deadline - current
def __call__(self, coro: Awaitable[T]) -> Awaitable[T]:
if self.deadline is None:
return coro
return asyncio.wait_for(coro, timeout=self.get_timeout())
def enter_context(
self, ctx: AsyncContextManager[T],
) -> AsyncContextManager[T]:
return CountdownContext(self, ctx)
class CountdownContext(AsyncContextManager):
def __init__(self, countdown: Countdown, ctx: AsyncContextManager):
self.countdown = countdown
self.ctx = ctx
def __aenter__(self):
if self.countdown.deadline is None:
return self.ctx.__aenter__()
return self.countdown(self.ctx.__aenter__())
def __aexit__(self, exc_type, exc_val, exc_tb):
if self.countdown.deadline is None:
return self.ctx.__aexit__(exc_type, exc_val, exc_tb)
return self.countdown(self.ctx.__aexit__(exc_type, exc_val, exc_tb))
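# A minimal usage sketch of Countdown (the fetch() coroutine and URLs are
# hypothetical, not part of aiormq):
#
#   async def fetch_all(urls):
#       countdown = Countdown(timeout=5.0)  # one shared 5-second deadline
#       results = []
#       for url in urls:
#           # every wrapped await draws from the same remaining budget;
#           # asyncio.TimeoutError is raised once the deadline passes
#           results.append(await countdown(fetch(url)))
#       return results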
| [
"[email protected]"
] | |
6da9f3ca07b76acf4b1a00ae2eb132bb1850ea73 | aec0340716e713b552e894475844b12cdafe90b0 | /env/bin/python-config | dd857d83e1ef789e0a8795da1a307d59810c7b8b | [] | no_license | capt-alien/ReBay | 16bd355129057836364db59bf120b82ff539450f | 37b7498f2abb31a252ad71be51f653d45cf6041c | refs/heads/master | 2020-04-29T20:21:14.777249 | 2019-03-26T04:51:46 | 2019-03-26T04:51:46 | 176,381,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | #!/Users/ericbotcher/dev/projects/ReBay/env/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
f44824dea67298d1fb010a231b518d09518a8096 | 0464ea852eceb2815af8d327679796f04f6286ac | /ll_env/Scripts/django-admin.py | f005680e29f3b14f0853dbd540edf58f027b3147 | [] | no_license | TrellixVulnTeam/polls_DXZJ | 1a5856c823a86e78f0f31ebe111463fbd00cea61 | 5c91811ec232ab7f3c44a61ae67981f579ad530e | refs/heads/master | 2023-03-16T11:39:50.917803 | 2018-03-12T09:26:07 | 2018-03-12T09:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!c:\users\youki\desktop\site\blog\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
d50c1835fb54d1533e75bd2db34d814ec732a697 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02613/s924626133.py | 1ea35f5c1bd5d4cd2ec8b5efdd1f5791376114f6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | def judge_status_summary():
N = int(input())
res = {"AC":0,"WA":0,"TLE":0,"RE":0}
for i in range(N):
res[input()] += 1
for k in res:
print("{} x {}".format(k,res[k]))
judge_status_summary() | [
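# Example run: for N=3 with verdicts AC, AC, WA the program prints
# "AC x 2", "WA x 1", "TLE x 0", "RE x 0" (one line per verdict type).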
"[email protected]"
] | |
5e43b776594e2a57279cf3a4f4284d4f06de5c57 | 2ca5a1fe5608eb8298d7e142ecca98fd0fa4566b | /venv/lib/python3.7/site-packages/supervisor/tests/test_loggers.py | 0742c1766ca4f699ba70d19c633ead415da0c07a | [
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"MIT"
] | permissive | basemanbase/Animal-Health | 75bd2e4dc4eb29b6466125b197a77a70e14d91a4 | d9741aafd54126f05ba43e6f4ad6517755797c76 | refs/heads/master | 2023-05-30T17:53:19.905594 | 2022-01-30T05:52:53 | 2022-01-30T05:52:53 | 228,593,914 | 0 | 1 | MIT | 2023-05-01T20:37:31 | 2019-12-17T10:41:05 | Python | UTF-8 | Python | false | false | 21,442 | py | # -*- coding: utf-8 -*-
import errno
import sys
import unittest
import tempfile
import shutil
import os
import syslog
from supervisor.compat import PY2
from supervisor.compat import as_string
from supervisor.compat import StringIO
from supervisor.compat import unicode
from supervisor.tests.base import mock
from supervisor.tests.base import DummyStream
class LevelTests(unittest.TestCase):
def test_LOG_LEVELS_BY_NUM_doesnt_include_builtins(self):
from supervisor import loggers
for level_name in loggers.LOG_LEVELS_BY_NUM.values():
self.assertFalse(level_name.startswith('_'))
class HandlerTests:
def setUp(self):
self.basedir = tempfile.mkdtemp()
self.filename = os.path.join(self.basedir, 'thelog')
def tearDown(self):
try:
shutil.rmtree(self.basedir)
except OSError:
pass
def _makeOne(self, *arg, **kw):
klass = self._getTargetClass()
return klass(*arg, **kw)
def _makeLogRecord(self, msg):
from supervisor import loggers
record = loggers.LogRecord(
level=loggers.LevelsByName.INFO,
msg=msg,
exc_info=None
)
return record
class BareHandlerTests(HandlerTests, unittest.TestCase):
def _getTargetClass(self):
from supervisor.loggers import Handler
return Handler
def test_flush_stream_flush_raises_IOError_EPIPE(self):
stream = DummyStream(error=IOError(errno.EPIPE))
inst = self._makeOne(stream=stream)
self.assertEqual(inst.flush(), None) # does not raise
def test_flush_stream_flush_raises_IOError_not_EPIPE(self):
stream = DummyStream(error=IOError(errno.EALREADY))
inst = self._makeOne(stream=stream)
self.assertRaises(IOError, inst.flush) # non-EPIPE IOError raises
def test_close_already_closed(self):
stream = DummyStream()
inst = self._makeOne(stream=stream)
inst.closed = True
self.assertEqual(inst.close(), None)
def test_close_stream_fileno_above_3(self):
stream = DummyStream(fileno=50)
inst = self._makeOne(stream=stream)
self.assertEqual(inst.close(), None)
self.assertTrue(inst.closed)
self.assertTrue(inst.stream.closed)
def test_close_stream_fileno_below_3(self):
stream = DummyStream(fileno=0)
inst = self._makeOne(stream=stream)
self.assertEqual(inst.close(), None)
self.assertFalse(inst.closed)
self.assertFalse(inst.stream.closed)
def test_close_stream_handles_fileno_unsupported_operation(self):
# on python 2, StringIO does not have fileno()
# on python 3, StringIO has fileno() but calling it raises
stream = StringIO()
inst = self._makeOne(stream=stream)
inst.close() # shouldn't raise
self.assertTrue(inst.closed)
def test_close_stream_handles_fileno_ioerror(self):
stream = DummyStream()
def raise_ioerror():
raise IOError()
stream.fileno = raise_ioerror
inst = self._makeOne(stream=stream)
inst.close() # shouldn't raise
self.assertTrue(inst.closed)
def test_emit_gardenpath(self):
stream = DummyStream()
inst = self._makeOne(stream=stream)
record = self._makeLogRecord(b'foo')
inst.emit(record)
self.assertEqual(stream.flushed, True)
self.assertEqual(stream.written, b'foo')
def test_emit_unicode_error(self):
stream = DummyStream(error=UnicodeError)
inst = self._makeOne(stream=stream)
record = self._makeLogRecord(b'foo')
inst.emit(record)
self.assertEqual(stream.flushed, True)
self.assertEqual(stream.written, b'foo')
def test_emit_other_error(self):
stream = DummyStream(error=ValueError)
inst = self._makeOne(stream=stream)
handled = []
inst.handleError = lambda: handled.append(True)
record = self._makeLogRecord(b'foo')
inst.emit(record)
self.assertEqual(stream.flushed, False)
self.assertEqual(stream.written, b'')
class FileHandlerTests(HandlerTests, unittest.TestCase):
def _getTargetClass(self):
from supervisor.loggers import FileHandler
return FileHandler
def test_ctor(self):
handler = self._makeOne(self.filename)
self.assertTrue(os.path.exists(self.filename), self.filename)
self.assertEqual(handler.mode, 'ab')
self.assertEqual(handler.baseFilename, self.filename)
self.assertEqual(handler.stream.name, self.filename)
handler.close()
def test_close(self):
handler = self._makeOne(self.filename)
handler.stream.close()
handler.stream = DummyStream()
handler.close()
self.assertEqual(handler.stream.closed, True)
def test_close_raises(self):
handler = self._makeOne(self.filename)
handler.stream.close()
handler.stream = DummyStream(OSError)
self.assertRaises(OSError, handler.close)
self.assertEqual(handler.stream.closed, False)
def test_reopen(self):
handler = self._makeOne(self.filename)
handler.stream.close()
stream = DummyStream()
handler.stream = stream
handler.reopen()
self.assertEqual(stream.closed, True)
self.assertEqual(handler.stream.name, self.filename)
handler.close()
def test_reopen_raises(self):
handler = self._makeOne(self.filename)
handler.stream.close()
stream = DummyStream()
handler.stream = stream
handler.baseFilename = os.path.join(self.basedir, 'notthere', 'a.log')
self.assertRaises(IOError, handler.reopen)
self.assertEqual(stream.closed, True)
def test_remove_exists(self):
handler = self._makeOne(self.filename)
self.assertTrue(os.path.exists(self.filename), self.filename)
handler.remove()
self.assertFalse(os.path.exists(self.filename), self.filename)
def test_remove_doesntexist(self):
handler = self._makeOne(self.filename)
os.remove(self.filename)
self.assertFalse(os.path.exists(self.filename), self.filename)
handler.remove() # should not raise
self.assertFalse(os.path.exists(self.filename), self.filename)
def test_remove_raises(self):
handler = self._makeOne(self.filename)
os.remove(self.filename)
os.mkdir(self.filename)
self.assertTrue(os.path.exists(self.filename), self.filename)
self.assertRaises(OSError, handler.remove)
def test_emit_ascii_noerror(self):
handler = self._makeOne(self.filename)
record = self._makeLogRecord(b'hello!')
handler.emit(record)
handler.close()
with open(self.filename, 'rb') as f:
self.assertEqual(f.read(), b'hello!')
def test_emit_unicode_noerror(self):
handler = self._makeOne(self.filename)
record = self._makeLogRecord(b'fi\xc3\xad')
handler.emit(record)
handler.close()
with open(self.filename, 'rb') as f:
self.assertEqual(f.read(), b'fi\xc3\xad')
def test_emit_error(self):
handler = self._makeOne(self.filename)
handler.stream.close()
handler.stream = DummyStream(error=OSError)
record = self._makeLogRecord(b'hello!')
try:
old_stderr = sys.stderr
dummy_stderr = DummyStream()
sys.stderr = dummy_stderr
handler.emit(record)
finally:
sys.stderr = old_stderr
self.assertTrue(dummy_stderr.written.endswith(b'OSError\n'),
dummy_stderr.written)
if os.path.exists('/dev/stdout'):
StdoutTestsBase = FileHandlerTests
else:
# Skip the stdout tests on platforms that don't have /dev/stdout.
StdoutTestsBase = object
class StdoutTests(StdoutTestsBase):
def test_ctor_with_dev_stdout(self):
handler = self._makeOne('/dev/stdout')
# Modes 'w' and 'a' have the same semantics when applied to
# character device files and fifos.
self.assertTrue(handler.mode in ['wb', 'ab'], handler.mode)
self.assertEqual(handler.baseFilename, '/dev/stdout')
self.assertEqual(handler.stream.name, '/dev/stdout')
handler.close()
class RotatingFileHandlerTests(FileHandlerTests):
def _getTargetClass(self):
from supervisor.loggers import RotatingFileHandler
return RotatingFileHandler
def test_ctor(self):
handler = self._makeOne(self.filename)
self.assertEqual(handler.mode, 'ab')
self.assertEqual(handler.maxBytes, 512*1024*1024)
self.assertEqual(handler.backupCount, 10)
handler.close()
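    # In the test below each emit writes a 4-byte record; with maxBytes=10 a
    # rollover fires on the emit that pushes the file past 10 bytes, i.e. on
    # every third emit, and backupCount=2 caps the number of rotated files.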
def test_emit_does_rollover(self):
handler = self._makeOne(self.filename, maxBytes=10, backupCount=2)
record = self._makeLogRecord(b'a' * 4)
handler.emit(record) # 4 bytes
self.assertFalse(os.path.exists(self.filename + '.1'))
self.assertFalse(os.path.exists(self.filename + '.2'))
handler.emit(record) # 8 bytes
self.assertFalse(os.path.exists(self.filename + '.1'))
self.assertFalse(os.path.exists(self.filename + '.2'))
handler.emit(record) # 12 bytes, do rollover
self.assertTrue(os.path.exists(self.filename + '.1'))
self.assertFalse(os.path.exists(self.filename + '.2'))
handler.emit(record) # 16 bytes
self.assertTrue(os.path.exists(self.filename + '.1'))
self.assertFalse(os.path.exists(self.filename + '.2'))
handler.emit(record) # 20 bytes
self.assertTrue(os.path.exists(self.filename + '.1'))
self.assertFalse(os.path.exists(self.filename + '.2'))
handler.emit(record) # 24 bytes, do rollover
self.assertTrue(os.path.exists(self.filename + '.1'))
self.assertTrue(os.path.exists(self.filename + '.2'))
handler.emit(record) # 28 bytes
handler.close()
self.assertTrue(os.path.exists(self.filename + '.1'))
self.assertTrue(os.path.exists(self.filename + '.2'))
with open(self.filename, 'rb') as f:
self.assertEqual(f.read(), b'a' * 4)
with open(self.filename+'.1', 'rb') as f:
self.assertEqual(f.read(), b'a' * 12)
with open(self.filename+'.2', 'rb') as f:
self.assertEqual(f.read(), b'a' * 12)
def test_current_logfile_removed(self):
handler = self._makeOne(self.filename, maxBytes=6, backupCount=1)
record = self._makeLogRecord(b'a' * 4)
handler.emit(record) # 4 bytes
self.assertTrue(os.path.exists(self.filename))
self.assertFalse(os.path.exists(self.filename + '.1'))
# Someone removes the active log file! :-(
os.unlink(self.filename)
self.assertFalse(os.path.exists(self.filename))
handler.emit(record) # 8 bytes, do rollover
handler.close()
self.assertTrue(os.path.exists(self.filename))
self.assertFalse(os.path.exists(self.filename + '.1'))
def test_removeAndRename_destination_does_not_exist(self):
inst = self._makeOne(self.filename)
renames = []
removes = []
inst._remove = lambda v: removes.append(v)
inst._exists = lambda v: False
inst._rename = lambda s, t: renames.append((s, t))
inst.removeAndRename('foo', 'bar')
self.assertEqual(renames, [('foo', 'bar')])
self.assertEqual(removes, [])
inst.close()
def test_removeAndRename_destination_exists(self):
inst = self._makeOne(self.filename)
renames = []
removes = []
inst._remove = lambda v: removes.append(v)
inst._exists = lambda v: True
inst._rename = lambda s, t: renames.append((s, t))
inst.removeAndRename('foo', 'bar')
self.assertEqual(renames, [('foo', 'bar')])
self.assertEqual(removes, ['bar'])
inst.close()
def test_removeAndRename_remove_raises_ENOENT(self):
def remove(fn):
raise OSError(errno.ENOENT)
inst = self._makeOne(self.filename)
renames = []
inst._remove = remove
inst._exists = lambda v: True
inst._rename = lambda s, t: renames.append((s, t))
inst.removeAndRename('foo', 'bar')
self.assertEqual(renames, [('foo', 'bar')])
inst.close()
def test_removeAndRename_remove_raises_other_than_ENOENT(self):
def remove(fn):
raise OSError(errno.EAGAIN)
inst = self._makeOne(self.filename)
inst._remove = remove
inst._exists = lambda v: True
self.assertRaises(OSError, inst.removeAndRename, 'foo', 'bar')
inst.close()
def test_removeAndRename_rename_raises_ENOENT(self):
def rename(s, d):
raise OSError(errno.ENOENT)
inst = self._makeOne(self.filename)
inst._rename = rename
inst._exists = lambda v: False
self.assertEqual(inst.removeAndRename('foo', 'bar'), None)
inst.close()
def test_removeAndRename_rename_raises_other_than_ENOENT(self):
def rename(s, d):
raise OSError(errno.EAGAIN)
inst = self._makeOne(self.filename)
inst._rename = rename
inst._exists = lambda v: False
self.assertRaises(OSError, inst.removeAndRename, 'foo', 'bar')
inst.close()
def test_doRollover_maxbytes_lte_zero(self):
inst = self._makeOne(self.filename)
inst.maxBytes = 0
self.assertEqual(inst.doRollover(), None)
inst.close()
class BoundIOTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.loggers import BoundIO
return BoundIO
def _makeOne(self, maxbytes, buf=''):
klass = self._getTargetClass()
return klass(maxbytes, buf)
def test_write_overflow(self):
io = self._makeOne(1, b'a')
io.write(b'b')
self.assertEqual(io.buf, b'b')
def test_getvalue(self):
io = self._makeOne(1, b'a')
self.assertEqual(io.getvalue(), b'a')
def test_clear(self):
io = self._makeOne(1, b'a')
io.clear()
self.assertEqual(io.buf, b'')
def test_close(self):
io = self._makeOne(1, b'a')
io.close()
self.assertEqual(io.buf, b'')
class LoggerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.loggers import Logger
return Logger
def _makeOne(self, level=None, handlers=None):
klass = self._getTargetClass()
return klass(level, handlers)
def test_blather(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.BLAT)
logger = self._makeOne(LevelsByName.BLAT, (handler,))
logger.blather('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.TRAC
logger.blather('hello')
self.assertEqual(len(handler.records), 1)
def test_trace(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.TRAC)
logger = self._makeOne(LevelsByName.TRAC, (handler,))
logger.trace('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.DEBG
logger.trace('hello')
self.assertEqual(len(handler.records), 1)
def test_debug(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.DEBG)
logger = self._makeOne(LevelsByName.DEBG, (handler,))
logger.debug('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.INFO
logger.debug('hello')
self.assertEqual(len(handler.records), 1)
def test_info(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.INFO)
logger = self._makeOne(LevelsByName.INFO, (handler,))
logger.info('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.WARN
logger.info('hello')
self.assertEqual(len(handler.records), 1)
def test_warn(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.WARN)
logger = self._makeOne(LevelsByName.WARN, (handler,))
logger.warn('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.ERRO
logger.warn('hello')
self.assertEqual(len(handler.records), 1)
def test_error(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.ERRO)
logger = self._makeOne(LevelsByName.ERRO, (handler,))
logger.error('hello')
self.assertEqual(len(handler.records), 1)
logger.level = LevelsByName.CRIT
logger.error('hello')
self.assertEqual(len(handler.records), 1)
def test_critical(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.CRIT)
logger = self._makeOne(LevelsByName.CRIT, (handler,))
logger.critical('hello')
self.assertEqual(len(handler.records), 1)
def test_close(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.CRIT)
logger = self._makeOne(LevelsByName.CRIT, (handler,))
logger.close()
self.assertEqual(handler.closed, True)
def test_getvalue(self):
from supervisor.loggers import LevelsByName
handler = DummyHandler(LevelsByName.CRIT)
logger = self._makeOne(LevelsByName.CRIT, (handler,))
self.assertRaises(NotImplementedError, logger.getvalue)
class MockSysLog(mock.Mock):
def __call__(self, *args, **kwargs):
message = args[-1]
if sys.version_info < (3, 0) and isinstance(message, unicode):
# Python 2.x raises a UnicodeEncodeError when attempting to
# transmit unicode characters that don't encode in the
# default encoding.
message.encode()
super(MockSysLog, self).__call__(*args, **kwargs)
class SyslogHandlerTests(HandlerTests, unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def _getTargetClass(self):
return __import__('supervisor.loggers').loggers.SyslogHandler
def _makeOne(self):
return self._getTargetClass()()
def test_emit_record_asdict_raises(self):
class Record(object):
def asdict(self):
raise TypeError
record = Record()
handler = self._makeOne()
handled = []
handler.handleError = lambda: handled.append(True)
handler.emit(record)
self.assertEqual(handled, [True])
@mock.patch('syslog.syslog', MockSysLog())
def test_emit_ascii_noerror(self):
handler = self._makeOne()
record = self._makeLogRecord(b'hello!')
handler.emit(record)
syslog.syslog.assert_called_with('hello!')
record = self._makeLogRecord('hi!')
handler.emit(record)
syslog.syslog.assert_called_with('hi!')
@mock.patch('syslog.syslog', MockSysLog())
def test_close(self):
handler = self._makeOne()
handler.close() # no-op for syslog
@mock.patch('syslog.syslog', MockSysLog())
def test_reopen(self):
handler = self._makeOne()
handler.reopen() # no-op for syslog
if PY2:
@mock.patch('syslog.syslog', MockSysLog())
def test_emit_unicode_noerror(self):
handler = self._makeOne()
inp = as_string('fií')
record = self._makeLogRecord(inp)
handler.emit(record)
syslog.syslog.assert_called_with('fi\xc3\xad')
def test_emit_unicode_witherror(self):
handler = self._makeOne()
called = []
def fake_syslog(msg):
if not called:
called.append(msg)
raise UnicodeError
handler._syslog = fake_syslog
record = self._makeLogRecord(as_string('fií'))
handler.emit(record)
self.assertEqual(called, [as_string('fi\xc3\xad')])
else:
@mock.patch('syslog.syslog', MockSysLog())
def test_emit_unicode_noerror(self):
handler = self._makeOne()
record = self._makeLogRecord('fií')
handler.emit(record)
syslog.syslog.assert_called_with('fií')
def test_emit_unicode_witherror(self):
handler = self._makeOne()
called = []
def fake_syslog(msg):
if not called:
called.append(msg)
raise UnicodeError
handler._syslog = fake_syslog
record = self._makeLogRecord('fií')
handler.emit(record)
self.assertEqual(called, ['fií'])
class DummyHandler:
    closed = False
def __init__(self, level):
self.level = level
self.records = []
def emit(self, record):
self.records.append(record)
def close(self):
self.closed = True
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"[email protected]"
] | |
6547446b3422447b4e2918a7979c12e3681e4daa | 5a3547772b61f7d1b3a81f76dd1397eb92c68e7b | /lunzi/config.py | c2d1f52b43355983c9b8b2247f0da3a96e3fa5db | [
"MIT"
] | permissive | suen049/AdMRL | 483440f0ded14e471d879b300da9afbab68fbe66 | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | refs/heads/master | 2023-03-12T23:15:05.154003 | 2021-03-06T15:31:21 | 2021-03-06T15:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | import argparse
import os
import yaml
from lunzi.Logger import logger
_frozen = False
_initialized = False
def expand(path):
return os.path.abspath(os.path.expanduser(path))
class MetaFLAGS(type):
_initialized = False
def __setattr__(self, key, value):
assert not _frozen, 'Modifying FLAGS after dumping is not allowed!'
super().__setattr__(key, value)
def __getitem__(self, item):
return self.__dict__[item]
def __iter__(self):
for key, value in self.__dict__.items():
if not key.startswith('_') and not isinstance(value, classmethod):
if isinstance(value, MetaFLAGS):
value = dict(value)
yield key, value
def as_dict(self):
return dict(self)
def merge(self, other: dict):
for key in other:
assert key in self.__dict__, f"Can't find key `{key}`"
if isinstance(self[key], MetaFLAGS) and isinstance(other[key], dict):
self[key].merge(other[key])
else:
setattr(self, key, other[key])
def set_value(self, path, value):
key, *rest = path
assert key in self.__dict__, f"Can't find key `{key}`"
if not rest:
setattr(self, key, value)
else:
self[key]: MetaFLAGS
self[key].set_value(rest, value)
@staticmethod
def set_frozen():
global _frozen
_frozen = True
def freeze(self):
for key, value in self.__dict__.items():
if not key.startswith('_'):
if isinstance(value, MetaFLAGS):
value.freeze()
self.finalize()
def finalize(self):
pass
class BaseFLAGS(metaclass=MetaFLAGS):
pass
def parse(cls):
global _initialized
if _initialized:
return
parser = argparse.ArgumentParser(description='Stochastic Lower Bound Optimization')
parser.add_argument('-c', '--config', type=str, help='configuration file (YAML)', nargs='+', action='append')
parser.add_argument('-s', '--set', type=str, help='additional options', nargs='*', action='append')
args, unknown = parser.parse_known_args()
for a in unknown:
logger.info('unknown arguments: %s', a)
# logger.info('parsed arguments = %s, unknown arguments: %s', args, unknown)
if args.config:
for config in sum(args.config, []):
            with open(expand(config)) as f:
                cls.merge(yaml.safe_load(f))
else:
logger.info('no config file specified.')
if args.set:
for instruction in sum(args.set, []):
path, *value = instruction.split('=')
            cls.set_value(path.split('.'), yaml.safe_load('='.join(value)))
_initialized = True
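# A minimal usage sketch (the flag names and CLI invocation are hypothetical):
#
#   class FLAGS(BaseFLAGS):
#       seed = 0
#       class env(BaseFLAGS):
#           id = 'HalfCheetah-v2'
#
#   parse(FLAGS)    # e.g. python main.py -c exp.yml -s seed=42 env.id=Hopper-v2
#   FLAGS.freeze()  # recursively freezes nested FLAGS and runs finalize()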
| [
"[email protected]"
] | |
05ee44b1e5c2c238d8118d81872b9810cb17608a | cd64e9076ab81f4b2b42215289b1791c8cc3a1dd | /LogHadoopJob/py/MR/MR_KWLIVEAD_POPUP_CLICK.py | 94452bf28c31743ea62110d84f629538799e61c3 | [] | no_license | tonygodspeed/py | a8396c31fa31cfeb47ebc98dc86e3298e76d5dfa | eb38514c540b92903d53434bddc26d35bf67148d | refs/heads/master | 2020-04-02T13:26:27.029232 | 2018-10-24T10:28:35 | 2018-10-24T10:28:35 | 154,481,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | #!/usr/bin/env python
#coding=utf8
from MR_BASE import *
reload(sys)
sys.setdefaultencoding("utf-8")
str_act = "ACT_KWLIVEAD_POPUP_CLICK"
class MR_KWLIVEAD_POPUP_CLICK(mr_base_ex):
def __init__(self):
mr_base_ex.__init__(self,str_act)
        self.res_type = ["s", "s", "s", "i", "i"]
        self.res_name = ["VER", "CHID", "MAC", "closeret", "time"]
#self.res_spec_str = "S:1010";
mr_obj = MR_KWLIVEAD_POPUP_CLICK()
if __name__ == '__main__':
#test_str = r'49:06| [INFO]: <SRC:MUSIC_8.5.2.0_BCS16|ACT:HIFI_LOG|S:KwMusic|TYPE:ENTER_HIFI_DOWNLOAD_PAGE|CHANNEL_NAME:10001_01|PROD:MUSIC|VER:8cs_20161208.exe}|K:546529252|RESEND:0|U:92204504>(60.181.172.98)TM:1481611747'
test_str = r'<SRC:KWSHELLEXT_1.0.6.9051_MUSIC8500PT|S:1010|PROD:KWSHELLEXT|DISVER:1.0.6.9072|OS:10.0.14393.2_|PLAT:X64|VER:2.0.2.17|GID:2562|CHID:MUSIC8500PT|PN:rundll32.exe|MAC:C860009C01D8|UAC:1|ADMIN:0|MVER:MUSIC_8.5.0.0_PT|MCID:57836354|ST:1481337017|CFGVER:14|ACT:ACT_KWLIVEAD_POPUP_CLOSE||autoclose:0|closeret:2|time:5|{}|U:>(222.87.155.143)TM:1481385606'
mr_obj.LocalTest(test_str)
pass
| [
"[email protected]"
] | |
ee191ec954ac1f34e4ac8adcfa289258e03fb944 | b3f8a351f5d92469c987302661a3dcb0328de609 | /fit_2D_LeastSq/fit_2Dpoly_mpfit.py | 8712f33d9e1b3e64fe131f375ded4672311fbe7f | [
"MIT"
] | permissive | Chloe20-cyber/PythonFitting | 87412e5fd7c956f4bf0da77b0c235ad5f5059bff | 54315e336593f7f105f516766fb323662eadd5e3 | refs/heads/master | 2022-04-24T02:46:38.224828 | 2020-03-31T05:04:50 | 2020-03-31T05:04:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,033 | py | #!/usr/bin/env python
# Initial model parameters
inParms=[ {'value': 5.1,
'fixed': False,
'parname': 'amp',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x1',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x2',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x3',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y1',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y2',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y3',
'limited': [False, False]} ]
#=============================================================================#
import os, sys, shutil
import math as m
import numpy as np
import matplotlib as mpl
import pylab as pl
from mpfit import mpfit
#-----------------------------------------------------------------------------#
def main():
# Generate a noisy polynomial
# [off, x1, x2, x3, y1, y2, y3]
pIn = [2.0, 1.5, 0.1, 0.3, 1.0, 2.0, 0.05]
pIn = [1.0, 0.2, 0.0, 0.0, 0.1, 0.0, 0.0]
shape = (200, 200)
X, Y, Z, xyData = genpolydata(pIn, shape, 300, 10.2)
    # Define a function to evaluate the residual
def errFn(p, fjac=None):
status = 0
        # 'poly_surface' returns the 'rfunc' function and the X,Y data is
# inserted via argument unpacking.
return status, poly_surface(p)(*[Y, X]) - Z
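    # mpfit minimizes the sum of squares of the residual vector returned by
    # errFn; returning a negative status would tell mpfit to abort the fit.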
# Fit the data starting from an initial guess
mp = mpfit(errFn, parinfo=inParms, quiet=False)
print()
for i in range(len(inParms)):
print("%s = %f +/- %f" % (inParms[i]['parname'],
mp.params[i],
mp.perror[i]))
p1 = mp.params
#-------------------------------------------------------------------------#
# Plot the original, fit & residual
fig = pl.figure(figsize=(18,4.3))
ax1 = fig.add_subplot(1,3,1)
cax1 = ax1.imshow(xyData, origin='lower',cmap=mpl.cm.jet)
cbar1=fig.colorbar(cax1, pad=0.0)
ax1.scatter(X, Y, c=Z, s=40, cmap=mpl.cm.jet)
ax1.set_title("Sampled Data")
ax1.set_xlim(0, shape[-1]-1)
ax1.set_ylim(0, shape[-2]-1)
ax1.set_aspect('equal')
ax2 = fig.add_subplot(1,3,2)
xyDataFit = poly_surface(p1, shape)
cax2 = ax2.imshow(xyDataFit, origin='lower', cmap=mpl.cm.jet)
cbar2=fig.colorbar(cax2, pad=0.0)
ax2.set_title("Model Fit")
ax3 = fig.add_subplot(1,3,3)
xyDataRes = xyData - xyDataFit
cax3 = ax3.imshow(xyDataRes, origin='lower', cmap=mpl.cm.jet)
cbar2=fig.colorbar(cax3, pad=0.0)
ax3.set_title("Residual")
pl.show()
#-----------------------------------------------------------------------------#
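# The model evaluated below is a separable cubic surface:
#   z(x, y) = p0 + p1*x + p2*x**2 + p3*x**3 + p4*y + p5*y**2 + p6*y**3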
def poly_surface(params, shape=None):
p = params
def rfunc(y, x):
z = p[0] + (p[1]*x + p[2]*x**2.0 + p[3]*x**3.0 +
p[4]*y + p[5]*y**2.0 + p[6]*y**3.0)
return z
if shape is not None:
return rfunc(*np.indices(shape))
else:
return rfunc
#-----------------------------------------------------------------------------#
def genpolydata(params, shape, nSamps=300, noiseFrac=0.2):
# Generate a noisy gaussian image
xyData = poly_surface(params, shape)
xyData += (np.random.random(xyData.shape) - 0.5) * noiseFrac
# Sample the data at discrete pixels
    X = np.random.random(nSamps) * (xyData.shape[-1] - 1)
X = np.array(np.round(X), dtype='int')
    Y = np.random.random(nSamps) * (xyData.shape[-2] - 1)
Y = np.array(np.round(Y), dtype='int')
Z = xyData[Y, X]
return X, Y, Z, xyData
#-----------------------------------------------------------------------------#
main()
| [
"[email protected]"
] | |
fe75e56c1ef7300cbd5ff394f09c7970355079b6 | 51ec37fc8b633e90f699d4372e1301cf30b9d960 | /angrmanagement/ui/toolbars/toolbar_action.py | b5297339032d4abc06afc2f05896b3ca27b13d43 | [
"BSD-2-Clause"
] | permissive | angr/angr-management | b7deffdefd53a99336c8da2cd21bd17f1eb689d7 | f28bfb1c34313c74f99691d0b47de1d90ebfd4ec | refs/heads/master | 2023-09-02T11:53:13.869102 | 2023-08-31T23:38:12 | 2023-08-31T23:38:12 | 40,425,410 | 727 | 125 | BSD-2-Clause | 2023-09-11T22:09:39 | 2015-08-09T04:35:26 | Python | UTF-8 | Python | false | false | 585 | py | class ToolbarAction:
def __init__(self, icon, name, tooltip, triggered, checkable=False, shortcut=None):
self.icon = icon
self.name = name
self.tooltip = tooltip
self.triggered = triggered
self.checkable = checkable
self.shortcut = shortcut
def __hash__(self):
return hash((ToolbarAction, self.name))
def __eq__(self, other):
return isinstance(other, ToolbarAction) and self.name == other.name
class ToolbarSplitter(ToolbarAction):
def __init__(self):
super().__init__(None, None, None, None)
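# Hypothetical usage sketch (names are illustrative, not angr-management API):
#
#   act = ToolbarAction(icon=None, name='step', tooltip='Single-step',
#                       triggered=on_step)
#
# Actions hash and compare by name alone, so a toolbar registry can
# deduplicate repeated registrations of the same logical action.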
| [
"[email protected]"
] | |
fd30b09fe8bc3641f1b3def960bfbf914b20883d | 1d1f173d67a04b78f732aee99ef0e2d4e8284d63 | /dev/phylografter_workaround.py | c2afcd323e6a6731ab85836b191cd5292163b922 | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | rvosa/peyotl | 8767165ec85129c8f25c56a572f0bd879158aa2a | 98154af9832d18cbcb079f7e2db3b0e45893e1da | refs/heads/master | 2021-01-18T19:48:31.273061 | 2015-09-03T15:30:13 | 2015-09-03T15:30:13 | 41,867,598 | 0 | 0 | null | 2015-09-03T15:29:00 | 2015-09-03T15:29:00 | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
import sys, json, codecs
from peyotl.phylografter.nexson_workaround import workaround_phylografter_export_diffs
inpfn = sys.argv[1]
outfn = sys.argv[2]
inp = codecs.open(inpfn, mode='rU', encoding='utf-8')
obj = json.load(inp)
workaround_phylografter_export_diffs(obj, outfn) | [
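# Usage (file names are hypothetical):
#   python phylografter_workaround.py study.json study-fixed.json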
"[email protected]"
] | |
ee9ca801490c9efbd0feeb7b4aef729657168eb5 | cb0bde8ab641d5e411e91477728ade090836b729 | /sdk/python/pulumi_azure_nextgen/web/latest/web_app_metadata.py | 20a633976d48076e3187f390ed68311e3b95e47e | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | rchamorro/pulumi-azure-nextgen | 7debd444063f0f9810ac0ee5fe11e7e8913b4886 | 09987cba1c466657730a23f5083aa62ec3dc8247 | refs/heads/master | 2023-03-03T09:32:59.634185 | 2021-02-10T16:13:24 | 2021-02-10T16:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,534 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['WebAppMetadata']
class WebAppMetadata(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
String dictionary resource.
Latest API Version: 2020-09-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Settings.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppMetadata")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppMetadata, __self__).__init__(
'azure-nextgen:web/latest:WebAppMetadata',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppMetadata':
"""
Get an existing WebAppMetadata resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppMetadata(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Mapping[str, str]]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
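# A minimal usage sketch (resource, app, and group names are illustrative, and
# the import path is assumed from this file's location in the SDK):
#
#   from pulumi_azure_nextgen.web.latest import WebAppMetadata
#
#   metadata = WebAppMetadata('app-metadata',
#                             name='my-web-app',
#                             resource_group_name='my-rg',
#                             properties={'CURRENT_STACK': 'python'})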
| [
"[email protected]"
] | |
b3f938c6553b63f1af5179911a9209f8d462d04d | b56eaf7a603cbb850be11dbbed2c33b954dedbcb | /distar/model/alphastar/actor_critic.py | 5c7a80848e2f26b4577ea731c2614e761e19e771 | [
"Apache-2.0"
] | permissive | LFhase/DI-star | 2887d9c5dd8bfaa629e0171504b05ac70fdc356f | 09d507c412235a2f0cf9c0b3485ec9ed15fb6421 | refs/heads/main | 2023-06-20T20:05:01.378611 | 2021-07-09T16:26:18 | 2021-07-09T16:26:18 | 384,499,311 | 1 | 0 | Apache-2.0 | 2021-07-09T16:50:29 | 2021-07-09T16:50:28 | null | UTF-8 | Python | false | false | 18,036 | py | import os.path as osp
from collections import namedtuple, OrderedDict, defaultdict
from functools import reduce
import torch
import torch.nn as nn
from collections.abc import Sequence, Mapping
from .encoder import Encoder
from .policy import Policy
from .value import ValueBaseline
from ctools.model import ValueActorCriticBase
from ctools.utils import read_config, deep_merge_dicts
from distar.envs import AlphaStarEnv
alphastar_model_default_config = read_config(osp.join(osp.dirname(__file__), "actor_critic_default_config.yaml"))
def detach_grad(data):
if isinstance(data, Sequence):
for i in range(len(data)):
data[i] = detach_grad(data[i])
elif isinstance(data, Mapping):
for k in data.keys():
data[k] = detach_grad(data[k])
elif isinstance(data, torch.Tensor):
data = data.detach()
return data
class AlphaStarActorCritic(ValueActorCriticBase):
EvalInput = namedtuple(
'EvalInput', ['map_size', 'entity_raw', 'scalar_info', 'spatial_info', 'entity_info', 'prev_state']
)
EvalOutput = namedtuple('EvalOutput', ['actions', 'logits'])
MimicOutput = namedtuple('MimicOutput', ['logits', 'next_state'])
StepInput = namedtuple('StepInput', ['home', 'away'])
StepOutput = namedtuple('StepOutput', ['actions', 'logits', 'baselines'])
CriticInput = namedtuple(
'CriticInput', [
'lstm_output_home', 'embeddings_entity_away',
'embeddings_spatial_away', 'baseline_feature_home', 'baseline_feature_away',
'score_embedding_home', 'score_embedding_away', 'cum_stat_home', 'immediate_cum_stat_home',
'immediate_cum_stat_away'
]
)
CriticOutput = namedtuple('CriticOutput', ['winloss', 'build_order', 'built_unit', 'effect', 'upgrade', 'battle'])
def __init__(self, model_config=None):
super(AlphaStarActorCritic, self).__init__()
cfg = deep_merge_dicts(alphastar_model_default_config["model"], model_config)
self.cfg = self._merge_input_dim(cfg)
self.encoder = Encoder(self.cfg.encoder)
self.policy = Policy(self.cfg.policy)
self.only_update_baseline = cfg.get('only_update_baseline', False)
if self.cfg.use_value_network:
self.value_networks = nn.ModuleDict()
self.value_cum_stat_keys = OrderedDict()
for k, v in self.cfg.value.items():
if k in self.cfg.enable_baselines:
# creating a ValueBaseline network for each baseline, to be used in _critic_forward
self.value_networks[v.name] = ValueBaseline(v.param)
# name of needed cumulative stat items
self.value_cum_stat_keys[v.name] = v.cum_stat_keys
self.freeze_module(self.cfg.freeze_targets)
def _merge_input_dim(self, cfg):
env_info = AlphaStarEnv({}).info()
cfg.encoder.obs_encoder.entity_encoder.input_dim = env_info.obs_space['entity'].shape[-1]
cfg.encoder.obs_encoder.spatial_encoder.input_dim = env_info.obs_space['spatial'].shape[
0] + cfg.encoder.scatter.output_dim
handle = cfg.encoder.obs_encoder.scalar_encoder.module
for k in handle.keys():
handle[k].input_dim = env_info.obs_space['scalar'].shape[k]
cfg.encoder.score_cumulative.input_dim = env_info.obs_space['scalar'].shape['score_cumulative']
return cfg
def freeze_module(self, freeze_targets=None):
"""
Note:
must be called after the model initialization, before the model forward
"""
if freeze_targets is None:
# if freeze_targets is not provided, try to use self.freeze_targets
if self.freeze_targets is None:
raise Exception("not provided arguments(freeze_targets)")
else:
freeze_targets = self.freeze_targets
else:
# if freeze_targets is provided, update self.freeze_targets for next usage
self.freeze_targets = freeze_targets
def get_submodule(name):
part = name.split('.')
module = self
for p in part:
module = getattr(module, p)
return module
for name in freeze_targets:
module = get_submodule(name)
module.eval()
for m in module.parameters():
m.requires_grad_(False)
# overwrite
def train(self, mode=True):
super().train(mode)
if hasattr(self, 'freeze_targets'):
self.freeze_module()
# overwrite
def mimic_single(self, inputs, **kwargs):
        lstm_output, next_state, entity_embeddings, map_skip, scalar_context, spatial_info, _, _, _, _, _, _ = \
self.encoder(
inputs
)
policy_inputs = self.policy.MimicInput(
inputs['actions'], inputs['entity_raw'], inputs['scalar_info']['available_actions'], lstm_output,
entity_embeddings, map_skip, scalar_context, spatial_info, inputs['entity_num'],
inputs['selected_units_num']
)
logits = self.policy(policy_inputs, mode='mimic')
return {'policy_outputs': logits, 'next_state': next_state}
# overwrite
def mimic(self, inputs, **kwargs):
self.traj = [len(b['spatial_info']) for b in inputs]
self.batch_size = len(inputs[0]['spatial_info'])
prev_state = inputs[0].pop('prev_state')
end_idx = [[i for i in inputs[j]['end_index']] for j in range(len(inputs))]
inputs = self._merge_traj(inputs)
# encoder
embedded_entity, embedded_spatial, embedded_scalar, scalar_context, baseline_feature,\
cum_stat, entity_embeddings, map_skip = self.encoder.encode_parallel_forward(inputs)
embedded_entity, embedded_spatial, embedded_scalar = [
self._split_traj(t) for t in [embedded_entity, embedded_spatial, embedded_scalar]
]
# lstm
lstm_output = []
for idx, embedding in enumerate(zip(embedded_entity, embedded_spatial, embedded_scalar)):
active_state = [i for i in range(self.batch_size) if i not in end_idx[idx]]
tmp_state = [prev_state[i] for i in active_state]
tmp_output, tmp_state = self.encoder.core_lstm(embedding[0], embedding[1], embedding[2], tmp_state)
for _idx, active_idx in enumerate(active_state):
prev_state[active_idx] = tmp_state[_idx]
lstm_output.append(tmp_output.squeeze(0))
next_state = prev_state
lstm_output = self._merge_traj(lstm_output)
# head
policy_inputs = self.policy.MimicInput(
inputs['actions'], inputs['entity_raw'], inputs['scalar_info']['available_actions'], lstm_output,
entity_embeddings, map_skip, scalar_context, inputs['spatial_info']
)
logits = self.policy(policy_inputs, mode='mimic')
return self.MimicOutput(logits, next_state)
def _merge_traj(self, data):
def merge(t):
if isinstance(t[0], torch.Tensor):
# t = torch.stack(t, dim=0)
# return t.reshape(-1, *t.shape[2:])
t = torch.cat(t, dim=0)
return t
elif isinstance(t[0], list):
return reduce(lambda x, y: x + y, t)
elif isinstance(t[0], dict):
return {k: merge([m[k] for m in t]) for k in t[0].keys()}
else:
raise TypeError(type(t[0]))
if isinstance(data, torch.Tensor):
return data.reshape(-1, *data.shape[2:])
else:
return merge(data)
def _split_traj(self, data):
assert isinstance(data, torch.Tensor)
ret = [d.unsqueeze(0) for d in torch.split(data, self.traj, 0)]
assert len(ret) == len(self.traj), 'resume data length must equal to original data'
return ret
# overwrite
def compute_action(self, inputs, **kwargs):
"""
        Overview: forward for agent evaluation (only the actor is evaluated)
        Note:
            batch size must be 1
Inputs:
- inputs: EvalInput namedtuple with following fields
- map_size
- entity_raw
- scalar_info
- spatial_info
- entity_info
- prev_state
Output:
- EvalOutput named dict
"""
if self.cfg.use_value_network:
assert 'away_inputs' in inputs.keys()
away_inputs = inputs['away_inputs']
away_inputs['prev_state'] = inputs['away_hidden_state']
lstm_output_away, next_state_away, entity_embeddings_away, map_skip_away, scalar_context_away, \
spatial_info_away, baseline_feature_away, cum_stat_away, score_embedding_away, \
embedded_spatial_away, embedded_entity_away, immediate_cum_stat_away \
= self.encoder(
away_inputs
)
lstm_output, next_state, entity_embeddings, map_skip, scalar_context, spatial_info, \
baseline_feature, cum_stat, score_embedding, embedded_spatial, embedded_entity, \
immediate_cum_stat = self.encoder(
inputs
)
embedded_entity_away = embedded_entity_away.reshape(-1, embedded_entity_away.shape[-1])
embedded_spatial_away = embedded_spatial_away.reshape(-1, embedded_spatial_away.shape[-1])
critic_inputs = self.CriticInput(
lstm_output, embedded_entity_away,
embedded_spatial_away, baseline_feature, baseline_feature_away, score_embedding,
score_embedding_away, cum_stat, immediate_cum_stat, immediate_cum_stat_away
)
baselines = self._critic_forward(critic_inputs)
else:
lstm_output, next_state, entity_embeddings, map_skip, scalar_context, spatial_info, _, _, _, _, _, _ \
= self.encoder(
inputs
)
baselines = None
policy_inputs = self.policy.EvaluateInput(
inputs['entity_raw'],
inputs['scalar_info']['available_actions'],
lstm_output,
entity_embeddings,
map_skip,
scalar_context,
spatial_info,
inputs['entity_num'],
)
actions, logits = self.policy(policy_inputs, mode='evaluate', **kwargs)
return self.EvalOutput(actions, logits), next_state, baselines
# overwrite
def step(self, inputs, **kwargs):
"""
Overview: forward for training (actor and critic)
Inputs:
- inputs: StepInput namedtuple with observations
- obs_home: observation from my self as EvalInput
- obs_away: observation from the rival as EvalInput
Outputs:
- ret: StepOutput namedtuple containing
- actions: output from the model
- baselines: critic values
- next_state_home
- next_state_away
"""
# encoder(home and away)
prev_state = inputs['prev_state']
inputs['obs_home']['prev_state'] = [p['home'] for p in prev_state]
inputs['obs_away']['prev_state'] = [p['away'] for p in prev_state]
lstm_output_home, \
next_state_home, \
entity_embeddings, \
map_skip, \
scalar_context, \
spatial_info, \
baseline_feature_home, \
cum_stat_home, \
score_embedding_home = self.encoder(
inputs['obs_home']
)
lstm_output_away, next_state_away, _, _, _, _, baseline_feature_away, cum_stat_away, \
score_embedding_away = self.encoder(inputs['obs_away'])
# value
critic_inputs = self.CriticInput(
lstm_output_home, lstm_output_away, baseline_feature_home, baseline_feature_away, score_embedding_home,
score_embedding_away, cum_stat_home, cum_stat_away
)
baselines = self._critic_forward(critic_inputs)
# policy
policy_inputs = self.policy.EvaluateInput(
inputs['obs_home']['entity_raw'], inputs['obs_home']['scalar_info']['available_actions'], lstm_output_home,
entity_embeddings, map_skip, scalar_context, spatial_info
)
actions, logits = self.policy(policy_inputs, mode='evaluate', **kwargs)
next_state = [{'home': h, 'away': a} for h, a in zip(next_state_home, next_state_away)]
return self.StepOutput(actions, logits, baselines), next_state
def compute_action_value(self, inputs, **kwargs):
batch_size = inputs['batch_size']
traj_len = inputs['traj_len']
prev_state = inputs['prev_state']
prev_state = [p['home'] for p in prev_state]
        # obs_home and obs_away are merged together and the last observation
        # is appended, so the effective trajectory length is traj_len + 1
embedded_entity, embedded_spatial, embedded_scalar, scalar_context, baseline_feature, \
cum_stat, entity_embeddings, map_skip, score_embedding, immediate_cum_stat = self.encoder.encode_parallel_forward(inputs)
embeddings = [embedded_entity, embedded_spatial, embedded_scalar]
embeddings_entity_away = embeddings[0][(traj_len + 1) * batch_size:]
embeddings_spatial_away = embeddings[1][(traj_len + 1) * batch_size:]
embeddings_home = [e[:(traj_len + 1) * batch_size].view(-1, batch_size, e.shape[-1]) for e in embeddings]
# go through core lstm
lstm_output_home, next_state = self.encoder.core_lstm(*embeddings_home, prev_state) # traj + 1, 2*b, -1
# split embeddings to home and away
lstm_output_home = lstm_output_home.reshape(-1, lstm_output_home.shape[-1]) # (traj + 1) * b, -1
baseline_feature_home, baseline_feature_away = torch.chunk(baseline_feature, 2, dim=0)
score_embedding_home, score_embedding_away = torch.chunk(score_embedding, 2, dim=0)
cum_stat_home, cum_stat_away = dict(), dict()
for k, v in cum_stat.items():
cum_stat_home[k], cum_stat_away[k] = torch.chunk(v, 2, dim=0)
immediate_cum_stat_home, immediate_cum_stat_away = dict(), dict()
for k, v in immediate_cum_stat.items():
immediate_cum_stat_home[k], immediate_cum_stat_away[k] = torch.chunk(v, 2, dim=0)
# value
critic_input = [lstm_output_home, embeddings_entity_away, embeddings_spatial_away, baseline_feature_home,
baseline_feature_away, score_embedding_home, score_embedding_away, cum_stat_home,
immediate_cum_stat_home, immediate_cum_stat_away]
if self.only_update_baseline:
critic_input = detach_grad(critic_input)
critic_inputs = self.CriticInput(*critic_input)
baselines = self._critic_forward(critic_inputs, parallel=True, traj_len=traj_len + 1, batch_size=batch_size)
# get home embedding for policy
home_size = traj_len * batch_size
map_skip = [i[:home_size] for i in map_skip]
actions = {k: v[:home_size] for k, v in inputs['actions'].items()}
entity_raw = {k: v[:home_size] for k, v in inputs['entity_raw'].items()}
entity_num = inputs['entity_num'][:home_size]
max_entity_num = entity_num.max()
selected_units_num = inputs['selected_units_num'][:home_size]
entity_embeddings_home = entity_embeddings[:home_size, :max_entity_num]
policy_inputs = self.policy.MimicInput(
actions, entity_raw, inputs['scalar_info']['available_actions'][:home_size], lstm_output_home[:home_size],
entity_embeddings_home, map_skip, scalar_context[:home_size], inputs['spatial_info'][:home_size],
entity_num, selected_units_num
)
logits = self.policy(policy_inputs, mode='mimic')
mid = len(next_state) // 2
next_state = list(zip(*[next_state[:mid], next_state[mid:]]))
next_state = [{'home': n[0], 'away': n[1]} for n in next_state]
return {'policy_outputs': logits, 'baselines': baselines, 'next_state': next_state}
# overwrite
def _critic_forward(self, inputs, parallel=False, traj_len=0, batch_size=0):
"""
Overview: Evaluate value network on each baseline
"""
def select_item(data, key):
            # Filter ``data`` (dict) by ``key`` (list) and return the values
            # whose keys appear in ``key`` as a list.
ret = []
for k, v in data.items():
if k in key:
ret.append(v)
return ret
cum_stat_home, immediate_cum_stat_home = inputs.cum_stat_home, inputs.immediate_cum_stat_home
immediate_cum_stat_away = inputs.immediate_cum_stat_away
# 'lstm_output_home', 'lstm_output_away', 'baseline_feature_home', 'baseline_feature_away'
# are torch.Tensors and are shared across all baselines
same_part = torch.cat(inputs[:7], dim=1)
ret = {k: None for k in self.CriticOutput._fields}
for (name_n, m), (name_c, key) in zip(self.value_networks.items(), self.value_cum_stat_keys.items()):
assert name_n == name_c
cum_stat_home_subset = select_item(cum_stat_home, key)
immediate_cum_stat_home_subset = select_item(immediate_cum_stat_home, key)
immediate_cum_stat_away_subset = select_item(immediate_cum_stat_away, key)
inputs = torch.cat(
[same_part] + cum_stat_home_subset + immediate_cum_stat_home_subset + immediate_cum_stat_away_subset,
dim=1)
# apply the value network to inputs
ret[name_n] = m(inputs)
if parallel:
ret[name_n] = ret[name_n].view(traj_len, batch_size)
return self.CriticOutput(**ret)
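

# Hypothetical sketch (not part of the original file): the trajectory
# flattening used by ``_merge_traj``/``_split_traj`` above collapses tensors
# shaped [traj, batch, ...] into [traj * batch, ...] so all timesteps can be
# processed in parallel, then restores the original layout. Shapes and names
# below are assumptions for illustration only.
def _traj_reshape_demo():
    import torch
    traj_len, batch, dim = 4, 2, 8
    x = torch.randn(traj_len, batch, dim)
    flat = x.reshape(-1, dim)  # merge: [traj * batch, dim]
    restored = flat.reshape(traj_len, batch, dim)  # split back
    assert torch.equal(x, restored)
    return flat.shape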
| [
"[email protected]"
] | |
e6ba30f501ef7ca6789ce6408a5692531d2ee3fa | 14a913fce4b538b22f28409645cd6abe3455808f | /bigtable/quickstart_happybase/main_test.py | 5f08c30b8b7bf068b92a8642ce32bef179ddf70a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iamLoi/Python-Random-Number-Generator | 8da7dbd37cb13a01232c8ed49b9df35a99c63d73 | 7579e8b15130802aaf519979e475c6c75c403eda | refs/heads/master | 2022-08-29T19:05:32.649931 | 2019-09-14T14:48:58 | 2019-09-14T14:48:58 | 208,454,877 | 2 | 1 | Apache-2.0 | 2022-08-05T21:57:49 | 2019-09-14T14:51:05 | Python | UTF-8 | Python | false | false | 873 | py | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from main import main
PROJECT = os.environ['GCLOUD_PROJECT']
BIGTABLE_CLUSTER = os.environ['BIGTABLE_CLUSTER']
TABLE_NAME = 'my-table'
def test_main(capsys):
main(PROJECT, BIGTABLE_CLUSTER, TABLE_NAME)
out, _ = capsys.readouterr()
assert '"cf1:c1": "test-value"' in out
| [
"[email protected]"
] | |
e7648cbf466cc94ea89fc72b4f47f5aa9e8ef1c9 | 301ff8012353185db7d1ad76f05e1b7972306f19 | /pickup/pickup2ground.py | 6a81bcf215cca8cf7db2c4c578143fa76cc5bcaf | [] | no_license | msyriac/tenki | 36cc9e7cee8f5bbc688ac0c946e0cc5c3be1df2c | b727f0f40f00a431679fea41e5dd693f07cc496b | refs/heads/master | 2021-01-22T07:32:03.015010 | 2017-08-23T14:15:28 | 2017-08-23T14:15:28 | 102,306,661 | 0 | 0 | null | 2017-09-04T01:36:40 | 2017-09-04T01:36:40 | null | UTF-8 | Python | false | false | 2,095 | py | # Project a single pickup map into horizontal coordinates
import numpy as np, os
from enlib import enmap, utils, config, array_ops
from enact import actdata, filedb
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("pickup_map")
parser.add_argument("template")
parser.add_argument("sel_repr")
parser.add_argument("el", type=float)
parser.add_argument("ofile")
args = parser.parse_args()
filedb.init()
nrow, ncol = 33, 32
# Read our template, which represents the output horizontal coordinates
template = enmap.read_map(args.template)
# Use our representative selector to get focalplane offsets and polangles
entry = filedb.data[filedb.scans[args.sel_repr][0]]
d = actdata.read(entry, ["boresight", "point_offsets", "polangle"])
d.boresight[2] = args.el # In degrees, calibrated in next step
d = actdata.calibrate(d, exclude=["autocut"])
def reorder(map, nrow, ncol, dets):
return enmap.samewcs(map[utils.transpose_inds(dets,nrow,ncol)],map)
# Read our map, and give each row a weight
pickup = enmap.read_map(args.pickup_map)
pickup = reorder(pickup, nrow, ncol, d.dets)
weight = np.median((pickup[:,1:]-pickup[:,:-1])**2,-1)
weight[weight>0] = 1/weight[weight>0]
# Find the output pixel for each input pixel
baz = pickup[:1].posmap()[1,0]
bel = baz*0 + args.el * utils.degree
ipoint = np.array([baz,bel])
opoint = ipoint[:,None,:] + d.point_offset.T[:,:,None]
opix = template.sky2pix(opoint[::-1]).astype(int) # [{y,x},ndet,naz]
opix = np.rollaxis(opix, 1) # [ndet,{y,x},naz]
omap = enmap.zeros((3,)+template.shape[-2:], template.wcs)
odiv = enmap.zeros((3,3)+template.shape[-2:], template.wcs)
for det in range(d.ndet):
omap += utils.bin_multi(opix[det], template.shape[-2:], weight[det]*pickup[det]) * d.det_comps[det,:,None,None]
odiv += utils.bin_multi(opix[det], template.shape[-2:], weight[det]) * d.det_comps[det,:,None,None,None] * d.det_comps[det,None,:,None,None]
odiv = enmap.samewcs(array_ops.svdpow(odiv, -1, axes=[0,1]), odiv)
omap = enmap.samewcs(array_ops.matmul(odiv, omap, axes=[0,1]), omap)
enmap.write_map(args.ofile, omap)
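

# Illustrative sketch only (not part of the original script): the
# ``utils.bin_multi`` calls above accumulate weighted samples into output
# pixels. The same idea in plain numpy, assuming ``pix`` is an integer
# index array of shape [{y,x}, nsamp]:
def _bin_demo(pix, shape, weights):
    omap = np.zeros(shape)
    # np.add.at performs unbuffered in-place accumulation, so repeated
    # pixel indices sum correctly instead of overwriting each other.
    np.add.at(omap, (pix[0], pix[1]), weights)
    return omap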
| [
"[email protected]"
] | |
bfaae3d582bc3e9ec14ee2a57de5826d6e5818a5 | 907b3bbd44c95be1542a36feaadb6a71b724579f | /files/usr/tmp/pip-build-nyxh8e0k/google-cloud-pubsub/unit_tests/test_topic.py | 0a7e4cede8d7590340ebd96d98b82704b158c62c | [
"Apache-2.0"
] | permissive | vo0doO/com.termux | 2d8f536c1a5dbd7a091be0baf181e51f235fb941 | c97dd7b906e5ef3ec157581fd0bcadd3e3fc220e | refs/heads/master | 2020-12-24T09:40:30.612130 | 2016-11-21T07:47:25 | 2016-11-21T07:47:25 | 73,282,539 | 2 | 2 | null | 2020-07-24T21:33:03 | 2016-11-09T12:33:01 | Python | UTF-8 | Python | false | false | 32,273 | py | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestTopic(unittest.TestCase):
PROJECT = 'PROJECT'
TOPIC_NAME = 'topic_name'
TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
@staticmethod
def _get_target_class():
from google.cloud.pubsub.topic import Topic
return Topic
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_w_explicit_timestamp(self):
client = _Client(project=self.PROJECT)
topic = self._make_one(self.TOPIC_NAME,
client=client,
timestamp_messages=True)
self.assertEqual(topic.name, self.TOPIC_NAME)
self.assertEqual(topic.project, self.PROJECT)
self.assertEqual(topic.full_name, self.TOPIC_PATH)
self.assertTrue(topic.timestamp_messages)
def test_from_api_repr(self):
client = _Client(project=self.PROJECT)
resource = {'name': self.TOPIC_PATH}
klass = self._get_target_class()
topic = klass.from_api_repr(resource, client=client)
self.assertEqual(topic.name, self.TOPIC_NAME)
self.assertIs(topic._client, client)
self.assertEqual(topic.project, self.PROJECT)
self.assertEqual(topic.full_name, self.TOPIC_PATH)
def test_from_api_repr_with_bad_client(self):
PROJECT1 = 'PROJECT1'
PROJECT2 = 'PROJECT2'
client = _Client(project=PROJECT1)
PATH = 'projects/%s/topics/%s' % (PROJECT2, self.TOPIC_NAME)
resource = {'name': PATH}
klass = self._get_target_class()
self.assertRaises(ValueError, klass.from_api_repr,
resource, client=client)
def test_create_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_create_response = {'name': self.TOPIC_PATH}
topic = self._make_one(self.TOPIC_NAME, client=client)
topic.create()
self.assertEqual(api._topic_created, self.TOPIC_PATH)
def test_create_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_create_response = {'name': self.TOPIC_PATH}
topic = self._make_one(self.TOPIC_NAME, client=client1)
topic.create(client=client2)
self.assertEqual(api._topic_created, self.TOPIC_PATH)
def test_exists_miss_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
topic = self._make_one(self.TOPIC_NAME, client=client)
self.assertFalse(topic.exists())
self.assertEqual(api._topic_got, self.TOPIC_PATH)
def test_exists_hit_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_get_response = {'name': self.TOPIC_PATH}
topic = self._make_one(self.TOPIC_NAME, client=client1)
self.assertTrue(topic.exists(client=client2))
self.assertEqual(api._topic_got, self.TOPIC_PATH)
def test_delete_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_delete_response = {}
topic = self._make_one(self.TOPIC_NAME, client=client)
topic.delete()
self.assertEqual(api._topic_deleted, self.TOPIC_PATH)
def test_delete_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_delete_response = {}
topic = self._make_one(self.TOPIC_NAME, client=client1)
topic.delete(client=client2)
self.assertEqual(api._topic_deleted, self.TOPIC_PATH)
def test_publish_single_bytes_wo_attrs_w_bound_client(self):
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client)
msgid = topic.publish(PAYLOAD)
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_single_bytes_wo_attrs_w_add_timestamp_alt_client(self):
import datetime
import mock
from google.cloud._helpers import _RFC3339_MICROS
NOW = datetime.datetime.utcnow()
def _utcnow():
return NOW
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
MESSAGE = {
'data': PAYLOAD,
'attributes': {'timestamp': NOW.strftime(_RFC3339_MICROS)},
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client1,
timestamp_messages=True)
with mock.patch('google.cloud.pubsub.topic._NOW', new=_utcnow):
msgid = topic.publish(PAYLOAD, client=client2)
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_single_bytes_w_add_timestamp_w_ts_in_attrs(self):
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
OVERRIDE = '2015-04-10T16:46:22.868399Z'
MESSAGE = {'data': PAYLOAD,
'attributes': {'timestamp': OVERRIDE}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client,
timestamp_messages=True)
msgid = topic.publish(PAYLOAD, timestamp=OVERRIDE)
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_single_w_attrs(self):
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
MESSAGE = {'data': PAYLOAD,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client)
msgid = topic.publish(PAYLOAD, attr1='value1', attr2='value2')
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_with_gax(self):
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client)
msgid = topic.publish(PAYLOAD)
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_without_gax(self):
PAYLOAD = 'This is the message text'
MSGID = 'DEADBEEF'
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID]
topic = self._make_one(self.TOPIC_NAME, client=client)
msgid = topic.publish(PAYLOAD)
self.assertEqual(msgid, MSGID)
self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE]))
def test_publish_multiple_w_bound_client(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MSGID1 = 'DEADBEEF'
MSGID2 = 'BEADCAFE'
MESSAGE1 = {'data': PAYLOAD1, 'attributes': {}}
MESSAGE2 = {'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID1, MSGID2]
topic = self._make_one(self.TOPIC_NAME, client=client)
with topic.batch() as batch:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
self.assertEqual(list(batch), [MSGID1, MSGID2])
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._topic_published,
(self.TOPIC_PATH, [MESSAGE1, MESSAGE2]))
def test_publish_w_no_messages(self):
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = []
topic = self._make_one(self.TOPIC_NAME, client=client)
with topic.batch() as batch:
pass
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._api_called, 0)
def test_publish_multiple_w_alternate_client(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MSGID1 = 'DEADBEEF'
MSGID2 = 'BEADCAFE'
MESSAGE1 = {'data': PAYLOAD1, 'attributes': {}}
MESSAGE2 = {
'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'},
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID1, MSGID2]
topic = self._make_one(self.TOPIC_NAME, client=client1)
with topic.batch(client=client2) as batch:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
self.assertEqual(list(batch), [MSGID1, MSGID2])
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._topic_published,
(self.TOPIC_PATH, [MESSAGE1, MESSAGE2]))
def test_publish_multiple_error(self):
PAYLOAD1 = b'This is the first message text'
PAYLOAD2 = b'This is the second message text'
client = _Client(project=self.PROJECT)
api = client.publisher_api = _FauxPublisherAPI()
topic = self._make_one(self.TOPIC_NAME, client=client)
try:
with topic.batch() as batch:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
raise _Bugout()
except _Bugout:
pass
self.assertEqual(list(batch), [])
self.assertEqual(getattr(api, '_topic_published', self), self)
def test_subscription(self):
from google.cloud.pubsub.subscription import Subscription
client = _Client(project=self.PROJECT)
topic = self._make_one(self.TOPIC_NAME, client=client)
SUBSCRIPTION_NAME = 'subscription_name'
subscription = topic.subscription(SUBSCRIPTION_NAME)
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscription.name, SUBSCRIPTION_NAME)
self.assertIs(subscription.topic, topic)
def test_list_subscriptions_no_paging(self):
import six
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.subscription import Subscription
client = Client(project=self.PROJECT, credentials=object(),
use_gax=False)
SUB_NAME_1 = 'subscription_1'
SUB_PATH_1 = 'projects/%s/subscriptions/%s' % (
self.PROJECT, SUB_NAME_1)
SUB_NAME_2 = 'subscription_2'
SUB_PATH_2 = 'projects/%s/subscriptions/%s' % (
self.PROJECT, SUB_NAME_2)
SUBS_LIST = [SUB_PATH_1, SUB_PATH_2]
TOKEN = 'TOKEN'
returned = {
'subscriptions': SUBS_LIST,
'nextPageToken': TOKEN,
}
client._connection = _Connection(returned)
topic = self._make_one(self.TOPIC_NAME, client=client)
iterator = topic.list_subscriptions()
page = six.next(iterator.pages)
subscriptions = list(page)
next_page_token = iterator.next_page_token
self.assertEqual(len(subscriptions), 2)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscriptions[0].name, SUB_NAME_1)
self.assertIs(subscription.topic, topic)
subscription = subscriptions[1]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscriptions[1].name, SUB_NAME_2)
self.assertIs(subscription.topic, topic)
self.assertEqual(next_page_token, TOKEN)
# Verify the mock.
called_with = client._connection._called_with
self.assertEqual(len(called_with), 3)
self.assertEqual(called_with['method'], 'GET')
path = '/%s/subscriptions' % (self.TOPIC_PATH,)
self.assertEqual(called_with['path'], path)
self.assertEqual(called_with['query_params'], {})
def test_list_subscriptions_with_paging(self):
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.subscription import Subscription
client = Client(project=self.PROJECT, credentials=object(),
use_gax=False)
SUB_NAME_1 = 'subscription_1'
SUB_PATH_1 = 'projects/%s/subscriptions/%s' % (
self.PROJECT, SUB_NAME_1)
SUB_NAME_2 = 'subscription_2'
SUB_PATH_2 = 'projects/%s/subscriptions/%s' % (
self.PROJECT, SUB_NAME_2)
SUBS_LIST = [SUB_PATH_1, SUB_PATH_2]
PAGE_SIZE = 10
TOKEN = 'TOKEN'
returned = {
'subscriptions': SUBS_LIST,
}
client._connection = _Connection(returned)
topic = self._make_one(self.TOPIC_NAME, client=client)
iterator = topic.list_subscriptions(
page_size=PAGE_SIZE, page_token=TOKEN)
subscriptions = list(iterator)
next_page_token = iterator.next_page_token
self.assertEqual(len(subscriptions), 2)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscriptions[0].name, SUB_NAME_1)
self.assertIs(subscription.topic, topic)
subscription = subscriptions[1]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscriptions[1].name, SUB_NAME_2)
self.assertIs(subscription.topic, topic)
self.assertIsNone(next_page_token)
# Verify the mock.
called_with = client._connection._called_with
self.assertEqual(len(called_with), 3)
self.assertEqual(called_with['method'], 'GET')
path = '/%s/subscriptions' % (self.TOPIC_PATH,)
self.assertEqual(called_with['path'], path)
self.assertEqual(called_with['query_params'],
{'pageSize': PAGE_SIZE, 'pageToken': TOKEN})
def test_list_subscriptions_missing_key(self):
from google.cloud.pubsub.client import Client
client = Client(project=self.PROJECT, credentials=object(),
use_gax=False)
client._connection = _Connection({})
topic = self._make_one(self.TOPIC_NAME, client=client)
iterator = topic.list_subscriptions()
subscriptions = list(iterator)
next_page_token = iterator.next_page_token
self.assertEqual(len(subscriptions), 0)
self.assertIsNone(next_page_token)
# Verify the mock.
called_with = client._connection._called_with
self.assertEqual(len(called_with), 3)
self.assertEqual(called_with['method'], 'GET')
path = '/%s/subscriptions' % (self.TOPIC_PATH,)
self.assertEqual(called_with['path'], path)
self.assertEqual(called_with['query_params'], {})
def test_get_iam_policy_w_bound_client(self):
from google.cloud.pubsub.iam import (
PUBSUB_ADMIN_ROLE,
PUBSUB_EDITOR_ROLE,
PUBSUB_VIEWER_ROLE,
PUBSUB_PUBLISHER_ROLE,
PUBSUB_SUBSCRIBER_ROLE,
)
OWNER1 = 'user:[email protected]'
OWNER2 = 'group:[email protected]'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:[email protected]'
VIEWER1 = 'serviceAccount:[email protected]'
VIEWER2 = 'user:[email protected]'
PUBLISHER = 'user:[email protected]'
SUBSCRIBER = 'serviceAccount:[email protected]'
POLICY = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': PUBSUB_ADMIN_ROLE, 'members': [OWNER1, OWNER2]},
{'role': PUBSUB_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': PUBSUB_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
{'role': PUBSUB_PUBLISHER_ROLE, 'members': [PUBLISHER]},
{'role': PUBSUB_SUBSCRIBER_ROLE, 'members': [SUBSCRIBER]},
],
}
client = _Client(project=self.PROJECT)
api = client.iam_policy_api = _FauxIAMPolicy()
api._get_iam_policy_response = POLICY
topic = self._make_one(self.TOPIC_NAME, client=client)
policy = topic.get_iam_policy()
self.assertEqual(policy.etag, 'DEADBEEF')
self.assertEqual(policy.version, 17)
self.assertEqual(sorted(policy.owners), [OWNER2, OWNER1])
self.assertEqual(sorted(policy.editors), [EDITOR1, EDITOR2])
self.assertEqual(sorted(policy.viewers), [VIEWER1, VIEWER2])
self.assertEqual(sorted(policy.publishers), [PUBLISHER])
self.assertEqual(sorted(policy.subscribers), [SUBSCRIBER])
self.assertEqual(api._got_iam_policy, self.TOPIC_PATH)
def test_get_iam_policy_w_alternate_client(self):
POLICY = {
'etag': 'ACAB',
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.iam_policy_api = _FauxIAMPolicy()
api._get_iam_policy_response = POLICY
topic = self._make_one(self.TOPIC_NAME, client=client1)
policy = topic.get_iam_policy(client=client2)
self.assertEqual(policy.etag, 'ACAB')
self.assertIsNone(policy.version)
self.assertEqual(sorted(policy.owners), [])
self.assertEqual(sorted(policy.editors), [])
self.assertEqual(sorted(policy.viewers), [])
self.assertEqual(api._got_iam_policy, self.TOPIC_PATH)
def test_set_iam_policy_w_bound_client(self):
from google.cloud.pubsub.iam import Policy
from google.cloud.pubsub.iam import (
PUBSUB_ADMIN_ROLE,
PUBSUB_EDITOR_ROLE,
PUBSUB_VIEWER_ROLE,
PUBSUB_PUBLISHER_ROLE,
PUBSUB_SUBSCRIBER_ROLE,
)
OWNER1 = 'group:[email protected]'
OWNER2 = 'user:[email protected]'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:[email protected]'
VIEWER1 = 'serviceAccount:[email protected]'
VIEWER2 = 'user:[email protected]'
PUBLISHER = 'user:[email protected]'
SUBSCRIBER = 'serviceAccount:[email protected]'
POLICY = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': PUBSUB_ADMIN_ROLE,
'members': [OWNER1, OWNER2]},
{'role': PUBSUB_EDITOR_ROLE,
'members': [EDITOR1, EDITOR2]},
{'role': PUBSUB_VIEWER_ROLE,
'members': [VIEWER1, VIEWER2]},
{'role': PUBSUB_PUBLISHER_ROLE,
'members': [PUBLISHER]},
{'role': PUBSUB_SUBSCRIBER_ROLE,
'members': [SUBSCRIBER]},
],
}
RESPONSE = POLICY.copy()
RESPONSE['etag'] = 'ABACABAF'
RESPONSE['version'] = 18
client = _Client(project=self.PROJECT)
api = client.iam_policy_api = _FauxIAMPolicy()
api._set_iam_policy_response = RESPONSE
topic = self._make_one(self.TOPIC_NAME, client=client)
policy = Policy('DEADBEEF', 17)
policy.owners.add(OWNER1)
policy.owners.add(OWNER2)
policy.editors.add(EDITOR1)
policy.editors.add(EDITOR2)
policy.viewers.add(VIEWER1)
policy.viewers.add(VIEWER2)
policy.publishers.add(PUBLISHER)
policy.subscribers.add(SUBSCRIBER)
new_policy = topic.set_iam_policy(policy)
self.assertEqual(new_policy.etag, 'ABACABAF')
self.assertEqual(new_policy.version, 18)
self.assertEqual(sorted(new_policy.owners), [OWNER1, OWNER2])
self.assertEqual(sorted(new_policy.editors), [EDITOR1, EDITOR2])
self.assertEqual(sorted(new_policy.viewers), [VIEWER1, VIEWER2])
self.assertEqual(sorted(new_policy.publishers), [PUBLISHER])
self.assertEqual(sorted(new_policy.subscribers), [SUBSCRIBER])
self.assertEqual(api._set_iam_policy, (self.TOPIC_PATH, POLICY))
def test_set_iam_policy_w_alternate_client(self):
from google.cloud.pubsub.iam import Policy
RESPONSE = {'etag': 'ACAB'}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.iam_policy_api = _FauxIAMPolicy()
api._set_iam_policy_response = RESPONSE
topic = self._make_one(self.TOPIC_NAME, client=client1)
policy = Policy()
new_policy = topic.set_iam_policy(policy, client=client2)
self.assertEqual(new_policy.etag, 'ACAB')
self.assertIsNone(new_policy.version)
self.assertEqual(sorted(new_policy.owners), [])
self.assertEqual(sorted(new_policy.editors), [])
self.assertEqual(sorted(new_policy.viewers), [])
self.assertEqual(api._set_iam_policy, (self.TOPIC_PATH, {}))
def test_check_iam_permissions_w_bound_client(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
ROLES = [VIEWER_ROLE, EDITOR_ROLE, OWNER_ROLE]
client = _Client(project=self.PROJECT)
api = client.iam_policy_api = _FauxIAMPolicy()
api._test_iam_permissions_response = ROLES[:-1]
topic = self._make_one(self.TOPIC_NAME, client=client)
allowed = topic.check_iam_permissions(ROLES)
self.assertEqual(allowed, ROLES[:-1])
self.assertEqual(api._tested_iam_permissions,
(self.TOPIC_PATH, ROLES))
def test_check_iam_permissions_w_alternate_client(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
ROLES = [VIEWER_ROLE, EDITOR_ROLE, OWNER_ROLE]
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.iam_policy_api = _FauxIAMPolicy()
api._test_iam_permissions_response = []
topic = self._make_one(self.TOPIC_NAME, client=client1)
allowed = topic.check_iam_permissions(ROLES, client=client2)
self.assertEqual(len(allowed), 0)
self.assertEqual(api._tested_iam_permissions,
(self.TOPIC_PATH, ROLES))
class TestBatch(unittest.TestCase):
PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
from google.cloud.pubsub.topic import Batch
return Batch
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_ctor_defaults(self):
topic = _Topic()
client = _Client(project=self.PROJECT)
batch = self._make_one(topic, client)
self.assertIs(batch.topic, topic)
self.assertIs(batch.client, client)
self.assertEqual(len(batch.messages), 0)
self.assertEqual(len(batch.message_ids), 0)
def test___iter___empty(self):
topic = _Topic()
client = object()
batch = self._make_one(topic, client)
self.assertEqual(list(batch), [])
def test___iter___non_empty(self):
topic = _Topic()
client = object()
batch = self._make_one(topic, client)
batch.message_ids[:] = ['ONE', 'TWO', 'THREE']
self.assertEqual(list(batch), ['ONE', 'TWO', 'THREE'])
def test_publish_bytes_wo_attrs(self):
PAYLOAD = 'This is the message text'
MESSAGE = {'data': PAYLOAD,
'attributes': {}}
client = _Client(project=self.PROJECT)
topic = _Topic()
batch = self._make_one(topic, client=client)
batch.publish(PAYLOAD)
self.assertEqual(batch.messages, [MESSAGE])
def test_publish_bytes_w_add_timestamp(self):
PAYLOAD = 'This is the message text'
MESSAGE = {'data': PAYLOAD,
'attributes': {'timestamp': 'TIMESTAMP'}}
client = _Client(project=self.PROJECT)
topic = _Topic(timestamp_messages=True)
batch = self._make_one(topic, client=client)
batch.publish(PAYLOAD)
self.assertEqual(batch.messages, [MESSAGE])
def test_commit_w_bound_client(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MSGID1 = 'DEADBEEF'
MSGID2 = 'BEADCAFE'
MESSAGE1 = {'data': PAYLOAD1,
'attributes': {}}
MESSAGE2 = {'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client = _Client(project='PROJECT')
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID1, MSGID2]
topic = _Topic()
batch = self._make_one(topic, client=client)
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
batch.commit()
self.assertEqual(list(batch), [MSGID1, MSGID2])
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._topic_published,
(topic.full_name, [MESSAGE1, MESSAGE2]))
def test_commit_w_alternate_client(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MSGID1 = 'DEADBEEF'
MSGID2 = 'BEADCAFE'
MESSAGE1 = {'data': PAYLOAD1, 'attributes': {}}
MESSAGE2 = {'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client1 = _Client(project='PROJECT')
client2 = _Client(project='PROJECT')
api = client2.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID1, MSGID2]
topic = _Topic()
batch = self._make_one(topic, client=client1)
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
batch.commit(client=client2)
self.assertEqual(list(batch), [MSGID1, MSGID2])
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._topic_published,
(topic.full_name, [MESSAGE1, MESSAGE2]))
def test_context_mgr_success(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MSGID1 = 'DEADBEEF'
MSGID2 = 'BEADCAFE'
MESSAGE1 = {'data': PAYLOAD1, 'attributes': {}}
MESSAGE2 = {'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client = _Client(project='PROJECT')
api = client.publisher_api = _FauxPublisherAPI()
api._topic_publish_response = [MSGID1, MSGID2]
topic = _Topic()
batch = self._make_one(topic, client=client)
with batch as other:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
self.assertIs(other, batch)
self.assertEqual(list(batch), [MSGID1, MSGID2])
self.assertEqual(list(batch.messages), [])
self.assertEqual(api._topic_published,
(topic.full_name, [MESSAGE1, MESSAGE2]))
def test_context_mgr_failure(self):
PAYLOAD1 = 'This is the first message text'
PAYLOAD2 = 'This is the second message text'
MESSAGE1 = {'data': PAYLOAD1, 'attributes': {}}
MESSAGE2 = {'data': PAYLOAD2,
'attributes': {'attr1': 'value1', 'attr2': 'value2'}}
client = _Client(project='PROJECT')
api = client.publisher_api = _FauxPublisherAPI()
topic = _Topic()
batch = self._make_one(topic, client=client)
try:
with batch as other:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, attr1='value1', attr2='value2')
raise _Bugout()
except _Bugout:
pass
self.assertIs(other, batch)
self.assertEqual(list(batch), [])
self.assertEqual(list(batch.messages), [MESSAGE1, MESSAGE2])
self.assertEqual(getattr(api, '_topic_published', self), self)
class _FauxPublisherAPI(object):
_api_called = 0
def topic_create(self, topic_path):
self._topic_created = topic_path
return self._topic_create_response
def topic_get(self, topic_path):
from google.cloud.exceptions import NotFound
self._topic_got = topic_path
try:
return self._topic_get_response
except AttributeError:
raise NotFound(topic_path)
def topic_delete(self, topic_path):
self._topic_deleted = topic_path
return self._topic_delete_response
def topic_publish(self, topic_path, messages):
self._topic_published = topic_path, messages
self._api_called += 1
return self._topic_publish_response
class _FauxIAMPolicy(object):
def get_iam_policy(self, target_path):
self._got_iam_policy = target_path
return self._get_iam_policy_response
def set_iam_policy(self, target_path, policy):
self._set_iam_policy = target_path, policy
return self._set_iam_policy_response
def test_iam_permissions(self, target_path, permissions):
self._tested_iam_permissions = target_path, permissions
return self._test_iam_permissions_response
class _Topic(object):
def __init__(self, name="NAME", project="PROJECT",
timestamp_messages=False):
self.full_name = 'projects/%s/topics/%s' % (project, name)
self.path = '/%s' % (self.full_name,)
self.timestamp_messages = timestamp_messages
def _timestamp_message(self, attrs):
if self.timestamp_messages:
attrs['timestamp'] = 'TIMESTAMP'
class _Client(object):
connection = None
def __init__(self, project):
self.project = project
class _Bugout(Exception):
pass
class _Connection(object):
_called_with = None
def __init__(self, *responses):
self._responses = responses
def api_request(self, **kw):
self._called_with = kw
response, self._responses = self._responses[0], self._responses[1:]
return response
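

# Hypothetical usage sketch (not part of the original tests): the
# ``_Connection`` fake above pops canned responses in FIFO order and records
# the keyword arguments of the last call, which the tests then assert on.
def _connection_fake_demo():
    conn = _Connection({'name': 'first'}, {'name': 'second'})
    assert conn.api_request(method='GET', path='/x') == {'name': 'first'}
    assert conn._called_with == {'method': 'GET', 'path': '/x'}
    assert conn.api_request(method='GET', path='/y') == {'name': 'second'}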
| [
"[email protected]"
] | |
db082c6ccc44d72a76219a522cda7b5f8c59f543 | b9acbd83aca1f147db64620127a5ea2518910265 | /calvin-base-master/calvin/runtime/north/control_apis/runtime_api.py | 9d6fb6a3278a678546f6b9dea69e960f54ec8238 | [
"Apache-2.0"
] | permissive | skyjan0428/WorkSpace | c1484bde0e4a79a02486f45c518113ba4e3072bf | 837be7be75f06b6823df1cb63128506d9ce0016e | refs/heads/main | 2023-02-27T11:15:56.974778 | 2021-01-31T16:53:42 | 2021-01-31T16:53:42 | 321,545,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,959 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from calvin.requests import calvinresponse
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.runtime.south.async import async
from routes import handler, register
from authentication import authentication_decorator
from calvin.runtime.north.calvinsys import get_calvinsys
from calvin.runtime.north.calvinlib import get_calvinlib
_log = get_logger(__name__)
# Can't be access controlled, as it is needed to find the authorization server
# @authentication_decorator
@handler(method="GET", path="/id")
def handle_get_node_id(self, handle, connection, match, data, hdr):
"""
GET /id
Get id of this calvin node
Response status code: OK
Response: node-id
"""
self.send_response(handle, connection, json.dumps({'id': self.node.id}))
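

# Hedged client-side sketch (not part of this module): how the /id endpoint
# above might be queried over HTTP. The host, port and the availability of
# the ``requests`` package are assumptions for illustration only.
def _get_node_id_demo():
    import requests
    resp = requests.get('http://localhost:5001/id')
    return resp.json()['id']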
@handler(method="GET", path="/capabilities")
def handle_get_node_capabilities(self, handle, connection, match, data, hdr):
"""
GET /capabilities
Get capabilities of this calvin node
Response status code: OK
Response: list of capabilities
"""
self.send_response(handle, connection, json.dumps(get_calvinsys().list_capabilities() + get_calvinlib().list_capabilities()))
@handler(method="POST", path="/peer_setup")
def handle_peer_setup(self, handle, connection, match, data, hdr):
"""
POST /peer_setup
Add calvin nodes to network
Body: {"peers: ["calvinip://<address>:<port>", ...] }
Response status code: OK or SERVICE_UNAVAILABLE
Response: {<peer control uri>: [<peer node id>, <per peer status>], ...}
"""
_log.analyze(self.node.id, "+", data)
self.node.peersetup(data['peers'], cb=CalvinCB(self.handle_peer_setup_cb, handle, connection))
@register
def handle_peer_setup_cb(self, handle, connection, status=None, peer_node_ids=None):
_log.analyze(self.node.id, "+", status.encode())
if peer_node_ids:
data = json.dumps({k: (v[0], v[1].status) for k, v in peer_node_ids.items()})
else:
data = None
self.send_response(handle, connection, data, status=status.status)
@handler(method="GET", path="/nodes")
@authentication_decorator
def handle_get_nodes(self, handle, connection, match, data, hdr):
"""
GET /nodes
List nodes in network (excluding self) known to self
Response status code: OK
Response: List of node-ids
"""
self.send_response(handle, connection, json.dumps(self.node.network.list_links()))
@handler(method="DELETE", path="/node", optional=["/now", "/migrate", "/clean"])
@authentication_decorator
def handle_quit(self, handle, connection, match, data, hdr):
"""
DELETE /node{/now|/migrate|/clean}
Stop (this) calvin node
now: stop the runtime without handling actors on the runtime
migrate: migrate any actors before stopping the runtime
clean: stop & destroy all actors before stopping [default]
Response status code: ACCEPTED
Response: none
"""
if match.group(1) == "now":
stop_method = self.node.stop
elif match.group(1) == "migrate":
stop_method = self.node.stop_with_migration
else: # Clean up
stop_method = self.node.stop_with_cleanup
async.DelayedCall(.2, stop_method)
self.send_response(handle, connection, None, status=calvinresponse.ACCEPTED)
@handler(method="OPTIONS", path=r"/{path}")
@authentication_decorator
def handle_options(self, handle, connection, match, data, hdr):
"""
OPTIONS /url
Request for information about the communication options available on url
Response status code: OK
Response: Available communication options
"""
response = "HTTP/1.1 200 OK\n"
# Copy the content of Access-Control-Request-Headers to the response
if 'access-control-request-headers' in hdr:
response += "Access-Control-Allow-Headers: " + \
hdr['access-control-request-headers'] + "\n"
response += "Content-Length: 0\n" \
"Access-Control-Allow-Origin: *\n" \
"Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n" \
"Content-Type: *\n" \
"\n\r\n"
if connection is None:
msg = {"cmd": "httpresp", "msgid": handle, "header": response, "data": None}
self.tunnel_client.send(msg)
else:
connection.send(response)
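

# Minimal sketch (not part of the original module): what the CORS preflight
# response assembled above looks like for a request that sent
# ``Access-Control-Request-Headers: content-type``. Header values are
# illustrative only.
def _options_response_demo():
    hdr = {'access-control-request-headers': 'content-type'}
    response = "HTTP/1.1 200 OK\n"
    response += "Access-Control-Allow-Headers: " + \
        hdr['access-control-request-headers'] + "\n"
    response += "Content-Length: 0\n" \
                "Access-Control-Allow-Origin: *\n" \
                "Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n" \
                "Content-Type: *\n" \
                "\n\r\n"
    return response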
| [
"[email protected]"
] | |
b7486ff523640c2dc9848f2de261a32473eedb3a | 9e549ee54faa8b037f90eac8ecb36f853e460e5e | /venv/lib/python3.6/site-packages/pylint/test/functional/bad_continuation.py | 34c7c864c86ed2230807c6ccdf8b1eb41b67afdf | [
"MIT"
] | permissive | aitoehigie/britecore_flask | e8df68e71dd0eac980a7de8c0f20b5a5a16979fe | eef1873dbe6b2cc21f770bc6dec783007ae4493b | refs/heads/master | 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 | MIT | 2022-12-08T04:54:09 | 2019-03-24T00:38:20 | Python | UTF-8 | Python | false | false | 3,862 | py | """Regression test case for bad-continuation."""
# pylint: disable=print-statement,implicit-str-concat-in-sequence,using-constant-test,missing-docstring,wrong-import-position
# Various alignment for brackets
from __future__ import print_function
LIST0 = [1, 2, 3]
LIST1 = [1, 2, 3]
LIST2 = [1, 2, 3] # [bad-continuation]
# Alignment inside literals
W0 = [
1,
2,
3,
4,
5,
6,
7, # [bad-continuation]
8,
9,
10,
11,
12,
13,
# and a comment
14,
15,
16,
]
W1 = {"a": 1, "b": 2, "c": 3} # [bad-continuation]
W2 = {"a": 1, "b": 2, "c": 3} # [bad-continuation]
W2 = [
"some",
"contents" # with a continued comment that may be aligned
# under the previous comment (optionally)
"and",
"more", # but this
# [bad-continuation] is not accepted
"contents", # [bad-continuation] nor this.
]
# Values in dictionaries should be indented 4 spaces further if they are on a
# different line than their key
W4 = {
"key1": "value1", # Grandfather in the old style
"key2": "value2", # [bad-continuation]
"key3": "value3", # Comma here
}
# And should follow the same rules as continuations within parens
W5 = {
"key1": "long value" "long continuation",
"key2": "breaking" "wrong", # [bad-continuation]
"key3": 2 * (2 + 2),
"key4": ("parenthesis", "continuation"), # No comma here
}
# Allow values to line up with their keys when the key is next to the brace
W6 = {"key1": "value1", "key2": "value2"}
# Or allow them to be indented
W7 = {"key1": "value1", "key2": "value2"}
# Bug that caused a warning on the previous two cases permitted these odd
# incorrect indentations
W8 = {"key1": "value1"} # [bad-continuation]
W9 = {"key1": "value1"} # [bad-continuation]
# Alignment of arguments in function definitions
def continue1(some_arg, some_other_arg):
"""A function with well-aligned arguments."""
print(some_arg, some_other_arg)
def continue2(some_arg, some_other_arg):
"""A function with well-aligned arguments."""
print(some_arg, some_other_arg)
def continue3(some_arg, some_other_arg): # [bad-continuation] # [bad-continuation]
"""A function with misaligned arguments"""
print(some_arg, some_other_arg)
def continue4(arg1, arg2): # pylint:disable=missing-docstring
print(arg1, arg2)
def callee(*args):
"""noop"""
print(args)
callee("a", "b")
callee("a", "b") # [bad-continuation]
callee(5, {"a": "b", "c": "d"})
if 1:
pass
if 1:
pass
if 1:
pass # [bad-continuation]
if 1 and 2: # [bad-continuation]
pass
while 1 and 2:
pass
while 1 and 2 and 3: # [bad-continuation]
pass
if 2:
pass # [bad-continuation]
if 1 or 2 or 3:
pass
if 1 or 2 or 3: # [bad-continuation]
print(1, 2)
if 1 and 2:
pass # [bad-continuation]
if 2:
pass
if 2: # [bad-continuation]
pass
L1 = lambda a, b: a + b
if not (1 and 2):
print(3)
if not (1 and 2): # [bad-continuation]
print(3)
continue2("foo", some_other_arg="this " "is " "fine")
from contextlib import contextmanager
@contextmanager
def mycontext(*args):
yield args
with mycontext("this is", "great stuff", "mane"):
pass
# pylint: disable=using-constant-test
# More indentation included to distinguish this from the rest.
def long_function_name(var_one, var_two, var_three, var_four):
print(var_one, var_two, var_three, var_four)
def short_func_name(first, second, third):
# Add some extra indentation on the conditional continuation line.
if first and second == first == "some_big_long_statement_that_should_not_trigger":
third()
# Some normal multi-line statements with double-indented continuation lines.
LARGE_COLLECTION = ["spam", "eggs", "beans"]
long_function_name("1", "2", "3", "4")
CONCATENATED_TEXT = "spam" "eggs" "beans"
| [
"[email protected]"
] | |
73c48dc5a6a99067d2dd379d454a40ba74e6126f | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/src/programy/dialog/tokenizer/tokenizer_jp.py | 637b63ad0dde65e5653e318e881994284e8bbef4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,957 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import MeCab
import mojimoji
import re
from programy.dialog.tokenizer.tokenizer import Tokenizer
from programy.utils.language.japanese import JapaneseLanguage
tagger = MeCab.Tagger('-Owakati')
class TokenizerJP(Tokenizer):
TOKENIZER_CHILD_IN = '\uF010'
TOKENIZER_CHILD_OUT = '\uF011'
def __init__(self, split_chars=' ', punctuation_chars=None, before_concatenation_rule=None, after_concatenation_rule=None):
Tokenizer.__init__(self, split_chars=split_chars, punctuation_chars=punctuation_chars,
before_concatenation_rule=before_concatenation_rule,
after_concatenation_rule=after_concatenation_rule)
def _texts_to_words_en(self, texts):
if not texts:
return []
if self._is_template is True:
words = [word.strip() for word in texts.split(self.split_chars) if word]
return [" ".join(words)]
if self.punctuation_chars is None or self._is_punctuation is False:
new_texts = texts
else:
new_texts = ""
for ch in texts:
is_append = True
for punch_ch in self.punctuation_chars:
if ch == punch_ch:
is_append = False
break
if is_append is True:
new_texts += ch
return [word.strip() for word in new_texts.split(self.split_chars) if word]
def _texts_to_words_jp(self, texts):
if not texts:
return []
words_text = tagger.parse(texts.strip()).strip()
words = [word.strip() for word in words_text.split(self.split_chars) if word]
if self.punctuation_chars is None or self._is_punctuation is False:
return words
else:
new_words = []
for word in words:
is_append = True
for ch in self.punctuation_chars:
if word == ch:
is_append = False
break
if is_append is True:
new_words.append(word)
return new_words
def _template_texts_to_words_jp(self, texts):
if not texts:
return []
return [word.strip() for word in texts.split('\n')]
def _words_to_texts(self, words):
if not words or len(words) == 0:
return ''
texts = ''
last_word = ''
is_child_tag = None
is_tag_text = False
for word in words:
if is_tag_text is True:
if is_child_tag == self.TOKENIZER_CHILD_IN:
if last_word != '' and last_word[-1] != '"':
if self._check_concatenation_rule(last_word, word) is True:
texts += ' '
texts += self.TOKENIZER_CHILD_IN
is_child_tag = None
texts += word
if word == self.TOKENIZER_CHILD_OUT:
is_tag_text = False
is_child_tag = self.TOKENIZER_CHILD_OUT
else:
last_word = word
continue
if word == self.TOKENIZER_CHILD_IN:
is_tag_text = True
is_child_tag = self.TOKENIZER_CHILD_IN
continue
            if word == ' ' or word == '\u3000' or word == '':  # half-width or full-width space
texts += ' '
last_word = ' '
else:
if is_child_tag == self.TOKENIZER_CHILD_OUT and word[0] == '"':
texts += word
else:
if self._check_concatenation_rule(last_word, word) is True:
texts += ' '
texts += word
last_word = word
is_child_tag = None
return texts
def _check_concatenation_rule(self, last_word, now_word):
if self.before_concatenation_rule is None or last_word is None:
return False
before_match = self.before_concatenation_rule.fullmatch(last_word)
if before_match is None:
return False
if self.after_concatenation_rule is None:
return False
after_match = self.after_concatenation_rule.fullmatch(now_word)
if after_match is None:
return False
return True
def texts_to_words(self, texts):
if not texts:
return []
if self._is_convert is True:
han_texts = mojimoji.zen_to_han(texts, kana=False)
zen_texts = mojimoji.han_to_zen(han_texts, digit=False, ascii=False)
else:
han_texts = texts
zen_texts = texts
if JapaneseLanguage.is_CJKword(zen_texts) is True:
if self._is_template is False:
words = []
target_text = ''
words_CJK = JapaneseLanguage.is_CJKchar(zen_texts[0])
for ch in zen_texts:
char_CJK = JapaneseLanguage.is_CJKchar(ch)
if words_CJK != char_CJK:
if words_CJK is True:
tmp_words = self._texts_to_words_jp(target_text)
else:
tmp_words = self._texts_to_words_en(target_text)
for word in tmp_words:
words.append(word)
words_CJK = char_CJK
target_text = ''
target_text += ch
if len(target_text) > 0:
if words_CJK is True:
tmp_words = self._texts_to_words_jp(target_text)
else:
tmp_words = self._texts_to_words_en(target_text)
for word in tmp_words:
words.append(word)
else:
words = self._template_texts_to_words_jp(texts)
else:
if self._is_template is False:
words = self._texts_to_words_en(han_texts)
else:
words = self._texts_to_words_en(texts)
return words
def words_to_texts(self, words):
if not words:
return ''
if self.TOKENIZER_CHILD_IN in words:
            # Re-tokenize tag-expanded strings into single words
new_words = []
child_words = []
is_child = False
for word in words:
                if word == self.TOKENIZER_CHILD_IN:  # start marker of a tag-expanded string
new_words.append(word)
is_child = True
                elif word == self.TOKENIZER_CHILD_OUT:  # end marker of a tag-expanded string
texts = ''
if len(child_words) > 0:
texts = self.words_to_texts(child_words)
new_words.append(texts)
new_words.append(word)
child_words = []
is_child = False
else:
if is_child is False:
new_words.append(word)
else:
                    if word == ' ' or word == '\u3000' or word == '':  # half-width or full-width space
new_words.append(word)
else:
word = word.replace('\\"', '"')
child_words.append(word)
words = new_words
        # Japanese text: join the word list back into a single string
texts = self._words_to_texts(words)
return re.sub(' +', ' ', texts.strip())
def words_from_current_pos(self, words, current_pos):
if words:
return self.words_to_texts(words[current_pos:])
raise Exception("Num word array violation !")
def compare(self, value1, value2):
return value1 == value2
| [
"[email protected]"
] | |
d858d233de5baa5b713615d6ef487dd053ac6099 | 6630694f401f6f475dd81bb01ff9368db844ccff | /mmpretrain/models/backbones/vit_sam.py | 0eb46a72adf26cb62b93d5538116bd74f36070fa | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 27,046 | py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.transformer import FFN, PatchEmbed
from mmengine.model import BaseModule, ModuleList
from mmengine.model.weight_init import trunc_normal_
from mmpretrain.registry import MODELS
from ..utils import LayerNorm2d, build_norm_layer, resize_pos_embed, to_2tuple
from .base_backbone import BaseBackbone
def window_partition(x: torch.Tensor,
window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""Partition into non-overlapping windows with padding if needed.
Borrowed from https://github.com/facebookresearch/segment-anything/
Args:
x (torch.Tensor): Input tokens with [B, H, W, C].
window_size (int): Window size.
Returns:
        Tuple[torch.Tensor, Tuple[int, int]]
- ``windows``: Windows after partition with
[B * num_windows, window_size, window_size, C].
- ``(Hp, Wp)``: Padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size,
window_size, C)
windows = x.permute(0, 1, 3, 2, 4,
5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(windows: torch.Tensor, window_size: int,
pad_hw: Tuple[int, int],
hw: Tuple[int, int]) -> torch.Tensor:
"""Window unpartition into original sequences and removing padding.
Borrowed from https://github.com/facebookresearch/segment-anything/
Args:
x (torch.Tensor): Input tokens with
[B * num_windows, window_size, window_size, C].
window_size (int): Window size.
pad_hw (tuple): Padded height and width (Hp, Wp).
hw (tuple): Original height and width (H, W) before padding.
Returns:
torch.Tensor: Unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size,
window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
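

# Illustrative round-trip sketch (not part of the original file): partition a
# feature map into windows and stitch it back together. The shapes below are
# assumptions chosen so that padding (14 -> 16) is exercised.
def _window_roundtrip_demo():
    x = torch.randn(2, 14, 14, 32)  # [B, H, W, C]
    windows, pad_hw = window_partition(x, window_size=8)
    assert windows.shape == (8, 8, 8, 32)  # 2 batches * 2 * 2 windows
    y = window_unpartition(windows, 8, pad_hw, (14, 14))
    assert torch.equal(x, y)  # padding is cropped away again
    return y.shape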
def get_rel_pos(q_size: int, k_size: int,
rel_pos: torch.Tensor) -> torch.Tensor:
"""Get relative positional embeddings according to the relative positions
of query and key sizes.
Borrowed from https://github.com/facebookresearch/segment-anything/
Args:
q_size (int): Size of query q.
k_size (int): Size of key k.
rel_pos (torch.Tensor): Relative position embeddings (L, C).
Returns:
torch.Tensor: Extracted positional embeddings according to relative
positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode='linear',
)
rel_pos_resized = rel_pos_resized.reshape(-1,
max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords -
k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
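

# Shape sketch (illustrative only): for equal query/key sizes q = k = 4,
# ``get_rel_pos`` indexes a (2 * 4 - 1, C) embedding table into a
# (4, 4, C) tensor of pairwise relative-position embeddings.
def _get_rel_pos_demo():
    rel_pos = torch.randn(7, 16)  # 2 * 4 - 1 = 7 relative offsets
    out = get_rel_pos(4, 4, rel_pos)
    assert out.shape == (4, 4, 16)
    return out.shape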
def add_decomposed_rel_pos(
attn: torch.Tensor,
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""Borrowed from https://github.com/facebookresearch/segment-anything/
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
attn (torch.Tensor): Attention map.
q (torch.Tensor): Query q in the attention layer with shape
(B, q_h * q_w, C).
rel_pos_h (torch.Tensor): Relative position embeddings (Lh, C) for
height axis.
rel_pos_w (torch.Tensor): Relative position embeddings (Lw, C) for
width axis.
q_size (tuple): Spatial sequence size of query q with (q_h, q_w).
k_size (tuple): Spatial sequence size of key k with (k_h, k_w).
Returns:
torch.Tensor: Attention map with added relative positional embeddings.
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh)
rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw)
attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] +
rel_w[:, :, :, None, :]).view(B, q_h * q_w, k_h * k_w)
return attn
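# Illustrative shape check (hypothetical sizes): the attention map keeps its
# (B, q_h * q_w, k_h * k_w) shape, with height/width biases broadcast in:
#
#   attn = torch.zeros(6, 14 * 14, 14 * 14)
#   q = torch.randn(6, 14 * 14, 64)
#   rel = torch.randn(27, 64)
#   out = add_decomposed_rel_pos(attn, q, rel, rel, (14, 14), (14, 14))
#   assert out.shape == (6, 196, 196)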
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings.
Borrowed from https://github.com/facebookresearch/segment-anything/
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
qkv_bias (bool): If True, add a learnable bias to q, k, v.
Defaults to True.
        use_rel_pos (bool): Whether to use relative position embedding.
Defaults to False.
        input_size (tuple, optional): Input resolution for calculating the
relative positional parameter size. Defaults to None.
"""
def __init__(
self,
embed_dims: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
super().__init__()
self.num_heads = num_heads
head_embed_dims = embed_dims // num_heads
self.scale = head_embed_dims**-0.5
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.proj = nn.Linear(embed_dims, embed_dims)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert (input_size is not None), \
'Input size must be provided if using relative position embed.'
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(
torch.zeros(2 * input_size[0] - 1, head_embed_dims))
self.rel_pos_w = nn.Parameter(
torch.zeros(2 * input_size[1] - 1, head_embed_dims))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads,
-1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h,
self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W,
-1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
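# Illustrative usage (hypothetical sizes): attention over a 14x14 token grid
# with decomposed relative position embeddings.
#
#   attn = Attention(embed_dims=768, num_heads=12,
#                    use_rel_pos=True, input_size=(14, 14))
#   out = attn(torch.randn(2, 14, 14, 768))   # -> (2, 14, 14, 768)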
class TransformerEncoderLayer(BaseModule):
"""Encoder layer with window attention in Vision Transformer.
Args:
embed_dims (int): The feature dimension
num_heads (int): Parallel attention heads
feedforward_channels (int): The hidden dimension for FFNs
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Defaults to 0.
drop_path_rate (float): Stochastic depth rate. Defaults to 0.
num_fcs (int): The number of fully-connected layers for FFNs.
Defaults to 2.
qkv_bias (bool): enable bias for qkv if True. Defaults to True.
act_cfg (dict): The activation config for FFNs.
Defaults to ``dict(type='GELU')``.
norm_cfg (dict): Config dict for normalization layer.
Defaults to ``dict(type='LN')``.
        use_rel_pos (bool): Whether to use relative position embedding.
Defaults to False.
window_size (int): Window size for window attention. Defaults to 0.
        input_size (tuple, optional): Input resolution for calculating the
relative positional parameter size. Defaults to None.
init_cfg (dict, optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
embed_dims: int,
num_heads: int,
feedforward_channels: int,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
num_fcs: int = 2,
qkv_bias: bool = True,
act_cfg: dict = dict(type='GELU'),
norm_cfg: dict = dict(type='LN'),
use_rel_pos: bool = False,
window_size: int = 0,
input_size: Optional[Tuple[int, int]] = None,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
self.window_size = window_size
self.ln1 = build_norm_layer(norm_cfg, self.embed_dims)
self.attn = Attention(
embed_dims=embed_dims,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
input_size=input_size if window_size == 0 else
(window_size, window_size),
)
self.ln2 = build_norm_layer(norm_cfg, self.embed_dims)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return self.ln1
@property
def norm2(self):
return self.ln2
def forward(self, x):
shortcut = x
x = self.ln1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + x
x = self.ffn(self.ln2(x), identity=x)
return x
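# Illustrative usage (hypothetical sizes): window_size > 0 applies windowed
# attention with internal padding; window_size == 0 attends globally.
#
#   layer = TransformerEncoderLayer(embed_dims=768, num_heads=12,
#                                   feedforward_channels=3072,
#                                   use_rel_pos=True, window_size=14,
#                                   input_size=(64, 64))
#   y = layer(torch.randn(1, 64, 64, 768))    # -> (1, 64, 64, 768)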
@MODELS.register_module()
class ViTSAM(BaseBackbone):
"""Vision Transformer as image encoder used in SAM.
A PyTorch implement of backbone: `Segment Anything
<https://arxiv.org/abs/2304.02643>`_
Args:
arch (str | dict): Vision Transformer architecture. If use string,
choose from 'base', 'large', 'huge'. If use dict, it should have
below keys:
- **embed_dims** (int): The dimensions of embedding.
- **num_layers** (int): The number of transformer encoder layers.
- **num_heads** (int): The number of heads in attention modules.
- **feedforward_channels** (int): The hidden dimensions in
feedforward modules.
- **global_attn_indexes** (int): The index of layers with global
attention.
Defaults to 'base'.
img_size (int | tuple): The expected input image shape. Because we
support dynamic input shape, just set the argument to the most
common input image shape. Defaults to 224.
patch_size (int | tuple): The patch size in patch embedding.
Defaults to 16.
in_channels (int): The num of input channels. Defaults to 3.
out_channels (int): The num of output channels, if equal to 0, the
channel reduction layer is disabled. Defaults to 256.
out_indices (Sequence | int): Output from which stages.
Defaults to -1, means the last stage.
out_type (str): The type of output features. Please choose from
- ``"raw"`` or ``"featmap"``: The feature map tensor from the
patch tokens with shape (B, C, H, W).
- ``"avg_featmap"``: The global averaged feature map tensor
with shape (B, C).
Defaults to ``"raw"``.
drop_rate (float): Probability of an element to be zeroed.
Defaults to 0.
drop_path_rate (float): stochastic depth rate. Defaults to 0.
qkv_bias (bool): Whether to add bias for qkv in attention modules.
Defaults to True.
use_abs_pos (bool): Whether to use absolute position embedding.
Defaults to True.
        use_rel_pos (bool): Whether to use relative position embedding.
Defaults to True.
window_size (int): Window size for window attention. Defaults to 14.
norm_cfg (dict): Config dict for normalization layer.
Defaults to ``dict(type='LN')``.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Defaults to -1.
interpolate_mode (str): Select the interpolate mode for position
embeding vector resize. Defaults to "bicubic".
patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict.
layer_cfgs (Sequence | dict): Configs of each transformer layer in
encoder. Defaults to an empty dict.
init_cfg (dict, optional): Initialization config dict.
Defaults to None.
"""
arch_zoo = {
**dict.fromkeys(
['b', 'base'], {
'embed_dims': 768,
'num_layers': 12,
'num_heads': 12,
'feedforward_channels': 3072,
'global_attn_indexes': [2, 5, 8, 11]
}),
**dict.fromkeys(
['l', 'large'], {
'embed_dims': 1024,
'num_layers': 24,
'num_heads': 16,
'feedforward_channels': 4096,
'global_attn_indexes': [5, 11, 17, 23]
}),
**dict.fromkeys(
['h', 'huge'], {
'embed_dims': 1280,
'num_layers': 32,
'num_heads': 16,
'feedforward_channels': 5120,
'global_attn_indexes': [7, 15, 23, 31]
}),
}
OUT_TYPES = {'raw', 'featmap', 'avg_featmap'}
def __init__(self,
arch: str = 'base',
img_size: int = 224,
patch_size: int = 16,
in_channels: int = 3,
out_channels: int = 256,
out_indices: int = -1,
out_type: str = 'raw',
drop_rate: float = 0.,
drop_path_rate: float = 0.,
qkv_bias: bool = True,
use_abs_pos: bool = True,
use_rel_pos: bool = True,
window_size: int = 14,
norm_cfg: dict = dict(type='LN', eps=1e-6),
frozen_stages: int = -1,
interpolate_mode: str = 'bicubic',
patch_cfg: dict = dict(),
layer_cfgs: dict = dict(),
init_cfg: Optional[dict] = None):
super().__init__(init_cfg)
if isinstance(arch, str):
arch = arch.lower()
assert arch in set(self.arch_zoo), \
f'Arch {arch} is not in default archs {set(self.arch_zoo)}'
self.arch_settings = self.arch_zoo[arch]
else:
essential_keys = {
'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels'
}
assert isinstance(arch, dict) and essential_keys <= set(arch), \
f'Custom arch needs a dict with keys {essential_keys}'
self.arch_settings = arch
self.embed_dims = self.arch_settings['embed_dims']
self.num_layers = self.arch_settings['num_layers']
self.global_attn_indexes = self.arch_settings['global_attn_indexes']
self.img_size = to_2tuple(img_size)
# Set patch embedding
_patch_cfg = dict(
in_channels=in_channels,
input_size=img_size,
embed_dims=self.embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=patch_size,
)
_patch_cfg.update(patch_cfg)
self.patch_embed = PatchEmbed(**_patch_cfg)
self.patch_resolution = self.patch_embed.init_out_size
# Set out type
if out_type not in self.OUT_TYPES:
raise ValueError(f'Unsupported `out_type` {out_type}, please '
f'choose from {self.OUT_TYPES}')
self.out_type = out_type
self.use_abs_pos = use_abs_pos
self.interpolate_mode = interpolate_mode
if use_abs_pos:
# Set position embedding
self.pos_embed = nn.Parameter(
torch.zeros(1, *self.patch_resolution, self.embed_dims))
self.drop_after_pos = nn.Dropout(p=drop_rate)
self._register_load_state_dict_pre_hook(self._prepare_pos_embed)
if use_rel_pos:
self._register_load_state_dict_pre_hook(
self._prepare_relative_position)
if isinstance(out_indices, int):
out_indices = [out_indices]
assert isinstance(out_indices, Sequence), \
f'"out_indices" must by a sequence or int, ' \
f'get {type(out_indices)} instead.'
for i, index in enumerate(out_indices):
if index < 0:
out_indices[i] = self.num_layers + index
assert 0 <= out_indices[i] <= self.num_layers, \
f'Invalid out_indices {index}'
self.out_indices = out_indices
# stochastic depth decay rule
dpr = np.linspace(0, drop_path_rate, self.num_layers)
self.layers = ModuleList()
if isinstance(layer_cfgs, dict):
layer_cfgs = [layer_cfgs] * self.num_layers
for i in range(self.num_layers):
_layer_cfg = dict(
embed_dims=self.embed_dims,
num_heads=self.arch_settings['num_heads'],
feedforward_channels=self.
arch_settings['feedforward_channels'],
drop_rate=drop_rate,
drop_path_rate=dpr[i],
qkv_bias=qkv_bias,
window_size=window_size
if i not in self.global_attn_indexes else 0,
input_size=self.patch_resolution,
use_rel_pos=use_rel_pos,
norm_cfg=norm_cfg)
_layer_cfg.update(layer_cfgs[i])
self.layers.append(TransformerEncoderLayer(**_layer_cfg))
self.out_channels = out_channels
if self.out_channels > 0:
self.channel_reduction = nn.Sequential(
nn.Conv2d(
self.embed_dims,
out_channels,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_channels, eps=1e-6),
nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_channels, eps=1e-6),
)
# freeze stages only when self.frozen_stages > 0
self.frozen_stages = frozen_stages
if self.frozen_stages > 0:
self._freeze_stages()
def init_weights(self):
super().init_weights()
if not (isinstance(self.init_cfg, dict)
and self.init_cfg['type'] == 'Pretrained'):
            # pos_embed only exists when use_abs_pos is True
            if getattr(self, 'pos_embed', None) is not None:
                trunc_normal_(self.pos_embed, std=0.02)
def _freeze_stages(self):
        # freeze position embedding (pos_embed and drop_after_pos only exist
        # when use_abs_pos is True)
        if getattr(self, 'pos_embed', None) is not None:
            self.pos_embed.requires_grad = False
            # set dropout to eval mode
            self.drop_after_pos.eval()
# freeze patch embedding
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
# freeze layers
for i in range(1, self.frozen_stages + 1):
m = self.layers[i - 1]
m.eval()
for param in m.parameters():
param.requires_grad = False
# freeze channel_reduction module
if self.frozen_stages == self.num_layers and self.out_channels > 0:
m = self.channel_reduction
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
B = x.shape[0]
x, patch_resolution = self.patch_embed(x)
x = x.view(B, patch_resolution[0], patch_resolution[1],
self.embed_dims)
if self.use_abs_pos:
# 'resize_pos_embed' only supports 'pos_embed' with ndim==3, but
# in ViTSAM, the 'pos_embed' has 4 dimensions (1, H, W, C), so it
# is flattened. Besides, ViTSAM doesn't have any extra token.
resized_pos_embed = resize_pos_embed(
self.pos_embed.flatten(1, 2),
self.patch_resolution,
patch_resolution,
mode=self.interpolate_mode,
num_extra_tokens=0)
x = x + resized_pos_embed.view(1, *patch_resolution,
self.embed_dims)
x = self.drop_after_pos(x)
outs = []
for i, layer in enumerate(self.layers):
x = layer(x)
if i in self.out_indices:
# (B, H, W, C) -> (B, C, H, W)
x_reshape = x.permute(0, 3, 1, 2)
if self.out_channels > 0:
x_reshape = self.channel_reduction(x_reshape)
outs.append(self._format_output(x_reshape))
return tuple(outs)
def _format_output(self, x) -> torch.Tensor:
if self.out_type == 'raw' or self.out_type == 'featmap':
return x
elif self.out_type == 'avg_featmap':
# (B, C, H, W) -> (B, C, N) -> (B, N, C)
x = x.flatten(2).permute(0, 2, 1)
return x.mean(dim=1)
def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs):
name = prefix + 'pos_embed'
if name not in state_dict.keys():
return
ckpt_pos_embed_shape = state_dict[name].shape
if self.pos_embed.shape != ckpt_pos_embed_shape:
from mmengine.logging import MMLogger
logger = MMLogger.get_current_instance()
logger.info(
f'Resize the pos_embed shape from {ckpt_pos_embed_shape} '
f'to {self.pos_embed.shape}.')
ckpt_pos_embed_shape = ckpt_pos_embed_shape[1:3]
pos_embed_shape = self.patch_embed.init_out_size
flattened_pos_embed = state_dict[name].flatten(1, 2)
resized_pos_embed = resize_pos_embed(flattened_pos_embed,
ckpt_pos_embed_shape,
pos_embed_shape,
self.interpolate_mode, 0)
state_dict[name] = resized_pos_embed.view(1, *pos_embed_shape,
self.embed_dims)
def _prepare_relative_position(self, state_dict, prefix, *args, **kwargs):
state_dict_model = self.state_dict()
all_keys = list(state_dict_model.keys())
for key in all_keys:
if 'rel_pos_' in key:
ckpt_key = prefix + key
if ckpt_key not in state_dict:
continue
relative_position_pretrained = state_dict[ckpt_key]
relative_position_current = state_dict_model[key]
L1, _ = relative_position_pretrained.size()
L2, _ = relative_position_current.size()
if L1 != L2:
new_rel_pos = F.interpolate(
relative_position_pretrained.reshape(1, L1,
-1).permute(
0, 2, 1),
size=L2,
mode='linear',
)
new_rel_pos = new_rel_pos.reshape(-1, L2).permute(1, 0)
from mmengine.logging import MMLogger
logger = MMLogger.get_current_instance()
logger.info(f'Resize the {ckpt_key} from '
f'{state_dict[ckpt_key].shape} to '
f'{new_rel_pos.shape}')
state_dict[ckpt_key] = new_rel_pos
def get_layer_depth(self, param_name: str, prefix: str = ''):
"""Get the layer-wise depth of a parameter.
Args:
param_name (str): The name of the parameter.
prefix (str): The prefix for the parameter.
Defaults to an empty string.
Returns:
Tuple[int, int]: The layer-wise depth and the num of layers.
Note:
The first depth is the stem module (``layer_depth=0``), and the
last depth is the subsequent module (``layer_depth=num_layers-1``)
"""
num_layers = self.num_layers + 2
if not param_name.startswith(prefix):
# For subsequent module like head
return num_layers - 1, num_layers
param_name = param_name[len(prefix):]
if param_name in ('cls_token', 'pos_embed'):
layer_depth = 0
elif param_name.startswith('patch_embed'):
layer_depth = 0
elif param_name.startswith('layers'):
layer_id = int(param_name.split('.')[1])
layer_depth = layer_id + 1
else:
layer_depth = num_layers - 1
return layer_depth, num_layers
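# Illustrative usage (not part of the module): build the SAM-style ViT-B image
# encoder and run a forward pass. With the default out_channels=256, the
# channel-reduction neck is applied, so a 1024x1024 input gives a 64x64 map:
#
#   model = ViTSAM(arch='base', img_size=1024, patch_size=16)
#   feats = model(torch.randn(1, 3, 1024, 1024))
#   assert feats[0].shape == (1, 256, 64, 64)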
| [
"[email protected]"
] | |
e7ad02c46c836321fc801c6337ecc6989ac40f3a | 6a609bc67d6a271c1bd26885ce90b3332995143c | /exercises/array/kth_largest_element_in_a_stream.py | 9e85a56efb00d9c855375c9d8927a296ae770bdc | [] | no_license | nahgnaw/data-structure | 1c38b3f7e4953462c5c46310b53912a6e3bced9b | 18ed31a3edf20a3e5a0b7a0b56acca5b98939693 | refs/heads/master | 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # -*- coding: utf-8 -*-
"""
Design a data structure that supports the following two operations:
void addNum(int num) - Add a integer number from the data stream to the data structure.
int findKthLargest() - Return the Kth largest number.
"""
class StreamData(object):
def __init__(self, k):
self.heap = []
self.k = k
def addNum(self, num):
import heapq
if len(self.heap) < self.k:
heapq.heappush(self.heap, num)
else:
if num > self.heap[0]:
heapq.heappushpop(self.heap, num)
def findKthLargest(self):
if len(self.heap) < self.k:
return None
return self.heap[0]
if __name__ == '__main__':
sd = StreamData(3)
sd.addNum(3)
sd.addNum(6)
sd.addNum(2)
sd.addNum(1)
sd.addNum(10)
sd.addNum(4)
sd.addNum(1)
print sd.findKthLargest() | [
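    # expected output: 4 (the 3rd largest of [3, 6, 2, 1, 10, 4, 1])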
"[email protected]"
] | |
1b8071621e5a807dfbf32f489db90293f1e03389 | 96148bf17555c028f5650d51f496f349c89e8c79 | /build/cob_common/cob_srvs/catkin_generated/pkg.develspace.context.pc.py | e9d6f4abd9b3c97cf7a0411b20a4e7c5eea11669 | [] | no_license | kerekare/ros_hydra_libphidgetsupdated | 239daed94a95f60743c5659f1102183641761240 | e05e58417fb03a14d627bc80d09af3b2a0fcceab | refs/heads/master | 2016-09-05T23:35:43.792883 | 2014-03-25T16:32:01 | 2014-03-25T16:32:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kerekare/workspace/care-o-bot/devel/include".split(';') if "/home/kerekare/workspace/care-o-bot/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;trajectory_msgs;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cob_srvs"
PROJECT_SPACE_DIR = "/home/kerekare/workspace/care-o-bot/devel"
PROJECT_VERSION = "0.5.0"
| [
"kerekare@i60sr2.(none)"
] | kerekare@i60sr2.(none) |
76fe0a3f5aa81d72157f8f98e1075cbfe5e407f8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_243/ch170_2020_06_15_20_16_56_108338.py | 806f3ed123f5f56aca42de8307af5b88fd84f731 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | def apaga_repetidos(letras):
dic = {}
novas = ""
for letra in letras:
if letra in dic:
novas += "*"
else:
novas += letra
dic[letra]=letra
return novas | [
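# Example (illustrative): repeated letters are replaced by '*':
#   apaga_repetidos("banana")  # -> 'ban***'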
"[email protected]"
] | |
a1179bc36093f156e41a95c37c819dddcf4e4f9e | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/matplotlib/tests/test_collections.py | 773093100349f07fd31ca250883807222e9f8bfe | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 24,140 | py | """
Tests specific to the collections module.
"""
import io
import platform
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import Collection, LineCollection, EventCollection
from matplotlib.testing.decorators import image_comparison
def generate_EventCollection_plot():
'''
generate the initial collection and plot it
'''
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig = plt.figure()
splt = fig.add_subplot(1, 1, 1)
splt.add_collection(coll)
splt.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
splt.set_xlim(-1, 22)
splt.set_ylim(0, 2)
return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
'''
check to make sure the default segments have the correct coordinates
'''
_, coll, props = generate_EventCollection_plot()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
def test__EventCollection__get_positions():
'''
check to make sure the default positions match the input positions
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['positions'], coll.get_positions())
def test__EventCollection__get_orientation():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, props = generate_EventCollection_plot()
assert props['orientation'] == coll.get_orientation()
def test__EventCollection__is_horizontal():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, _ = generate_EventCollection_plot()
assert coll.is_horizontal()
def test__EventCollection__get_linelength():
'''
check to make sure the default linelength matches the input linelength
'''
_, coll, props = generate_EventCollection_plot()
assert props['linelength'] == coll.get_linelength()
def test__EventCollection__get_lineoffset():
'''
check to make sure the default lineoffset matches the input lineoffset
'''
_, coll, props = generate_EventCollection_plot()
assert props['lineoffset'] == coll.get_lineoffset()
def test__EventCollection__get_linestyle():
'''
check to make sure the default linestyle matches the input linestyle
'''
_, coll, _ = generate_EventCollection_plot()
assert coll.get_linestyle() == [(None, None)]
def test__EventCollection__get_color():
'''
check to make sure the default color matches the input color
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['color'], coll.get_color())
check_allprop_array(coll.get_colors(), props['color'])
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
'''
check to make sure set_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
'''
check to make sure add_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.add_positions(props['extra_positions'][0])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
'''
check to make sure append_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
'''
check to make sure extend_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
'''
check to make sure switch_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert new_orientation == coll.get_orientation()
assert not coll.is_horizontal()
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(
baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
'''
check to make sure calling switch_orientation twice sets the
orientation back to the default
'''
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert props['orientation'] == coll.get_orientation()
assert coll.is_horizontal()
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
'''
check to make sure set_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert new_orientation == coll.get_orientation()
assert not coll.is_horizontal()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
'''
check to make sure set_linelength works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert new_linelength == coll.get_linelength()
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
'''
check to make sure set_lineoffset works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert new_lineoffset == coll.get_lineoffset()
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = 'dashed'
coll.set_linestyle(new_linestyle)
assert coll.get_linestyle() == [(0, (6.0, 6.0))]
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
'''
check to make sure set_linestyle accepts a single dash pattern
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = (0, (6., 6.))
coll.set_linestyle(new_linestyle)
assert coll.get_linestyle() == [(0, (6.0, 6.0))]
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linewidth = 5
coll.set_linewidth(new_linewidth)
assert coll.get_linewidth() == new_linewidth
splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
'''
check to make sure set_color works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
np.testing.assert_array_equal(new_color, coll.get_color())
check_allprop_array(coll.get_colors(), new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
'''
check to make sure all values in the segment are correct, given a
particular set of inputs
note: this is not a test, it is used by tests
'''
segments = coll.get_segments()
if (orientation.lower() == 'horizontal'
or orientation.lower() == 'none' or orientation is None):
        # if horizontal, the position is on the y-axis
pos1 = 1
pos2 = 0
elif orientation.lower() == 'vertical':
        # if vertical, the position is on the x-axis
pos1 = 0
pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert segment[0, pos1] == lineoffset + linelength / 2
assert segment[1, pos1] == lineoffset - linelength / 2
assert segment[0, pos2] == positions[i]
assert segment[1, pos2] == positions[i]
def check_allprop_array(values, target):
'''
check to make sure all values match the given target if arrays
note: this is not a test, it is used by tests
'''
for value in values:
np.testing.assert_array_equal(value, target)
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# Github issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert ax.dataLim.bounds == bounds
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert q.get_datalim(ax.transData).bounds == (0., 0., 7., 9.)
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert ax.dataLim.bounds == (20.0, 30.0, 15.0, 6.0)
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
extensions=['png'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X / x[-1]
hh = Y / y[-1]
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
extensions=['png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = Axes3D(fig)
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=[xy], transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super().__init__(4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
# equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000], picker=True)
fig.savefig(io.BytesIO(), dpi=fig.dpi)
class MouseEvent(object):
pass
event = MouseEvent()
event.x = 325
event.y = 240
found, indices = col.contains(event)
assert found
assert_array_equal(indices['ind'], [0])
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
@image_comparison(baseline_images=['size_in_xy'], remove_text=True,
extensions=['png'])
def test_size_in_xy():
fig, ax = plt.subplots()
widths, heights, angles = (10, 10), 10, 0
widths = 10, 10
coords = [(10, 10), (15, 15)]
e = mcollections.EllipseCollection(
widths, heights, angles,
units='xy',
offsets=coords,
transOffset=ax.transData)
ax.add_collection(e)
ax.set_xlim(0, 30)
ax.set_ylim(0, 30)
def test_pandas_indexing(pd):
# Should not fail break when faced with a
# non-zero indexed series
index = [11, 12, 13]
ec = fc = pd.Series(['red', 'blue', 'green'], index=index)
lw = pd.Series([1, 2, 3], index=index)
ls = pd.Series(['solid', 'dashed', 'dashdot'], index=index)
aa = pd.Series([True, False, True], index=index)
Collection(edgecolors=ec)
Collection(facecolors=fc)
Collection(linewidths=lw)
Collection(linestyles=ls)
Collection(antialiaseds=aa)
@pytest.mark.style('default')
def test_lslw_bcast():
col = mcollections.PathCollection([])
col.set_linestyles(['-', '-'])
col.set_linewidths([1, 2, 3])
assert col.get_linestyles() == [(None, None)] * 6
assert col.get_linewidths() == [1, 2, 3] * 2
col.set_linestyles(['-', '-', '-'])
assert col.get_linestyles() == [(None, None)] * 3
assert (col.get_linewidths() == [1, 2, 3]).all()
@pytest.mark.style('default')
def test_capstyle():
col = mcollections.PathCollection([], capstyle='round')
assert col.get_capstyle() == 'round'
col.set_capstyle('butt')
assert col.get_capstyle() == 'butt'
@pytest.mark.style('default')
def test_joinstyle():
col = mcollections.PathCollection([], joinstyle='round')
assert col.get_joinstyle() == 'round'
col.set_joinstyle('miter')
assert col.get_joinstyle() == 'miter'
@image_comparison(baseline_images=['cap_and_joinstyle'],
extensions=['png'])
def test_cap_and_joinstyle_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim([-0.5, 1.5])
ax.set_ylim([-0.5, 2.5])
x = np.array([0.0, 1.0, 0.5])
ys = np.array([[0.0], [0.5], [1.0]]) + np.array([[0.0, 0.0, 1.0]])
segs = np.zeros((3, 3, 2))
segs[:, :, 0] = x
segs[:, :, 1] = ys
line_segments = LineCollection(segs, linewidth=[10, 15, 20])
line_segments.set_capstyle("round")
line_segments.set_joinstyle("miter")
ax.add_collection(line_segments)
ax.set_title('Line collection with customized caps and joinstyle')
@image_comparison(baseline_images=['scatter_post_alpha'],
extensions=['png'], remove_text=True,
style='default')
def test_scatter_post_alpha():
fig, ax = plt.subplots()
sc = ax.scatter(range(5), range(5), c=range(5))
# this needs to be here to update internal state
fig.canvas.draw()
sc.set_alpha(.1)
def test_pathcollection_legend_elements():
np.random.seed(19680801)
x, y = np.random.rand(2, 10)
y = np.random.rand(10)
c = np.random.randint(0, 5, size=10)
s = np.random.randint(10, 300, size=10)
fig, ax = plt.subplots()
sc = ax.scatter(x, y, c=c, s=s, cmap="jet", marker="o", linewidths=0)
h, l = sc.legend_elements(fmt="{x:g}")
assert len(h) == 5
assert_array_equal(np.array(l).astype(float), np.arange(5))
colors = np.array([line.get_color() for line in h])
colors2 = sc.cmap(np.arange(5)/4)
assert_array_equal(colors, colors2)
l1 = ax.legend(h, l, loc=1)
h2, lab2 = sc.legend_elements(num=9)
assert len(h2) == 9
l2 = ax.legend(h2, lab2, loc=2)
h, l = sc.legend_elements(prop="sizes", alpha=0.5, color="red")
alpha = np.array([line.get_alpha() for line in h])
assert_array_equal(alpha, 0.5)
color = np.array([line.get_markerfacecolor() for line in h])
assert_array_equal(color, "red")
l3 = ax.legend(h, l, loc=4)
h, l = sc.legend_elements(prop="sizes", num=4, fmt="{x:.2f}",
func=lambda x: 2*x)
actsizes = [line.get_markersize() for line in h]
labeledsizes = np.sqrt(np.array(l).astype(float)/2)
assert_array_almost_equal(actsizes, labeledsizes)
l4 = ax.legend(h, l, loc=3)
import matplotlib.ticker as mticker
loc = mticker.MaxNLocator(nbins=9, min_n_ticks=9-1,
steps=[1, 2, 2.5, 3, 5, 6, 8, 10])
h5, lab5 = sc.legend_elements(num=loc)
assert len(h2) == len(h5)
levels = [-1, 0, 55.4, 260]
h6, lab6 = sc.legend_elements(num=levels, prop="sizes", fmt="{x:g}")
assert_array_equal(np.array(lab6).astype(float), levels[2:])
for l in [l1, l2, l3, l4]:
ax.add_artist(l)
fig.canvas.draw()
def test_EventCollection_nosort():
# Check that EventCollection doesn't modify input in place
arr = np.array([3, 2, 1, 10])
coll = EventCollection(arr)
np.testing.assert_array_equal(arr, np.array([3, 2, 1, 10]))
| [
"[email protected]"
] | |
74f4c2bea2dd68798f8ca1c4e735b0a9dfdc3974 | f8c52776a3e5d3161ee7da4d3efd9c5a12e19102 | /physmath/physnum.py | 6643fc50bb341b05c18d72ed44f9697ffdc7c434 | [
"Apache-2.0"
] | permissive | matthagy/physmath | e26f64b78a919c96a0ceb71e6ab8bd5abd361608 | c3ff3c803cc2202373c6fe22754c8eba664870f2 | refs/heads/master | 2021-01-10T18:30:34.147614 | 2013-04-04T08:02:59 | 2013-04-04T08:02:59 | 2,356,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,221 | py | '''Physical numbers combine a quantity, a unit, and a possible name
'''
from __future__ import division
from __future__ import absolute_import
import re
from decimal import Decimal
from functools import partial
from hlab.bases import AutoRepr
from hlab.memorize import memorize
from jamenson.runtime import atypes
from jamenson.runtime.atypes import anytype, as_optimized_type, typep, Seq, as_type
from jamenson.runtime.multimethod import defmethod, MultiMethod
from jamenson.runtime.as_string import as_string
from . import algebra as A
from .sigfig import SigFig
from .ratio import Ratio, as_ratio
from .dne import DNEType, dne
from .units import as_unit, BaseUnit, ex_parse_unit, dimensionless
name_type = as_optimized_type((str,unicode,type(None)))
lossless_number_type = as_optimized_type((int,long,Ratio,Decimal,SigFig,DNEType))
class PhysNum(A.DivAlgebraBase, AutoRepr):
def __init__(self, quantity, unit=None, name=None):
assert typep(quantity, lossless_number_type), \
'bad quantity %r(%s)' % (quantity, type(quantity).__name__)
self.quantity = quantity
self.unit = as_unit(unit)
assert typep(name, name_type)
self.name = name
def repr_args(self):
yield self.quantity
if self.unit != dimensionless:
yield self.unit
if self.name is not None:
if self.unit == dimensionless:
yield None
yield self.name
def __str__(self):
parts = [self.quantity]
if self.unit != dimensionless:
parts.append(self.unit)
if self.name is not None:
parts.append(self.name)
return ' '.join(map(str, parts))
def __hash__(self):
if self.unit==dimensionless and not self.name:
return hash(self.quantity)
return hash(self.quantity) ^ hash(self.unit) ^ hash(self.name) ^ 893409342
def split_posneg(self):
unit = self.unit
if isinstance(unit, PrimitiveUnit):
unit = primunit_to_compound(unit)
u_pos,u_neg = unit.split_posneg()
if isinstance(self.quantity, Ratio):
q_pos,q_neg = self.quantity.num, self.quantity.den
else:
q_pos,q_neg = self.quantity, 1
return [self.__class__(q_pos, u_pos, self.name),
self.__class__(q_neg, u_neg)]
as_physnum = MultiMethod('as_physnum')
@defmethod(as_physnum, [PhysNum])
def meth(pn):
return pn
@defmethod(as_physnum, [lossless_number_type])
def meth(n):
return PhysNum(n)
@defmethod(as_physnum, [str])
def meth(s):
return parse_physical_number(s)
@defmethod(as_physnum, [unicode])
def meth(u):
return as_physnum(str(u))
@memorize
def parse_physical_number(bytes, quantity_class=None, create_unit=False):
parts = bytes.strip().split(None, 1)
number, rest = (parts if len(parts)==2 else (parts[0], ''))
unit,extra = ex_parse_unit(rest, create=create_unit) if rest else (dimensionless, None)
name = extra and extra.strip()
if number.endswith('d'):
number = number[:-1]
quantity_class = quantity_class or Decimal
if number.endswith('s'):
number = number[:-1]
quantity_class = quantity_class or SigFig
if quantity_class is None:
quantity_class = int if not re.search('[.eE]', number) else Decimal
return PhysNum(quantity_class(number), unit, name)
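# Illustrative examples of the suffix/format rules above (unit and name
# parsing itself is delegated to units.ex_parse_unit):
#   parse_physical_number('3')    -> PhysNum(3)               # no '.'/'e' -> int
#   parse_physical_number('3.5')  -> PhysNum(Decimal('3.5'))  # '.' -> Decimal
#   parse_physical_number('3.5s') -> PhysNum(SigFig('3.5'))   # 's' suffix -> SigFig
#   parse_physical_number('3d')   -> PhysNum(Decimal('3'))    # 'd' suffix -> Decimal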
@defmethod(A.mm_eq, [PhysNum, PhysNum])
def meth(a, b):
    return (a.quantity == b.quantity and
            a.unit == b.unit and
            a.name == b.name)
@A.defboth_mm_eq([PhysNum, lossless_number_type])
def meth(p, o):
if p.unit != dimensionless or p.name is not None:
return NotImplemented
return p.quantity == o
@defmethod(A.mm_neg, [PhysNum])
def meth(p):
return PhysNum(-p.quantity, p.unit)
@defmethod(A.mm_pow, [PhysNum, (int,long)])
def meth(p,pow):
    # use a Ratio for negative integer powers so integer quantities divide losslessly
    return PhysNum((as_ratio(p.quantity)
                    if pow < 0 and isinstance(p.quantity, (int,long)) else
                    p.quantity) ** pow,
p.unit**pow)
@defmethod(A.mm_mul, [PhysNum, PhysNum])
def meth(a, b):
return PhysNum(a.quantity * b.quantity, a.unit * b.unit)
@A.defboth_mm_mul([PhysNum, lossless_number_type])
def meth(p, a):
return PhysNum(p.quantity * a, p.unit)
def xdiv(a, b):
try:
return a/b
except ZeroDivisionError:
return dne
@defmethod(A.mm_div, [PhysNum, PhysNum])
def meth(a, b):
return PhysNum(xdiv(a.quantity, b.quantity), a.unit / b.unit)
@defmethod(A.mm_div, [PhysNum, lossless_number_type])
def meth(p, a):
return PhysNum(xdiv(p.quantity, a), p.unit)
@defmethod(A.mm_div, [anytype, PhysNum])
def meth(a, p):
return PhysNum(xdiv(a, p.quantity), p.unit ** -1)
def add_unit_check(verb, a, b):
if a.unit != b.unit:
raise ValueError("cannot %s %s and %s; units are incompatible" % (verb,a,b))
return a.unit
def add_dimensionless_check(p, other):
if p.unit != dimensionless:
raise ValueError("cannot add/subtract dimensionless %s and dimensional %s" % (other, p))
return dimensionless
@defmethod(A.mm_add, [PhysNum, PhysNum])
def meth(a, b):
return PhysNum(a.quantity + b.quantity, add_unit_check('add', a, b))
@A.defboth_mm_add([PhysNum, lossless_number_type])
def meth(p, a):
return PhysNum(p.quantity + a, add_dimensionless_check(p, a))
@defmethod(A.mm_sub, [PhysNum, PhysNum])
def meth(a, b):
return PhysNum(a.quantity - b.quantity, add_unit_check('sub', a, b))
@defmethod(A.mm_sub, [PhysNum, lossless_number_type])
def meth(p, a):
return PhysNum(p.quantity - a, add_dimensionless_check(p, a))
@defmethod(A.mm_sub, [anytype, PhysNum])
def meth(a, p):
return PhysNum(a - p.quantity, add_dimensionless_check(p, a))
@defmethod(A.mm_pow, [PhysNum, (Decimal, int, long)])
def meth(p, po):
return PhysNum(p.quantity ** po, p.unit ** po)
# # # # # # # # # #
# Algebric Types #
# # # # # # # # # #
class PhysNumInnerType(atypes.TypeBase):
'''Matches inner matchers on the quantity and unit
of a PhysNum
'''
__slots__ = ['quantity_inner','unit_inner']
def __init__(self, quantity_inner=anytype, unit_inner=anytype):
self.quantity_inner = atypes.as_type(quantity_inner)
self.unit_inner = atypes.as_type(unit_inner)
# Type Methods
@defmethod(as_string, [PhysNumInnerType])
def meth(op):
return '(physical_number_inner_type %s %s)' % (op.quantity_inner, op.unit_inner)
@defmethod(atypes.eq_types, [PhysNumInnerType, PhysNumInnerType])
def meth(a, b):
return a.quantity_inner == b.quantity_inner and a.unit_inner == b.unit_inner
@defmethod(atypes.hash_type, [PhysNumInnerType])
def meth(op):
return hash(op.quantity_inner) ^ hash(op.unit_inner) ^ 83412734
# Reductions and optimizations
# INCLUDE PAIRWISE REDUCTIONS
@defmethod(atypes.optimize_type, [PhysNumInnerType])
def meth(pn):
quantity_inner = atypes.optimize_type(pn.quantity_inner)
unit_inner = atypes.optimize_type(pn.unit_inner)
if quantity_inner==anytype and unit_inner==anytype:
return as_optimized_type((PhysNum,))
return PhysNumInnerType(quantity_inner, unit_inner)
# typep
@defmethod(atypes.typep, [object, PhysNumInnerType])
def meth(op, pn):
return (isinstance(op, PhysNum) and
typep(op.quantity, pn.quantity_inner) and
typep(op.unit, pn.unit_inner))
# Type Keyers
class PhysNumInnerKeyer(atypes.KeyerBase):
def __init__(self, quantity_keyer, unit_keyer):
self.quantity_keyer = quantity_keyer
self.unit_keyer = unit_keyer
def __eq__(self, other):
if not isinstance(other, PhysNumInnerKeyer):
return NotImplemented
return (self.quantity_keyer == other.quantity_keyer and
self.unit_keyer == other.unit_keyer)
def __hash__(self):
return hash(self.quantity_keyer) ^ hash(self.unit_keyer) ^ 348923432
@defmethod(atypes.get_type_keyer, [PhysNumInnerType])
def meth(op):
return PhysNumInnerKeyer(atypes.get_type_keyer(op.quantity_inner),
atypes.get_type_keyer(op.unit_inner))
def physical_number_inner_keyer(quantity_keyer_func, unit_keyer_func, op):
if not isinstance(op, PhysNum):
return None
return quantity_keyer_func(op.quantity), unit_keyer_func(op.unit)
@defmethod(atypes.keyer_getfunc, [PhysNumInnerKeyer])
def meth(pnk):
return partial(physical_number_inner_keyer,
atypes.keyer_getfunc(pnk.quantity_keyer),
atypes.keyer_getfunc(pnk.unit_keyer))
def physical_number_scorer(quantity_scorer, unit_scorer, pn_key):
if pn_key is None:
return atypes.worst_score
quantity_key, unit_key = pn_key
quantity_score = quantity_scorer(quantity_key)
unit_score = unit_scorer(unit_key)
if atypes.no_score in [quantity_score, unit_score]:
return atypes.no_score
return int(round((float(quantity_score) + float(unit_score)) / 2.0))
@defmethod(atypes.get_key_scorer, [PhysNumInnerType])
def meth(pn):
return partial(physical_number_scorer,
atypes.get_key_scorer(pn.quantity_inner),
atypes.get_key_scorer(pn.unit_inner))
| [
"[email protected]"
] | |
4c2b7947b41201f8cef1f67b6c56bb5f479c0730 | e6e65a6704c20e6e0288cfc54915ee7ea9e1c0a7 | /Tasks/2_AD/190401-5251-MinDis.py | fc43dbd939d9cd03400ba6661775334357519819 | [] | no_license | jiwonjulietyoon/Algorithm | b541e630c5b01b47cc05b538970d2b73d452baf5 | a11be16f4700e7e55382d4dcfd88d534a232f024 | refs/heads/master | 2020-04-24T01:54:05.200538 | 2019-11-09T03:56:47 | 2019-11-09T03:56:47 | 171,616,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | # 5251. [Python S/W Problem Solving - Implementation] Day 7 - Minimum Travel Distance
import sys
sys.stdin = open('../Input/5251.txt', 'r')
"""
# source, destination, length
# start: node 0
# goal: node N
"""
def min_not_vis(vis, dis):
min_idx = -1
    min_dis = 11112  # sentinel above the 11111 "infinity" used in dis
for i in range(N+1):
if vis[i]:
continue
else:
if dis[i] < min_dis:
min_dis = dis[i]
min_idx = i
return min_idx
TC = int(input())
for T in range(1, TC+1):
N, E = map(int, input().split()) # N+1 nodes including 0, E edges
edges = [[] for _ in range(N+1)]
for _ in range(E): # E edges
tmp = list(map(int, input().split()))
edges[tmp[0]] += [(tmp[1], tmp[2])]
dis = [0] + [11111 for _ in range(N)] # start at 0; max given weight is 10
vis = [0]*(N+1)
while 0 in vis: # c: current node; run while() until all nodes are marked visited
c = min_not_vis(vis, dis)
vis[c] = 1
for x in edges[c]:
if dis[c] + x[1] < dis[x[0]]:
dis[x[0]] = dis[c] + x[1]
print(f"#{T} {dis[-1]}")
| [
"[email protected]"
] | |
d0ba4f009a9310ae683d9c5fd9342c0d71e4289b | 4ae0b725cb05f590c502e32b8bf1c7853da11b03 | /servermsg/models.py | 9fa6f6f984265113304a51c15cd6bb8deb49665a | [] | no_license | alingb/ytym | a264ee282b710be2d7fc0529e54c5ef5a3383fac | 8c495dc118ce9d22f8890dc6397bf995b9c825b0 | refs/heads/master | 2021-07-23T11:29:18.674748 | 2018-12-04T02:08:24 | 2018-12-04T02:08:24 | 143,982,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | #-*-encoding:utf-8-*-
from django.db import models
# Create your models here.
class ServerMessage(models.Model):
pass
| [
"[email protected]"
] | |
94bb0ba1f6e517ca8fff0ea8bc4b7a8fea66af16 | a0e0bd0aacc93aa1b494a220aa79a6f99f6e8565 | /trunk/src/appserver/lib/console.py | 6306dba962b50cf2a5d9b94cd759694065dd53be | [] | no_license | newguangzhou/now-hello | b3f21c5b07c7f5b0b69eb91430c760d059499a03 | a0b970711318547afaa5d6ce1b500c089a2ded59 | refs/heads/master | 2021-08-20T03:21:42.927049 | 2017-11-28T02:40:59 | 2017-11-28T02:40:59 | 112,283,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # -*- coding: utf-8 -*-
import logging
import tornado.tcpserver
import tornado.ioloop
from tornado import gen
class Console(tornado.tcpserver.TCPServer):
def __init__(self):
tornado.tcpserver.TCPServer.__init__(self)
@gen.coroutine
def _read_line(self, stream, address):
yield stream.write(">>")
line = yield stream.read_until("\n", None, 1024)
line = line.strip()
self.handle_line(stream, address, line)
def handle_stream(self, stream, address):
logging.debug("A new console client, peer=%s", str(address))
self._read_line(stream, address)
def handle_line(self, stream, address, line):
logging.debug("Receive a console line \"%s\", peer=%s", line, address)
cmd = []
if line:
cmd = line.split(' ')
        if not self.handle_cmd(stream, address, cmd):
            stream.close()
            return
        self._read_line(stream, address)
def handle_cmd(self, stream, address, cmd):
return False
@gen.coroutine
def send_response(self, stream, response):
yield stream.write(response + "\r\n")
if __name__ == "__main__":
class _MyConsole(Console):
def handle_cmd(self, stream, address, cmd):
if len(cmd) == 1 and cmd[0] == "quit":
self.send_response(stream, "Byte!")
return False
elif len(cmd) == 0:
return True
else:
self.send_response(stream, "Invalid command!")
return True
import tornado.options
tornado.options.parse_command_line()
c = _MyConsole()
c.bind(9090, "127.0.0.1")
c.start()
tornado.ioloop.IOLoop.current().start()
| [
"[email protected]"
] | |
08c7741d1ab9a6b192780c8e21b5220b9f85ae0b | e0519908caa23bef1873ff69ebd17c5d81f741e1 | /calabiyau/ui/app.py | 5caf54da281f343de9ce80a4140b9b0d0894f596 | [
"BSD-3-Clause"
] | permissive | TachyonicProject/calabiyau | 2fb7af37bd656a686a5f741cadd082b2500718ff | 415a8ada4a93ee84c4776e89c9442af328dcfdd6 | refs/heads/latest | 2020-05-02T04:14:43.953841 | 2019-12-06T04:12:39 | 2019-12-06T04:12:39 | 177,745,608 | 0 | 3 | NOASSERTION | 2019-12-06T04:12:40 | 2019-03-26T08:31:25 | Python | UTF-8 | Python | false | false | 1,634 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from calabiyau.ui import views
| [
"[email protected]"
] | |
84b205ecd56b768f67dcd221e5a23a7a8cbb4837 | e6e65a6704c20e6e0288cfc54915ee7ea9e1c0a7 | /archive/1recon/gruTrans/model.py | efd9a602f4849f1f9ec47563f78d45059f342e62 | [] | no_license | schatzkara/REU2019 | fbb1f17d860c5d51a7ccae3ba106960d4c733949 | 6de28b5a8992f6122f2e9813de8b92d9e97ccbf3 | refs/heads/master | 2020-06-06T03:50:40.753334 | 2019-11-07T14:11:50 | 2019-11-07T14:11:50 | 192,629,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,635 | py | # phase 3
import torch
import torch.nn as nn
from networks.modifiedVGG import vgg16
from networks.modifiedI3D import InceptionI3d
from networks.expander import Expander
from networks.transformer import Transformer
from networks.kpp import KPPredictor
from networks.convGRU import ConvGRU
from networks.generator import Generator
from torchsummary import summary
"""
Pipeline:
i1 = single frame view2
i2 = 8 frames view1
i3 = viewpoint change
rep = I3D(i2)
vp = expander(i3)
rep' = trans(rep + vp)
kp' = kpp(rep')
app = VGG(i1)
app' = gru(app, kp')
recon = gen(app' + kp')
"""
vgg_weights_path = '/home/yogesh/kara/REU2019/weights/vgg16-397923af.pth'
# 'C:/Users/Owner/Documents/UCF/Project/REU2019/weights/vgg16-397923af.pth'
i3d_weights_path = '/home/yogesh/kara/REU2019/weights/rgb_charades.pt'
# 'C:/Users/Owner/Documents/UCF/Project/REU2019/weights/rgb_charades.pt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class FullNetwork(nn.Module):
"""
Class combining the full Cross-View Action Synthesis network architecture.
"""
VALID_VP_VALUE_COUNTS = (1, 3)
VALID_FRAME_COUNTS = (8, 16)
def __init__(self, vp_value_count, stdev, output_shape, name='Full Network'):
"""
Initializes the Full Network.
:param vp_value_count: (int) The number of values that identify the viewpoint.
:param output_shape: (5-tuple) The desired output shape for generated videos. Must match video input shape.
Legal values: (bsz, 3, 8/16, 112, 112) and (bsz, 3, 16, 112, 112)
:param name: (str, optional) The name of the network (default 'Full Network').
Raises:
ValueError: if 'vp_value_count' is not a legal value count
ValueError: if 'output_shape' does not contain a legal number of frames.
"""
if vp_value_count not in self.VALID_VP_VALUE_COUNTS:
raise ValueError('Invalid number of vp values: %d' % vp_value_count)
if output_shape[2] not in self.VALID_FRAME_COUNTS:
raise ValueError('Invalid number of frames in desired output: %d' % output_shape[2])
super(FullNetwork, self).__init__()
self.net_name = name
self.vp_value_count = vp_value_count
self.stdev = stdev
self.output_shape = output_shape
self.out_frames = output_shape[2]
# specs of various features
self.app_feat = 128
self.rep_feat = 128
self.rep_frames = 4
self.rep_size = 14
self.nkp = 32
self.vgg = vgg16(pretrained=True, weights_path=vgg_weights_path)
self.i3d = InceptionI3d(final_endpoint='Mixed_5c', in_frames=self.out_frames,
pretrained=True, weights_path=i3d_weights_path)
self.exp = Expander(vp_value_count=self.vp_value_count)
# convs to make all appearance encodings have same number of channels, so they can be used in the same convLSTM
self.app_conv128 = nn.Conv2d(in_channels=128, out_channels=self.app_feat, kernel_size=(3, 3),
stride=(1, 1), padding=(1, 1))
self.app_conv256a = nn.Conv2d(in_channels=256, out_channels=self.app_feat, kernel_size=(3, 3),
stride=(1, 1), padding=(1, 1))
self.app_conv256b = nn.Conv2d(in_channels=256, out_channels=self.app_feat, kernel_size=(3, 3),
stride=(1, 1), padding=(1, 1))
self.app_convs = [self.app_conv128, self.app_conv256a, self.app_conv256b]
# self.app_convs = {128: self.app_conv128,
# 256: self.app_conv256,
# 512: self.app_conv512}
# convs for the initial hidden state of the convGRU
# self.hconv = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), stride=(1, 1),
# padding=(1, 1))
# self.cconv = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), stride=(1, 1),
# padding=(1, 1))
# convs to make all motion features have the same number of channels, so they can be used in the same trans net
self.rep_conv64 = nn.Conv3d(in_channels=64, out_channels=self.rep_feat, kernel_size=(3, 3, 3), stride=(1, 1, 1),
padding=(1, 1, 1))
self.rep_conv192 = nn.Conv3d(in_channels=192, out_channels=self.rep_feat, kernel_size=(3, 3, 3), stride=(1, 1, 1),
padding=(1, 1, 1))
self.rep_conv256 = nn.Conv3d(in_channels=256, out_channels=self.rep_feat, kernel_size=(3, 3, 3), stride=(1, 1, 1),
padding=(1, 1, 1))
self.rep_convs = {64: self.rep_conv64,
192: self.rep_conv192,
256: self.rep_conv256}
self.trans = Transformer(in_channels=self.rep_feat + self.vp_value_count, out_channels=self.rep_feat)
self.kpp = KPPredictor(in_channels=self.rep_feat, nkp=self.nkp, stdev=self.stdev)
self.conv_gru = ConvGRU(input_dim=self.rep_feat, hidden_dim=[self.app_feat], kernel_size=(7, 7),
num_layers=1, batch_first=True, bias=False, return_all_layers=False)
self.gen = Generator(in_channels=[self.app_feat, self.nkp], out_frames=self.out_frames)
# print('%s Model Successfully Built \n' % self.net_name)
def forward(self, vp_diff, vid1, img2):
"""
Function to compute a single forward pass through the network, according to the architecture.
        :param vp_diff: (tensor) The difference between the two viewpoints.
A scalar value for the NTU Dataset; a 3-tuple for the panoptic Dataset.
Must be a tensor of shape: (bsz, 1/3) for this application.
:param vid1: (tensor) A video of the scene from the first viewpoint.
Must be a tensor of shape: (bsz, 3, 8/16, 112, 112) for this application.
:param img2: (tensor) An image of the scene from the second viewpoint to use for appearance conditioning.
Must be a tensor of shape: (bsz, 3, 112, 112) for this application.
        :return: The reconstructed video.
Shape of the output video is: (bsz, 3, out_frames, 112, 112) for this application.
"""
rep_v2_est, kp_v2_est = self.action_pipeline(vp_diff, vid1) # bsz,128,8,56,56, bsz,128,8,28,28, bsz,128,4,14,14
app_v2_est = self.appearance_pipeline(img2, rep_v2_est) # bsz,128,8,56,56, bsz,128,8,28,28, bsz,128,4,14,14
# these are the videos that get returned
gen_v2 = self.gen(app_v2_est, kp_v2_est) # bsz,3,out_frames,112,112
return gen_v2
def action_pipeline(self, vp_diff, vid1):
rep_v1 = self.i3d(vid1) # bsz,64,8,56,56, bsz,192,8,28,28, bsz,256,4,14,14
rep_v2_est = [] # bsz,128,8,56,56, bsz,128,8,28,28, bsz,128,4,14,14
for rep in rep_v1:
bsz, channels, frames, height, width = rep.size()
rep = self.rep_convs[channels](rep)
vp_1_to_2 = self.exp(vp_diff, out_frames=frames, out_size=height)
trans_input2 = torch.cat([vp_1_to_2, rep], dim=1) # dim=channels
rep_v2_est.append(self.trans(trans_input2))
        reps_v2_est, kp_v2_est = self.kpp(rep_v2_est)  # heatmaps: bsz,32,16,56,56; note the refined features (reps_v2_est) are not used below
return rep_v2_est, kp_v2_est # bsz,128,8,56,56, bsz,128,8,28,28, bsz,128,4,14,14, bsz,32,16,56,56
def appearance_pipeline(self, img2, rep_v2_est):
app_v2 = self.vgg(img2) # bsz,128,56,56, bsz,256,28,28, bsz,512,14,14
app_v2_est = [] # bsz,256,8,56,56, bsz,256,8,28,28, bsz,256,4,14,14
for i in range(len(app_v2)):
bsz, channels, height, width = app_v2[i].size()
app = self.app_convs[i](app_v2[i]) # bsz,256,56,56, bsz,256,28,28, bsz,256,14,14
output, last_state = self.conv_gru(input_tensor=rep_v2_est[i].permute(0, 2, 1, 3, 4),
hidden_state=[app])
output = output[0].permute(0, 2, 1, 3, 4)
trans_app = output.to(device)
app_v2_est.append(trans_app)
return app_v2_est # bsz,256,8,56,56, bsz,256,8,28,28, bsz,256,4,14,14
if __name__ == "__main__":
print_summary = True
    # stdev is a required constructor argument; 0.1 here is only an illustrative value
    net = FullNetwork(vp_value_count=1, stdev=0.1, output_shape=(20, 3, 8, 112, 112))
    if print_summary:
        # forward() takes (vp_diff, vid1, img2)
        summary(net, input_size=[(1,), (3, 8, 112, 112), (3, 112, 112)])
| [
"[email protected]"
] | |
38ac9b5d20a5a72d267ad789829aecd19e2a9e44 | c492c405f0535cb4eada74d9099b395f8e9701c3 | /demo/migrations/0002_auto_20191124_1646.py | 1c60f482747ab41c3f3a68ea11fd2022d3aa6277 | [] | no_license | Spirovanni/PyTut | a6a0713dcd100bbd35af21022e5b95f0894badf0 | 51489b7550ad8b4a70548de268624f806f827dc4 | refs/heads/master | 2020-09-16T15:39:55.081384 | 2019-11-29T03:39:42 | 2019-11-29T03:39:42 | 223,816,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.2.7 on 2019-11-25 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(default='', max_length=32, unique=True),
),
]
| [
"[email protected]"
] | |
6155e5faa1425f554bea5aec1808a9bf81438e7b | 2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5 | /tests/layer_tests/tensorflow_lite_tests/test_tfl_ReverseV2.py | 66a615945471d1c82044abccff6a5518cdc6b829 | [
"Apache-2.0"
] | permissive | openvinotoolkit/openvino | 38ea745a247887a4e14580dbc9fc68005e2149f9 | e4bed7a31c9f00d8afbfcabee3f64f55496ae56a | refs/heads/master | 2023-08-18T03:47:44.572979 | 2023-08-17T21:24:59 | 2023-08-17T21:24:59 | 153,097,643 | 3,953 | 1,492 | Apache-2.0 | 2023-09-14T21:42:24 | 2018-10-15T10:54:40 | C++ | UTF-8 | Python | false | false | 1,671 | py | import pytest
import tensorflow as tf
from common.tflite_layer_test_class import TFLiteLayerTest
test_params = [
{'shape': [1], 'axis': [-1]},
{'shape': [1], 'axis': [0]},
{'shape': [2, 6], 'axis': [-1, -2]},
{'shape': [2, 6], 'axis': [1]},
{'shape': [2, 4, 6], 'axis': [0, -2]},
{'shape': [2, 4, 6], 'axis': [2]},
{'shape': [2, 4, 6, 8], 'axis': [0, 3, -3, 2]},
{'shape': [2, 4, 6, 8], 'axis': [-3]},
{'shape': [2, 3, 1, 2, 2], 'axis': [0, 3, -3, 1, -1]},
{'shape': [2, 3, 1, 2, 2], 'axis': [4]},
{'shape': [2, 1, 1, 1, 2, 3, 2, 2], 'axis': [-1]},
{'shape': [2, 1, 1, 1, 2, 3, 2, 2], 'axis': [0, 1, 2, 3, 4, 5, 6, 7]},
]
class TestTFLiteReverseV2LayerTest(TFLiteLayerTest):
inputs = ["Input"]
outputs = ["ReverseV2"]
allowed_ops = ['REVERSE_V2']
def make_model(self, params):
assert len(set(params.keys()).intersection({'shape', 'axis'})) == 2, \
'Unexpected parameters for test: ' + ','.join(params.keys())
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
place_holder = tf.compat.v1.placeholder(params.get('dtype', tf.float32), params['shape'],
name=self.inputs[0])
tf.reverse(place_holder, params['axis'], name=self.outputs[0])
net = sess.graph_def
return net
@pytest.mark.parametrize("params", test_params)
@pytest.mark.nightly
def test_reverse_v2(self, params, ie_device, precision, temp_dir):
if len(params['axis']) > 1:
pytest.xfail('CVS-109932')
self._test(ie_device, precision, temp_dir, params)
| [
"[email protected]"
] | |
d4af23600119c1186c95e875df4696286a2d71f7 | 947fa6a4a6155ffce0038b11f4d743603418ad68 | .c9/metadata/environment/testing/testing_submissions/testing_assignment_004/fb_post/tests.py | 6f28d64abcd696a3c986e70dd17281c6eb809d20 | [] | no_license | bharathi151/bharathi_diyyala | bd75e10639d7d22b332d5ce677e7799402dc4984 | 99f8657d010c790a0e4e4c9d6b57f81814784eb0 | refs/heads/master | 2022-11-21T12:43:48.401239 | 2020-07-23T09:05:52 | 2020-07-23T09:05:52 | 281,903,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,119 | py | [Cloud9 editor metadata: 33 KB of auto-generated undo-manager JSON (keystroke history for tests.py) omitted as machine-generated debris] | [
"[email protected]"
] | |
cbb758a402a190f2d6e6f0bac3ba7dba9a42a43b | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/pacbio/pythonpkgs/pysiv2/bin/testkit_to_json | d59af355fd78ac17429a55e647e303afeb146872 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | #! python
import os
import argparse
import sys
import logging
from logging import log
from pysiv2 import utils
__author__ = 'gconcepcion'
log = logging.getLogger()
def setup_log(alog, file_name=None, level=logging.DEBUG):
if file_name is None:
handler = logging.StreamHandler(sys.stdout)
else:
handler = logging.FileHandler(file_name)
str_formatter = '[%(levelname)s] %(asctime)-15s [%(name)s %(funcName)s %(lineno)d] %(message)s'
formatter = logging.Formatter(str_formatter)
handler.setFormatter(formatter)
alog.addHandler(handler)
alog.setLevel(level)
def get_parser():
desc = ["Generate a pbservice analysis.json from a testkit.cfg"]
parser = argparse.ArgumentParser(description="\n".join(desc))
parser.add_argument('testkit_cfg', help="Path to testkit.cfg")
parser.add_argument("-o", "--output", dest="output", action="store",
help="Output file name - will be automatically "+
"generated if not specified")
parser.add_argument(
'--debug', action='store_true', help='Debug to stdout.')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
testkit = args.testkit_cfg
if args.debug:
setup_log(log, level=logging.DEBUG)
else:
log.addHandler(logging.NullHandler())
if os.path.exists(testkit):
file_name = utils.testkit_to_analysis_json(testkit, output=args.output)
return 0
else:
log.error("Unable to find file: {f}".format(f=testkit))
return 1
if __name__ == "__main__":
sys.exit(main())
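# Example invocation (illustrative; paths are placeholders):
#   testkit_to_json /path/to/testkit.cfg -o analysis.json --debug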
| [
"[email protected]"
] | ||
b4982047c820e343197ea0f6d3ad9ca252d41425 | 5984fdb0c07861f3d3b3a3b1944201b1b7217c1b | /github_contents.py | 135e27621a92afa23f802f284642b8de6771eb77 | [
"Apache-2.0"
] | permissive | jaywgraves/github-contents | 3e9db50f235b3a7a79551d5c48cd76d94f4cca33 | 8bb91a13d6d483227839a603489c67a83325ce63 | refs/heads/master | 2021-04-02T03:27:28.165486 | 2020-03-13T21:48:55 | 2020-03-13T21:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,781 | py | import base64
from requests import Session
class GithubContents:
class NotFound(Exception):
pass
class UnknownError(Exception):
pass
def __init__(self, owner, repo, token, branch=None):
self.owner = owner
self.repo = repo
self.token = token
self.branch = branch
self.session = Session()
def base_url(self):
return "https://api.github.com/repos/{}/{}".format(self.owner, self.repo)
def headers(self):
return {"Authorization": "token {}".format(self.token)}
def read(self, filepath):
"Returns (file_contents_in_bytes, sha1)"
# Try reading using content API
content_url = "{}/contents/{}".format(self.base_url(), filepath)
response = self.session.get(content_url, headers=self.headers())
if response.status_code == 200:
data = response.json()
return base64.b64decode(data["content"]), data["sha"]
elif response.status_code == 404:
raise self.NotFound(filepath)
elif response.status_code == 403:
# It's probably too large
if response.json()["errors"][0]["code"] != "too_large":
raise self.UnknownError(response.content)
else:
return self.read_large(filepath)
else:
raise self.UnknownError(response.content)
def read_large(self, filepath):
master = self.session.get(
self.base_url() + "/git/trees/master?recursive=1", headers=self.headers()
).json()
try:
tree_entry = [t for t in master["tree"] if t["path"] == filepath][0]
except IndexError:
raise self.NotFound(filepath)
data = self.session.get(tree_entry["url"], headers=self.headers()).json()
return base64.b64decode(data["content"]), data["sha"]
def write(
self, filepath, content_bytes, sha=None, commit_message="", committer=None
):
if not isinstance(content_bytes, bytes):
raise TypeError("content_bytes must be a bytestring")
github_url = "{}/contents/{}".format(self.base_url(), filepath)
payload = {
"path": filepath,
"content": base64.b64encode(content_bytes).decode("latin1"),
"message": commit_message,
}
if sha:
payload["sha"] = sha
if committer:
payload["committer"] = committer
response = self.session.put(github_url, json=payload, headers=self.headers())
if (
response.status_code == 403
and response.json()["errors"][0]["code"] == "too_large"
):
return self.write_large(filepath, content_bytes, commit_message, committer)
elif (
sha is None
and response.status_code == 422
and "sha" in response.json().get("message", "")
):
# Missing sha - we need to figure out the sha and try again
_, old_sha = self.read(filepath)
return self.write(
filepath,
content_bytes,
sha=old_sha,
commit_message=commit_message,
committer=committer,
)
elif response.status_code in (201, 200):
updated = response.json()
return updated["content"]["sha"], updated["commit"]["sha"]
else:
raise self.UnknownError(
str(response.status_code) + ":" + repr(response.content)
)
def write_large(self, filepath, content_bytes, commit_message="", committer=None):
if not isinstance(content_bytes, bytes):
raise TypeError("content_bytes must be a bytestring")
# Create a new blob with the file contents
created_blob = self.session.post(
self.base_url() + "/git/blobs",
json={
"encoding": "base64",
"content": base64.b64encode(content_bytes).decode("latin1"),
},
headers=self.headers(),
).json()
# Retrieve master tree sha
master_sha = self.session.get(
self.base_url() + "/git/trees/master?recursive=1", headers=self.headers()
).json()["sha"]
# Construct a new tree
created_tree = self.session.post(
self.base_url() + "/git/trees",
json={
"base_tree": master_sha,
"tree": [
{
"mode": "100644", # file (blob),
"path": filepath,
"type": "blob",
"sha": created_blob["sha"],
}
],
},
headers=self.headers(),
).json()
# Create a commit which references the new tree
payload = {
"message": commit_message,
"parents": [master_sha],
"tree": created_tree["sha"],
}
if committer:
payload["committer"] = committer
created_commit = self.session.post(
self.base_url() + "/git/commits", json=payload, headers=self.headers()
).json()
# Move HEAD reference on master to the new commit
self.session.patch(
self.base_url() + "/git/refs/heads/master",
json={"sha": created_commit["sha"]},
headers=self.headers(),
).json()
return created_blob["sha"], created_commit["sha"]
def branch_exists(self):
assert self.branch
return (
self.session.get(
self.base_url() + "/git/refs/heads/{}".format(self.branch),
headers=self.headers(),
).status_code
== 200
)
| [
"[email protected]"
] | |
0ea25180c5dc115b5b37177068603cec2d6827f4 | 4bb1a23a62bf6dc83a107d4da8daefd9b383fc99 | /work/agc016_a2.py | 750469c7c38ef8a70ef749bb3d8fa1439c1b0a6b | [] | no_license | takushi-m/atcoder-work | 0aeea397c85173318497e08cb849efd459a9f6b6 | f6769f0be9c085bde88129a1e9205fb817bb556a | refs/heads/master | 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | s = input()
n = len(s)
res = 10**5  # upper bound on the answer (the string is short)
# try to reduce the whole string to each candidate character c
for c in set(list(s)):
    ss = list(s)
    cnt = 0
    # one operation: every position may copy the character to its right, then the string shrinks by one
    while len(set(ss))>1:
        cnt += 1
        for i in range(len(ss)-1):
            if c==ss[i+1]:
                ss[i] = c
        ss.pop()
    res = min(res, cnt)
print(res) | [
"[email protected]"
] | |
733ae494a4ecbad01c04af80d49ec3f90f6c6b46 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/44/usersdata/98/14637/submittedfiles/desvpad.py | 070c267968854b9ffecc983f6ff646f40f3e8c8c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#start below
n=input('Enter the number of values: ')
l=[]
soma=0
soma2=0
for i in range(0,n,1):
    l.append(input('Enter a value: '))
    soma=soma+l[i]
media= soma/n
#accumulate the squared deviations of the values (not the loop index) from the mean
for i in range(0,n,1):
    soma2=soma2+(l[i]-media)**2
s= (soma2/(n-1))**(1/2)
print ('%.2f' %l[0])
print ('%.2f' %l[n-1])
print ('%.2f' %media)
print ('%.2f'%s) | [
"[email protected]"
] | |
a7a636860665fb9c84f893ef0e212d3debacf0bc | 4d0e86740237b7261770260905681ae3e679b2e8 | /starry/kepler.py | c4042b4ad12a26f7ba8010f3f90059ac4f74e79d | [
"MIT"
] | permissive | brackham/starry | 8ed5a5fa5f113bcbc45bbc463ae0e17c3bfedd09 | 9c462a2c9591b2d8f2be4ec710d791b88b764ddb | refs/heads/master | 2022-11-21T06:55:00.792202 | 2020-06-26T17:19:07 | 2020-06-26T17:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,474 | py | # -*- coding: utf-8 -*-
from . import config
from ._constants import *
from .maps import MapBase, RVBase, ReflectedBase
from ._core import OpsSystem, math
import numpy as np
from astropy import units
from inspect import getmro
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
import os
import logging
logger = logging.getLogger("starry.maps")
__all__ = ["Primary", "Secondary", "System"]
class Body(object):
"""A generic body. Must be subclassed."""
def __init__(
self,
map,
r=1.0,
m=1.0,
prot=1.0,
t0=0.0,
theta0=0.0,
length_unit=units.Rsun,
mass_unit=units.Msun,
time_unit=units.day,
angle_unit=units.degree,
**kwargs,
):
# Surface map
self._lazy = map._lazy
self._math = map._math
self.map = map
# Units
self.length_unit = length_unit
self.mass_unit = mass_unit
self.time_unit = time_unit
self.angle_unit = angle_unit
# Attributes
self.r = r
self.m = m
self.prot = prot
self.t0 = t0
self.theta0 = theta0
@property
def length_unit(self):
"""An ``astropy.units`` unit defining the length metric for this body."""
return self._length_unit
@length_unit.setter
def length_unit(self, value):
assert value.physical_type == "length"
self._length_unit = value
self._length_factor = value.in_units(units.Rsun)
@property
def mass_unit(self):
"""An ``astropy.units`` unit defining the mass metric for this body."""
return self._mass_unit
@mass_unit.setter
def mass_unit(self, value):
assert value.physical_type == "mass"
self._mass_unit = value
self._mass_factor = value.in_units(units.Msun)
@property
def time_unit(self):
"""An ``astropy.units`` unit defining the time metric for this body."""
return self._time_unit
@time_unit.setter
def time_unit(self, value):
assert value.physical_type == "time"
self._time_unit = value
self._time_factor = value.in_units(units.day)
@property
def angle_unit(self):
"""An ``astropy.units`` unit defining the angle metric for this body."""
return self._angle_unit
@angle_unit.setter
def angle_unit(self, value):
assert value.physical_type == "angle"
self._angle_unit = value
self._angle_factor = value.in_units(units.radian)
@property
def _angle_unit(self):
return self._map._angle_unit
@_angle_unit.setter
def _angle_unit(self, value):
self._map._angle_unit = value
@property
def _angle_factor(self):
return self._map._angle_factor
@_angle_factor.setter
def _angle_factor(self, value):
self._map._angle_factor = value
@property
def map(self):
"""The surface map for this body."""
return self._map
@map.setter
def map(self, value):
assert MapBase in getmro(
type(value)
), "The `map` attribute must be a `starry` map instance."
assert (
value._lazy == self._lazy
), "Map must have the same evaluation mode (lazy/greedy)."
self._map = value
@property
def r(self):
"""The radius in units of :py:attr:`length_unit`."""
return self._r / self._length_factor
@r.setter
def r(self, value):
self._r = self._math.cast(value * self._length_factor)
@property
def m(self):
"""The mass in units of :py:attr:`mass_unit`."""
return self._m / self._mass_factor
@m.setter
def m(self, value):
self._m = self._math.cast(value * self._mass_factor)
@property
def prot(self):
"""The rotation period in units of :py:attr:`time_unit`."""
return self._prot / self._time_factor
@prot.setter
def prot(self, value):
self._prot = self._math.cast(value * self._time_factor)
@property
def t0(self):
"""A reference time in units of :py:attr:`time_unit`."""
return self._t0 / self._time_factor
@t0.setter
def t0(self, value):
self._t0 = self._math.cast(value * self._time_factor)
@property
def theta0(self):
"""The map rotational phase at time :py:attr:`t0`."""
return self._theta0 / self._angle_factor
@theta0.setter
def theta0(self, value):
self._theta0 = self._math.cast(value * self._angle_factor)
def _check_kwargs(self, method, kwargs):
if not config.quiet:
for key in kwargs.keys():
message = "Invalid keyword `{0}` in call to `{1}()`. Ignoring."
message = message.format(key, method)
logger.warning(message)
class Primary(Body):
"""A primary (central) body.
Args:
map: The surface map of this body. This should be an instance
returned by :py:func:`starry.Map`.
r (scalar, optional): The radius of the body in units of
:py:attr:`length_unit`. Defaults to 1.0.
m (scalar, optional): The mass of the body in units of
:py:attr:`mass_unit`. Defaults to 1.0.
prot (scalar, optional): The rotation period of the body in units of
:py:attr:`time_unit`. Defaults to 1.0.
t0 (scalar, optional): A reference time in units of
:py:attr:`time_unit`. Defaults to 0.0.
theta0 (scalar, optional): The rotational phase of the map at time
:py:attr:`t0` in units of :py:attr:`angle_unit`. Defaults to 0.0.
length_unit (optional): An ``astropy.units`` unit defining the
distance metric for this object. Defaults to
:py:attr:`astropy.units.Rsun.`
mass_unit (optional): An ``astropy.units`` unit defining the
mass metric for this object. Defaults to
:py:attr:`astropy.units.Msun.`
time_unit (optional): An ``astropy.units`` unit defining the
time metric for this object. Defaults to
:py:attr:`astropy.units.day.`
angle_unit (optional): An ``astropy.units`` unit defining the
angular metric for this object. Defaults to
:py:attr:`astropy.units.degree.`
"""
def __init__(self, map, **kwargs):
# Initialize `Body`
super(Primary, self).__init__(map, **kwargs)
for kw in [
"r",
"m",
"prot",
"t0",
"theta0",
"length_unit",
"mass_unit",
"time_unit",
"angle_unit",
]:
kwargs.pop(kw, None)
self._check_kwargs("Primary", kwargs)
class Secondary(Body):
"""A secondary (orbiting) body.
Args:
map: The surface map of this body. This should be an instance
returned by :py:func:`starry.Map`.
r (scalar, optional): The radius of the body in units of
:py:attr:`length_unit`. Defaults to 1.0.
m (scalar, optional): The mass of the body in units of
:py:attr:`mass_unit`. Defaults to 1.0.
a (scalar, optional): The semi-major axis of the body in units of
:py:attr:`time_unit`. Defaults to 1.0. If :py:attr:`porb` is
also provided, this value is ignored.
porb (scalar, optional): The orbital period of the body in units of
:py:attr:`time_unit`. Defaults to 1.0. Setting this value
overrides :py:attr:`a`.
prot (scalar, optional): The rotation period of the body in units of
:py:attr:`time_unit`. Defaults to 1.0.
t0 (scalar, optional): A reference time in units of
:py:attr:`time_unit`. This is taken to be the time of a reference
transit. Defaults to 0.0.
ecc (scalar, optional): The orbital eccentricity of the body.
Defaults to 0.
w, omega (scalar, optional): The argument of pericenter of the body
in units of :py:attr:`angle_unit`. Defaults to 90 degrees.
Omega (scalar, optional): The longitude of ascending node of the
body in units of :py:attr:`angle_unit`. Defaults to 0 degrees.
inc (scalar, optional): The orbital inclination of the body in
units of :py:attr:`angle_unit`. Defaults to 90 degrees.
theta0 (scalar, optional): The rotational phase of the map at time
:py:attr:`t0` in units of :py:attr:`angle_unit`. Defaults to
0.0.
length_unit (optional): An ``astropy.units`` unit defining the
distance metric for this object. Defaults to
:py:attr:`astropy.units.Rsun.`
mass_unit (optional): An ``astropy.units`` unit defining the
mass metric for this object. Defaults to
:py:attr:`astropy.units.Msun.`
time_unit (optional): An ``astropy.units`` unit defining the
time metric for this object. Defaults to
:py:attr:`astropy.units.day.`
angle_unit (optional): An ``astropy.units`` unit defining the
angular metric for this object. Defaults to
:py:attr:`astropy.units.degree.`
"""
def __init__(self, map, **kwargs):
# Initialize `Body`
super(Secondary, self).__init__(map, **kwargs)
for kw in [
"r",
"m",
"prot",
"t0",
"theta0",
"length_unit",
"mass_unit",
"time_unit",
"angle_unit",
]:
kwargs.pop(kw, None)
# Attributes
if kwargs.get("porb", None) is not None:
self.porb = kwargs.pop("porb", None)
elif kwargs.get("a", None) is not None:
self.a = kwargs.pop("a", None)
else:
raise ValueError("Must provide a value for either `porb` or `a`.")
self.ecc = kwargs.pop("ecc", 0.0)
self.w = kwargs.pop(
"w", kwargs.pop("omega", 0.5 * np.pi / self._angle_factor)
)
self.Omega = kwargs.pop("Omega", 0.0)
self.inc = kwargs.pop("inc", 0.5 * np.pi / self._angle_factor)
self._check_kwargs("Secondary", kwargs)
@property
def porb(self):
"""The orbital period in units of :py:attr:`time_unit`.
.. note::
Setting this value overrides the value of :py:attr:`a`.
"""
if self._porb == 0.0:
return None
else:
return self._porb / self._time_factor
@porb.setter
def porb(self, value):
self._porb = self._math.cast(value * self._time_factor)
self._a = 0.0
@property
def a(self):
"""The semi-major axis in units of :py:attr:`length_unit`.
.. note::
Setting this value overrides the value of :py:attr:`porb`.
"""
if self._a == 0.0:
return None
else:
return self._a / self._length_factor
@a.setter
def a(self, value):
self._a = self._math.cast(value * self._length_factor)
self._porb = 0.0
@property
def ecc(self):
"""The orbital eccentricity."""
return self._ecc
@ecc.setter
def ecc(self, value):
self._ecc = value
@property
def w(self):
"""The longitude of pericenter in units of :py:attr:`angle_unit`."""
return self._w / self._angle_factor
@w.setter
def w(self, value):
self._w = self._math.cast(value * self._angle_factor)
@property
def omega(self):
"""Alias for the longitude of pericenter :py:attr:`w`."""
return self.w
@omega.setter
def omega(self, value):
self.w = value
@property
def Omega(self):
"""The longitude of ascending node in units of :py:attr:`angle_unit`."""
return self._Omega / self._angle_factor
@Omega.setter
def Omega(self, value):
self._Omega = self._math.cast(value * self._angle_factor)
@property
def inc(self):
"""The orbital inclination in units of :py:attr:`angle_unit`."""
return self._inc / self._angle_factor
@inc.setter
def inc(self, value):
self._inc = self._math.cast(value * self._angle_factor)
class System(object):
"""A system of bodies in Keplerian orbits about a central primary body.
Args:
primary (:py:class:`Primary`): The central body.
secondaries (:py:class:`Secondary`): One or more secondary bodies
in orbit about the primary.
time_unit (optional): An ``astropy.units`` unit defining the
time metric for this object. Defaults to
:py:attr:`astropy.units.day.`
light_delay (bool, optional): Account for the light travel time
delay to the barycenter of the system? Default is False.
texp (scalar): The exposure time of each observation. This can be a
scalar or a tensor with the same shape as ``t``. If ``texp`` is
provided, ``t`` is assumed to indicate the timestamp at the middle
of an exposure of length ``texp``.
oversample (int): The number of function evaluations to use when
numerically integrating the exposure time.
order (int): The order of the numerical integration scheme. This must
be one of the following: ``0`` for a centered Riemann sum
(equivalent to the "resampling" procedure suggested by Kipping 2010),
``1`` for the trapezoid rule, or ``2`` for Simpson’s rule.
"""
def _no_spectral(self):
if self._primary._map.nw is not None: # pragma: no cover
raise NotImplementedError(
"Method not yet implemented for spectral maps."
)
def __init__(
self,
primary,
*secondaries,
time_unit=units.day,
light_delay=False,
texp=None,
oversample=7,
order=0,
):
# Units
self.time_unit = time_unit
self._light_delay = bool(light_delay)
if texp is None:
self._texp = 0.0
else:
self._texp = texp
assert self._texp >= 0.0, "Parameter `texp` must be >= 0."
self._oversample = int(oversample)
assert self._oversample > 0, "Parameter `oversample` must be > 0."
self._order = int(order)
assert self._order in [0, 1, 2], "Invalid value for parameter `order`."
# Primary body
assert (
type(primary) is Primary
), "Argument `primary` must be an instance of `Primary`."
assert (
primary._map.__props__["reflected"] == False
), "Reflected light map not allowed for the primary body."
self._primary = primary
self._rv = primary._map.__props__["rv"]
self._lazy = primary._lazy
self._math = primary._math
if self._lazy:
self._linalg = math.lazy_linalg
else:
self._linalg = math.greedy_linalg
# Secondary bodies
assert len(secondaries) > 0, "There must be at least one secondary."
for sec in secondaries:
assert type(sec) is Secondary, (
"Argument `*secondaries` must be a sequence of "
"`Secondary` instances."
)
assert (
sec._map.nw == self._primary._map.nw
), "All bodies must have the same number of wavelength bins `nw`."
assert sec._map.__props__["rv"] == self._rv, (
"Radial velocity must be enabled "
"for either all or none of the bodies."
)
assert (
sec._lazy == self._lazy
), "All bodies must have the same evaluation mode (lazy/greedy)."
reflected = [sec._map.__props__["reflected"] for sec in secondaries]
if np.all(reflected):
self._reflected = True
elif np.any(reflected):
raise ValueError(
"Reflected light must be enabled "
"for either all or none of the secondaries."
)
else:
self._reflected = False
self._secondaries = secondaries
# All bodies
self._bodies = [self._primary] + list(self._secondaries)
# Indices of each of the bodies in the design matrix
Ny = [self._primary._map.Ny] + [
sec._map.Ny for sec in self._secondaries
]
self._inds = []
cur = 0
for N in Ny:
self._inds.append(cur + np.arange(N))
cur += N
# Theano ops class
self.ops = OpsSystem(
self._primary,
self._secondaries,
reflected=self._reflected,
rv=self._rv,
light_delay=self._light_delay,
texp=self._texp,
oversample=self._oversample,
order=self._order,
)
# Solve stuff
self._flux = None
self._C = None
self._solution = None
self._solved_bodies = []
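    # Construction sketch (illustrative only; assumes `starry.Map` instances):
    #
    #     pri = Primary(starry.Map(ydeg=1))
    #     sec = Secondary(starry.Map(ydeg=1), porb=1.0, r=0.1, m=1e-3)
    #     sys = System(pri, sec, texp=0.02, oversample=7, order=0)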
@property
def light_delay(self):
"""Account for the light travel time delay? *Read-only*"""
return self._light_delay
@property
def texp(self):
"""The exposure time in units of :py:attr:`time_unit`. *Read-only*"""
@property
def oversample(self):
"""Oversample factor when integrating over exposure time. *Read-only*"""
return self._oversample
@property
def order(self):
"""The order of the numerical integration scheme. *Read-only*
- ``0``: a centered Riemann sum
- ``1``: trapezoid rule
- ``2``: Simpson’s rule
"""
return self._order
@property
def time_unit(self):
"""An ``astropy.units`` unit defining the time metric for the system."""
return self._time_unit
@time_unit.setter
def time_unit(self, value):
assert value.physical_type == "time"
self._time_unit = value
self._time_factor = value.in_units(units.day)
@property
def primary(self):
"""The primary (central) object in the Keplerian system."""
return self._primary
@property
def secondaries(self):
"""A list of the secondary (orbiting) object(s) in the Keplerian system."""
return self._secondaries
@property
def bodies(self):
"""A list of all objects in the Keplerian system."""
return self._bodies
@property
def map_indices(self):
"""A list of the indices corresponding to each body in the design matrix."""
return self._inds
def show(
self,
t,
cmap="plasma",
res=300,
interval=75,
file=None,
figsize=(3, 3),
html5_video=True,
window_pad=1.0,
):
"""Visualize the Keplerian system.
Note that the body surface intensities are not normalized.
Args:
t (scalar or vector): The time(s) at which to evaluate the orbit and
the map in units of :py:attr:`time_unit`.
cmap (string or colormap instance, optional): The matplotlib colormap
to use. Defaults to ``plasma``.
res (int, optional): The resolution of the map in pixels on a
side. Defaults to 300.
figsize (tuple, optional): Figure size in inches. Default is
(3, 3) for orthographic maps and (7, 3.5) for rectangular
maps.
interval (int, optional): Interval between frames in milliseconds
(animated maps only). Defaults to 75.
file (string, optional): The file name (including the extension)
to save the animation to (animated maps only). Defaults to None.
html5_video (bool, optional): If rendering in a Jupyter notebook,
display as an HTML5 video? Default is True. If False, displays
the animation using Javascript (file size will be larger.)
window_pad (float, optional): Padding around the primary in units
of the primary radius. Bodies outside of this window will be
cropped. Default is 1.0.
"""
# Not yet implemented
if self._primary._map.nw is not None: # pragma: no cover
raise NotImplementedError(
"Method not implemented for spectral maps."
)
# Render the maps & get the orbital positions
if self._rv:
self._primary.map._set_RV_filter()
for sec in self._secondaries:
sec.map._set_RV_filter()
img_pri, img_sec, x, y, z = self.ops.render(
self._math.reshape(self._math.to_array_or_tensor(t), [-1])
* self._time_factor,
res,
self._primary._r,
self._primary._m,
self._primary._prot,
self._primary._t0,
self._primary._theta0,
self._primary._map._inc,
self._primary._map._obl,
self._primary._map._y,
self._primary._map._u,
self._primary._map._f,
self._primary._map._alpha,
self._primary._map._tau,
self._primary._map._delta,
self._math.to_array_or_tensor(
[sec._r for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._m for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._prot for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._t0 for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._theta0 for sec in self._secondaries]
),
self._get_periods(),
self._math.to_array_or_tensor(
[sec._ecc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._w for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._Omega for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._obl for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._y for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._u for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._f for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._alpha for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._tau for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._delta for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._sigr for sec in self._secondaries]
),
)
        # Convert to units of the primary radius
x, y, z = (
x / self._primary._r,
y / self._primary._r,
z / self._primary._r,
)
r = self._math.to_array_or_tensor(
[sec._r for sec in self._secondaries]
)
r = r / self._primary._r
# Evaluate if needed
if config.lazy:
img_pri = img_pri.eval()
img_sec = img_sec.eval()
x = x.eval()
y = y.eval()
z = z.eval()
r = r.eval()
# We need this to be of shape (nplanet, nframe)
x = x.T
y = y.T
z = z.T
# Ensure we have an array of frames
if len(img_pri.shape) == 3:
nframes = img_pri.shape[0]
else: # pragma: no cover
nframes = 1
img_pri = np.reshape(img_pri, (1,) + img_pri.shape)
img_sec = np.reshape(img_sec, (1,) + img_sec.shape)
animated = nframes > 1
# Set up the plot
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis("off")
ax.set_xlim(-1.0 - window_pad, 1.0 + window_pad)
ax.set_ylim(-1.0 - window_pad, 1.0 + window_pad)
# Render the first frame
img = [None for n in range(1 + len(self._secondaries))]
circ = [None for n in range(1 + len(self._secondaries))]
extent = np.array([-1.0, 1.0, -1.0, 1.0])
img[0] = ax.imshow(
img_pri[0],
origin="lower",
extent=extent,
cmap=cmap,
interpolation="none",
vmin=np.nanmin(img_pri),
vmax=np.nanmax(img_pri),
animated=animated,
zorder=0.0,
)
circ[0] = plt.Circle(
(0, 0), 1, color="k", fill=False, zorder=1e-3, lw=2
)
ax.add_artist(circ[0])
for i, _ in enumerate(self._secondaries):
extent = np.array([x[i, 0], x[i, 0], y[i, 0], y[i, 0]]) + (
r[i] * np.array([-1.0, 1.0, -1.0, 1.0])
)
img[i + 1] = ax.imshow(
img_sec[i, 0],
origin="lower",
extent=extent,
cmap=cmap,
interpolation="none",
vmin=np.nanmin(img_sec),
vmax=np.nanmax(img_sec),
animated=animated,
zorder=z[i, 0],
)
            # Outline circle for this secondary (slot 0 holds the primary's).
            circ[i + 1] = plt.Circle(
                (x[i, 0], y[i, 0]),
                r[i],
                color="k",
                fill=False,
                zorder=z[i, 0] + 1e-3,
                lw=2,
            )
            ax.add_artist(circ[i + 1])
# Animation
if animated:
def updatefig(k):
# Update Primary map
img[0].set_array(img_pri[k])
# Update Secondary maps & positions
for i, _ in enumerate(self._secondaries):
extent = np.array([x[i, k], x[i, k], y[i, k], y[i, k]]) + (
r[i] * np.array([-1.0, 1.0, -1.0, 1.0])
)
if np.any(np.abs(extent) < 1.0 + window_pad):
img[i + 1].set_array(img_sec[i, k])
img[i + 1].set_extent(extent)
img[i + 1].set_zorder(z[i, k])
                        circ[i + 1].center = (x[i, k], y[i, k])
                        circ[i + 1].set_zorder(z[i, k] + 1e-3)
return img + circ
ani = FuncAnimation(
fig, updatefig, interval=interval, blit=False, frames=nframes
)
# Business as usual
if (file is not None) and (file != ""):
if file.endswith(".mp4"):
ani.save(file, writer="ffmpeg")
elif file.endswith(".gif"):
ani.save(file, writer="imagemagick")
else: # pragma: no cover
# Try and see what happens!
ani.save(file)
plt.close()
else: # pragma: no cover
try:
if "zmqshell" in str(type(get_ipython())):
plt.close()
if html5_video:
display(HTML(ani.to_html5_video()))
else:
display(HTML(ani.to_jshtml()))
else:
raise NameError("")
except NameError:
plt.show()
plt.close()
# Matplotlib generates an annoying empty
# file when producing an animation. Delete it.
try:
os.remove("None0000000.png")
except FileNotFoundError:
pass
else:
if (file is not None) and (file != ""):
fig.savefig(file)
plt.close()
else: # pragma: no cover
plt.show()
if self._rv:
self._primary.map._unset_RV_filter()
for sec in self._secondaries:
sec.map._unset_RV_filter()
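    # Visualization sketch (illustrative only): render 50 frames over one
    # time unit and save them as an animation.
    #
    #     sys.show(np.linspace(0, 1.0, 50), res=150, interval=50,
    #              file="orbit.mp4")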
def design_matrix(self, t):
"""Compute the system flux design matrix at times ``t``.
.. note::
This is the *unweighted* design matrix, i.e., it does not
include the scaling by the amplitude of each body's map.
To perform this weighting, do
.. code-block:: python
X = sys.design_matrix(**kwargs)
for i, body in zip(sys.map_indices, sys.bodies):
X[:, i] *= body.map.amp
Args:
t (scalar or vector): An array of times at which to evaluate
the design matrix in units of :py:attr:`time_unit`.
"""
return self.ops.X(
self._math.reshape(self._math.to_array_or_tensor(t), [-1])
* self._time_factor,
self._primary._r,
self._primary._m,
self._primary._prot,
self._primary._t0,
self._primary._theta0,
self._math.to_array_or_tensor(1.0),
self._primary._map._inc,
self._primary._map._obl,
self._primary._map._u,
self._primary._map._f,
self._primary._map._alpha,
self._primary._map._tau,
self._primary._map._delta,
self._math.to_array_or_tensor(
[sec._r for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._m for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._prot for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._t0 for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._theta0 for sec in self._secondaries]
),
self._get_periods(),
self._math.to_array_or_tensor(
[sec._ecc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._w for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._Omega for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[
self._math.to_array_or_tensor(1.0)
for sec in self._secondaries
]
),
self._math.to_array_or_tensor(
[sec._map._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._obl for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._u for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._f for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._alpha for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._tau for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._delta for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._sigr for sec in self._secondaries]
),
)
def flux(self, t, total=True):
"""Compute the system flux at times ``t``.
Args:
t (scalar or vector): An array of times at which to evaluate
the flux in units of :py:attr:`time_unit`.
total (bool, optional): Return the total system flux? Defaults to
True. If False, returns arrays corresponding to the flux
from each body.
"""
X = self.design_matrix(t)
# Weight the ylms by amplitude
if self._reflected:
# If we're doing reflected light, scale the amplitude of
# each of the secondaries by the amplitude of the primary
# (the illumination source).
ay = [self._primary.map.amp * self._primary._map._y] + [
self._primary.map.amp * body.map.amp * body._map._y
for body in self._secondaries
]
else:
ay = [body.map.amp * body._map._y for body in self._bodies]
if total:
return self._math.dot(X, self._math.concatenate(ay))
else:
return [
self._math.dot(X[:, idx], ay[i])
for i, idx in enumerate(self._inds)
]
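    # Light-curve sketch (illustrative only): the total system flux and its
    # per-body decomposition over one orbital period.
    #
    #     t = np.linspace(0, 1.0, 1000)
    #     f_total = sys.flux(t)
    #     f_pri, f_sec = sys.flux(t, total=False)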
def rv(self, t, keplerian=True, total=True):
"""Compute the observed radial velocity of the system at times ``t``.
Args:
t (scalar or vector): An array of times at which to evaluate
the radial velocity in units of :py:attr:`time_unit`.
keplerian (bool): Include the Keplerian component of the radial
velocity of the primary? Default is True. If False, this
method returns a model for only the radial velocity anomaly
due to transits (the Rossiter-McLaughlin effect) and
time-variable surface features (Doppler tomography) for all
bodies in the system.
total (bool, optional): Return the total system RV? Defaults to
True. If False, returns arrays corresponding to the RV
contribution from each body.
"""
rv = self.ops.rv(
self._math.reshape(self._math.to_array_or_tensor(t), [-1])
* self._time_factor,
self._primary._r,
self._primary._m,
self._primary._prot,
self._primary._t0,
self._primary._theta0,
self._primary._map._amp,
self._primary._map._inc,
self._primary._map._obl,
self._primary._map._y,
self._primary._map._u,
self._primary._map._alpha,
self._primary._map._tau,
self._primary._map._delta,
self._primary._map._veq,
self._math.to_array_or_tensor(
[sec._r for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._m for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._prot for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._t0 for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._theta0 for sec in self._secondaries]
),
self._get_periods(),
self._math.to_array_or_tensor(
[sec._ecc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._w for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._Omega for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._amp for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._inc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._obl for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._y for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._u for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._alpha for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._tau for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._delta for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._sigr for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._map._veq for sec in self._secondaries]
),
np.array(keplerian),
)
if total:
return self._math.sum(rv, axis=0)
else:
return rv
def position(self, t):
"""Compute the Cartesian positions of all bodies at times ``t``.
Args:
t (scalar or vector): An array of times at which to evaluate
the position in units of :py:attr:`time_unit`.
"""
x, y, z = self.ops.position(
self._math.reshape(self._math.to_array_or_tensor(t), [-1])
* self._time_factor,
self._primary._m,
self._primary._t0,
self._math.to_array_or_tensor(
[sec._m for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._t0 for sec in self._secondaries]
),
self._get_periods(),
self._math.to_array_or_tensor(
[sec._ecc for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._w for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._Omega for sec in self._secondaries]
),
self._math.to_array_or_tensor(
[sec._inc for sec in self._secondaries]
),
)
fac = np.reshape(
[self._primary._length_factor]
+ [sec._length_factor for sec in self._secondaries],
[-1, 1],
)
return (x / fac, y / fac, z / fac)
def _get_periods(self):
periods = [None for sec in self._secondaries]
for i, sec in enumerate(self._secondaries):
if sec.porb:
periods[i] = sec.porb
else:
periods[i] = (
(2 * np.pi)
* sec._a ** (3 / 2)
/ (self._math.sqrt(G_grav * (self._primary._m + sec._m)))
)
return self._math.to_array_or_tensor(periods)
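    # The fallback in `_get_periods` above is Kepler's third law,
    #     P = 2 * pi * a**(3/2) / sqrt(G_grav * (M + m)),
    # evaluated in the internal unit system (lengths in R_sun, masses in
    # M_sun, periods in days), so `G_grav` must carry matching units.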
def set_data(self, flux, C=None, cho_C=None):
"""Set the data vector and covariance matrix.
This method is required by the :py:meth:`solve` method, which
analytically computes the posterior over surface maps for all bodies
in the system given a dataset and a prior, provided both are described
as multivariate Gaussians.
Args:
flux (vector): The observed system light curve.
C (scalar, vector, or matrix): The data covariance. This may be
a scalar, in which case the noise is assumed to be
homoscedastic, a vector, in which case the covariance
is assumed to be diagonal, or a matrix specifying the full
covariance of the dataset. Default is None. Either `C` or
`cho_C` must be provided.
cho_C (matrix): The lower Cholesky factorization of the data
covariance matrix. Defaults to None. Either `C` or
`cho_C` must be provided.
"""
self._flux = self._math.cast(flux)
self._C = self._linalg.Covariance(
C=C, cho_C=cho_C, N=self._flux.shape[0]
)
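    # Inference sketch (illustrative only): assumes a flux vector `f`, a
    # homoscedastic noise level `sigma`, and Gaussian priors already set on
    # each body's map.
    #
    #     sys.set_data(f, C=sigma ** 2)
    #     mu, cho_cov = sys.solve(t=t)
    #     sys.draw()  # sample one map realization from the posterior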
def solve(self, *, design_matrix=None, t=None):
"""Solve the least-squares problem for the posterior over maps for all bodies.
This method solves the generalized least squares problem given a system
light curve and its covariance (set via the :py:meth:`set_data` method)
and a Gaussian prior on the spherical harmonic coefficients
(set via the :py:meth:`set_prior` method). The map amplitudes and
coefficients of each of the bodies in the system are then set to the
maximum a posteriori (MAP) solution.
Args:
design_matrix (matrix, optional): The flux design matrix, the
quantity returned by :py:meth:`design_matrix`. Default is
None, in which case this is computed based on ``kwargs``.
t (vector, optional): The vector of times at which to evaluate
:py:meth:`design_matrix`, if a design matrix is not provided.
Default is None.
Returns:
The posterior mean for the spherical harmonic \
coefficients `l > 0` and the Cholesky factorization of the \
posterior covariance of all of the bodies in the system, \
stacked in order (primary, followed by each of the secondaries \
in the order they were provided.)
.. note::
Users may call the :py:meth:`draw` method of this class to draw
from the posterior after calling :py:meth:`solve`.
"""
# TODO: Implement for spectral maps?
self._no_spectral()
# Check that the data is set
if self._flux is None or self._C is None:
raise ValueError("Please provide a dataset with `set_data()`.")
# Get the full design matrix
if design_matrix is None:
assert t is not None, "Please provide a time vector `t`."
design_matrix = self.design_matrix(t)
X = self._math.cast(design_matrix)
# Get the data vector
f = self._math.cast(self._flux)
# Check for bodies whose priors are set
self._solved_bodies = []
inds = []
dense_L = False
for k, body in enumerate(self._bodies):
if body.map._mu is None or body.map._L is None:
# Subtract out this term from the data vector,
# since it is fixed
f -= body.map.amp * self._math.dot(
X[:, self._inds[k]], body.map.y
)
else:
# Add to our list of indices/bodies to solve for
inds.extend(self._inds[k])
self._solved_bodies.append(body)
if body.map._L.kind in ["matrix", "cholesky"]:
dense_L = True
# Do we have at least one body?
if len(self._solved_bodies) == 0:
raise ValueError("Please provide a prior for at least one body.")
# Keep only the terms we'll solve for
X = X[:, inds]
# Stack our priors
mu = self._math.concatenate(
[body.map._mu for body in self._solved_bodies]
)
if not dense_L:
# We can just concatenate vectors
LInv = self._math.concatenate(
[
body.map._L.inverse * self._math.ones(body.map.Ny)
for body in self._solved_bodies
]
)
else:
# FACT: The inverse of a block diagonal matrix
# is the block diagonal matrix of the inverses.
LInv = self._math.block_diag(
*[
body.map._L.inverse * self._math.eye(body.map.Ny)
for body in self._solved_bodies
]
)
# Compute the MAP solution
self._solution = self._linalg.solve(X, f, self._C.cholesky, mu, LInv)
# Set all the map vectors
x, cho_cov = self._solution
n = 0
for body in self._solved_bodies:
inds = slice(n, n + body.map.Ny)
body.map.amp = x[inds][0]
body.map[1:, :] = x[inds][1:] / body.map.amp
n += body.map.Ny
# Return the mean and covariance
self._solution = (x, cho_cov)
return self._solution
@property
def solution(self):
r"""The posterior probability distribution for the maps in the system.
This is a tuple containing the mean and lower Cholesky factorization of the
covariance of the amplitude-weighted spherical harmonic coefficient vectors,
obtained by solving the regularized least-squares problem
via the :py:meth:`solve` method.
Note that to obtain the actual covariance matrix from the lower Cholesky
factorization :math:`L`, simply compute :math:`L L^\top`.
        Note also that this is the posterior for the **amplitude-weighted**
        map vectors. Under this convention, the map amplitude of each body
        is equal to the first element of its vector, and its spherical
        harmonic coefficients are equal to the vector normalized by that
        first element.
"""
if self._solution is None:
raise ValueError("Please call `solve()` first.")
return self._solution
def draw(self):
"""
Draw a map from the posterior distribution and set
the :py:attr:`y` map vector of each body.
Users should call :py:meth:`solve` to enable this attribute.
"""
if self._solution is None:
raise ValueError("Please call `solve()` first.")
# Number of coefficients
N = np.sum([body.map.Ny for body in self._solved_bodies])
# Fast multivariate sampling using the Cholesky factorization
yhat, cho_ycov = self._solution
u = self._math.cast(np.random.randn(N))
x = yhat + self._math.dot(cho_ycov, u)
# Set all the map vectors
n = 0
for body in self._solved_bodies:
inds = slice(n, n + body.map.Ny)
body.map.amp = x[inds][0]
body.map[1:, :] = x[inds][1:] / body.map.amp
n += body.map.Ny
def lnlike(self, *, design_matrix=None, t=None, woodbury=True):
"""Returns the log marginal likelihood of the data given a design matrix.
This method computes the marginal likelihood (marginalized over the
spherical harmonic coefficients of all bodies) given a system
light curve and its covariance (set via the :py:meth:`set_data` method)
and a Gaussian prior on the spherical harmonic coefficients
(set via the :py:meth:`set_prior` method).
Args:
design_matrix (matrix, optional): The flux design matrix, the
quantity returned by :py:meth:`design_matrix`. Default is
None, in which case this is computed based on ``kwargs``.
t (vector, optional): The vector of times at which to evaluate
:py:meth:`design_matrix`, if a design matrix is not provided.
Default is None.
woodbury (bool, optional): Solve the linear problem using the
Woodbury identity? Default is True. The
`Woodbury identity <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_
is used to speed up matrix operations in the case that the
number of data points is much larger than the number of
spherical harmonic coefficients. In this limit, it can
speed up the code by more than an order of magnitude. Keep
in mind that the numerical stability of the Woodbury identity
is not great, so if you're getting strange results try
disabling this. It's also a good idea to disable this in the
limit of few data points and large spherical harmonic degree.
Returns:
lnlike: The log marginal likelihood.
"""
# TODO: Implement for spectral maps?
self._no_spectral()
# Check that the data is set
if self._flux is None or self._C is None:
raise ValueError("Please provide a dataset with `set_data()`.")
# Get the full design matrix
if design_matrix is None:
assert t is not None, "Please provide a time vector `t`."
design_matrix = self.design_matrix(t)
X = self._math.cast(design_matrix)
# Get the data vector
f = self._math.cast(self._flux)
# Check for bodies whose priors are set
self._solved_bodies = []
inds = []
dense_L = False
for k, body in enumerate(self._bodies):
if body.map._mu is None or body.map._L is None:
# Subtract out this term from the data vector,
# since it is fixed
f -= body.map.amp * self._math.dot(
X[:, self._inds[k]], body.map.y
)
else:
# Add to our list of indices/bodies to solve for
inds.extend(self._inds[k])
self._solved_bodies.append(body)
if body.map._L.kind in ["matrix", "cholesky"]:
dense_L = True
# Do we have at least one body?
if len(self._solved_bodies) == 0:
raise ValueError("Please provide a prior for at least one body.")
# Keep only the terms we'll solve for
X = X[:, inds]
# Stack our priors
mu = self._math.concatenate(
[body.map._mu for body in self._solved_bodies]
)
# Compute the likelihood
if woodbury:
if not dense_L:
# We can just concatenate vectors
LInv = self._math.concatenate(
[
body.map._L.inverse * self._math.ones(body.map.Ny)
for body in self._solved_bodies
]
)
else:
LInv = self._math.block_diag(
*[
body.map._L.inverse * self._math.eye(body.map.Ny)
for body in self._solved_bodies
]
)
lndetL = self._math.cast(
[body.map._L.lndet for body in self._solved_bodies]
)
return self._linalg.lnlike_woodbury(
X, f, self._C.inverse, mu, LInv, self._C.lndet, lndetL
)
else:
if not dense_L:
# We can just concatenate vectors
L = self._math.concatenate(
[
body.map._L.value * self._math.ones(body.map.Ny)
for body in self._solved_bodies
]
)
else:
L = self._math.block_diag(
*[
body.map._L.value * self._math.eye(body.map.Ny)
for body in self._solved_bodies
]
)
return self._linalg.lnlike(X, f, self._C.value, mu, L)
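    # Marginal-likelihood sketch (illustrative only): evaluate the evidence
    # for the current priors given the dataset set via `set_data`.
    #
    #     sys.set_data(f, C=sigma ** 2)
    #     ll = sys.lnlike(t=t, woodbury=True)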
| [
"[email protected]"
] | |
2e06e1980cde49ce41b1e7da5a288d1e723ff451 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/kpi/csv/scouteventgachapointconsume.py | 3e3e67c5ad7740f436e5a8d6a34b403592ebd208 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | # -*- coding: utf-8 -*-
import settings
from platinumegg.app.cabaret.util.db_util import ModelRequestMgr
from platinumegg.app.cabaret.kpi.models.scoutevent import ScoutEventGachaPointConsumeHash
from platinumegg.app.cabaret.kpi.csv.scoutevent import ScoutEventCSVBase
backup_db = getattr(settings, 'DB_BACKUP', settings.DB_READONLY)
class Manager(ScoutEventCSVBase):
"""スカウトイベントガチャポイント消費量.
"""
def __init__(self, date, output_dir):
ScoutEventCSVBase.__init__(self, date, output_dir)
def get_data(self):
model_mgr = ModelRequestMgr()
eventid = self.getScoutEventId(model_mgr)
data = ScoutEventGachaPointConsumeHash.aggregate(eventid)
if data:
return list(data.items())
else:
return None
def delete(self):
model_mgr = ModelRequestMgr()
if self.isScoutEventPresentEnd(model_mgr):
eventid = self.getScoutEventId(model_mgr)
ScoutEventGachaPointConsumeHash.getDB().delete(ScoutEventGachaPointConsumeHash.makeKey(eventid))
| [
"[email protected]"
] | |
8e77edd8dbfd0120efcb306a4b71746957ff489b | 3bed14fe6abcd8370916de178daff9746335b999 | /PythonProjects/02-ObjectsFunctionsAndMethods/src/m4_functions.py | 0e66614b27a449f865d2e95fd574fa3359682bc4 | [
"MIT"
] | permissive | sanaebrahimi/csse120-public | 346912d13aa1721dd780bfb9f0403c8ea2e5c9d0 | 128199b278e5cc5386bdfe5a9151b738ce09f8ff | refs/heads/master | 2022-12-14T12:58:45.803734 | 2020-09-08T03:33:44 | 2020-09-08T03:33:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,907 | py | """
Practice DEFINING and CALLING
FUNCTIONS
Authors: David Mutchler, Sana Ebrahimi, Mohammed Noureddine, Vibha Alangar,
Matt Boutell, Dave Fisher, their colleagues, and
PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# TODO: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
# _
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
###############################################################################
import rosegraphics as rg
def main():
"""
TESTS the functions that you will write below.
You write the tests per the _TODO_s below.
"""
window = rg.TurtleWindow()
# Put your TESTS immediately below this line, as directed by _TODO_s below.
window.close_on_mouse_click()
###############################################################################
# TODO: 3a. Define a function immediately below this _TODO_.
# It takes two arguments that denote, for a right triangle,
# the lengths of the two sides adjacent to its right angle,
# and it returns the length of the hypotenuse of that triangle.
# HINT: Apply the Pythagorean theorem.
# _
# You may name the function and its parameters whatever you wish,
# but choose DESCRIPTIVE (self-documenting) names.
#
# TODO: 3b. In main, CALL your function TWICE (with different values
# for the arguments) and print the returned values,
# to test whether you defined the function correctly.
###############################################################################
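###############################################################################
# One possible solution sketch for TODO 3 (illustrative; the names are
# examples, not required):
#
#     import math
#
#     def hypotenuse_length(leg1, leg2):
#         """Returns the hypotenuse of a right triangle with the given legs."""
#         return math.sqrt(leg1 ** 2 + leg2 ** 2)
###############################################################################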
###############################################################################
# TODO: 4a. Define a function immediately below this _TODO_.
# It takes two arguments:
# -- a string that represents a color (e.g. "red")
# -- a positive integer that represents the thickness of a Pen.
# _
# The function should do the following (in the order listed):
# a. Constructs two SimpleTurtle objects, where:
# - one has a Pen whose color is "green" and has the GIVEN thickness
# - the other has a Pen whose color is the GIVEN color
# and whose thickness is 5
# _
# Note: the "GIVEN" color means the PARAMETER that represents a color.
# Likewise, the "GIVEN" thickness means the PARAMETER for thickness.
# _
# b. Makes the first (green) SimpleTurtle move FORWARD 100 pixels.
# _
# c. Makes the other (thickness 5) SimpleTurtle move BACKWARD 100 pixels.
# _
# You may name the function and its parameters whatever you wish,
# but choose DESCRIPTIVE (self-documenting) names.
#
# TODO: 4b. In main, CALL your function at least TWICE (with different values
# for the arguments) to test whether you defined the function correctly.
###############################################################################
###############################################################################
# TODO: 5.
# COMMIT-and-PUSH your work (after changing this _TODO_ to DONE).
# _
# As a reminder, here is how you should do so:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3a. In the Commit Changes window that pops up,
# - If there is no message in the
# Commit Message
# sub-box, put one there, e.g. "Done."
# 3b: In that same Commit Changes window that pops up:
# - Press the Commit and Push button.
# (Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.)
# _
# COMMIT adds the changed work to the version control system on your COMPUTER.
# PUSH adds the changed work into your repository in the "cloud".
# _
# Always PUSH (in addition to the COMMIT) so that your work
# is backed-up in the cloud. If you COMMIT but forget to PUSH,
# you can subsequently do the PUSH by:
# VCS ~ Git ~ Push...
# _
# Oh, one more thing:
# Do you have any blue bars on the scrollbar-like thing to the
# right? If so, click on each blue bar and change its _TODO_ to
# DONE and then run the module (to make sure you did not break
# anything) and COMMIT-and-PUSH again.
# _
# You can COMMIT-and-PUSH as often as you like.
# DO IT FREQUENTLY; AT LEAST once per module.
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| [
"[email protected]"
] | |
f5a69f68051f5b68b6d2f65567725b63d246bfaa | 8d6f1dc44c0d9cc48842ba694e7e7767c02c7524 | /Learning_python_AnandPython/Start/problem3.py | 399885b1b75a3e9022f1fc43ed46fff930396fd7 | [] | no_license | ccsreenidhin/Practice_Anand_Python_Problems | d91f40359a0d6e08d02d80f4f77137a5e8beb4ad | 8140f615e935b293c1640aeb54d71b4a44373bdb | refs/heads/master | 2022-10-17T20:42:31.130630 | 2017-07-02T16:52:00 | 2020-06-11T12:28:40 | 271,540,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | #Problem 3: Create a python script with the following text and see the output. 1+2
1+2
| [
"[email protected]"
] | |
d356ff767d243dca741ca1a7a526a9ab397e7661 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_load_balancer_network_interfaces_operations.py | 5e351840793d45108df3555656e21cf0f89b9bd3 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 5,623 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations:
"""LoadBalancerNetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceListResult"]:
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
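    # Usage sketch (illustrative only): given an authenticated
    # `NetworkManagementClient` named `client`, the pager is consumed as:
    #
    #     async for nic in client.load_balancer_network_interfaces.list(
    #         "my-resource-group", "my-load-balancer"
    #     ):
    #         print(nic.name)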
| [
"[email protected]"
] | |
e5cbe4defe3eb48759672a185f2e75739378bb9d | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/excel.py | b95245ec2ba4408c6f85a9b4e3915339b51d9ddc | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-Advertising-Acknowledgement",
"MIT"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 6,332 | py | # file openpyxl/writer/excel.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write a .xlsx file."""
# Python stdlib imports
from zipfile import ZipFile, ZIP_DEFLATED
from ....compat import BytesIO as StringIO
# package imports
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
from ..writer.strings import create_string_table, write_string_table
from ..writer.workbook import write_content_types, write_root_rels, \
write_workbook_rels, write_properties_app, write_properties_core, \
write_workbook
from ..writer.theme import write_theme
from ..writer.styles import StyleWriter
from ..writer.drawings import DrawingWriter, ShapeWriter
from ..writer.charts import ChartWriter
from ..writer.worksheet import write_worksheet, write_worksheet_rels
class ExcelWriter(object):
"""Write a workbook object to an Excel file."""
def __init__(self, workbook):
self.workbook = workbook
self.style_writer = StyleWriter(self.workbook)
def write_data(self, archive):
"""Write the various xml files into the zip archive."""
# cleanup all worksheets
shared_string_table = self._write_string_table(archive)
archive.writestr(ARC_CONTENT_TYPES, write_content_types(self.workbook))
archive.writestr(ARC_ROOT_RELS, write_root_rels(self.workbook))
archive.writestr(ARC_WORKBOOK_RELS, write_workbook_rels(self.workbook))
archive.writestr(ARC_APP, write_properties_app(self.workbook))
archive.writestr(ARC_CORE,
write_properties_core(self.workbook.properties))
archive.writestr(ARC_THEME, write_theme())
archive.writestr(ARC_STYLE, self.style_writer.write_table())
archive.writestr(ARC_WORKBOOK, write_workbook(self.workbook))
self._write_worksheets(archive, shared_string_table, self.style_writer)
def _write_string_table(self, archive):
for ws in self.workbook.worksheets:
ws.garbage_collect()
shared_string_table = create_string_table(self.workbook)
archive.writestr(ARC_SHARED_STRINGS,
write_string_table(shared_string_table))
for k, v in shared_string_table.items():
shared_string_table[k] = bytes(v)
return shared_string_table
def _write_worksheets(self, archive, shared_string_table, style_writer):
drawing_id = 1
chart_id = 1
shape_id = 1
for i, sheet in enumerate(self.workbook.worksheets):
archive.writestr(PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1),
write_worksheet(sheet, shared_string_table,
style_writer.get_style_by_hash()))
if sheet._charts or sheet.relationships:
archive.writestr(PACKAGE_WORKSHEETS +
'/_rels/sheet%d.xml.rels' % (i + 1),
write_worksheet_rels(sheet, drawing_id))
if sheet._charts:
dw = DrawingWriter(sheet)
archive.writestr(PACKAGE_DRAWINGS + '/drawing%d.xml' % drawing_id,
dw.write())
archive.writestr(PACKAGE_DRAWINGS + '/_rels/drawing%d.xml.rels' % drawing_id,
dw.write_rels(chart_id))
drawing_id += 1
for chart in sheet._charts:
cw = ChartWriter(chart)
archive.writestr(PACKAGE_CHARTS + '/chart%d.xml' % chart_id,
cw.write())
if chart._shapes:
archive.writestr(PACKAGE_CHARTS + '/_rels/chart%d.xml.rels' % chart_id,
cw.write_rels(drawing_id))
sw = ShapeWriter(chart._shapes)
archive.writestr(PACKAGE_DRAWINGS + '/drawing%d.xml' % drawing_id,
sw.write(shape_id))
shape_id += len(chart._shapes)
drawing_id += 1
chart_id += 1
def save(self, filename):
"""Write data into the archive."""
archive = ZipFile(filename, 'w', ZIP_DEFLATED)
self.write_data(archive)
archive.close()
def save_workbook(workbook, filename):
"""Save the given workbook on the filesystem under the name filename.
:param workbook: the workbook to save
:type workbook: :class:`openpyxl.workbook.Workbook`
:param filename: the path to which save the workbook
:type filename: string
:rtype: bool
"""
writer = ExcelWriter(workbook)
writer.save(filename)
return True
def save_virtual_workbook(workbook):
"""Return an in-memory workbook, suitable for a Django response."""
writer = ExcelWriter(workbook)
temp_buffer = StringIO()
    # Create the archive outside the try block so that `archive` is always
    # bound when the finally clause runs.
    archive = ZipFile(temp_buffer, 'w', ZIP_DEFLATED)
    try:
        writer.write_data(archive)
finally:
archive.close()
virtual_workbook = temp_buffer.getvalue()
temp_buffer.close()
return virtual_workbook
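# Usage sketch (illustrative only; `wb` is an openpyxl Workbook instance):
#
#     save_workbook(wb, 'out.xlsx')            # write to disk
#     payload = save_virtual_workbook(wb)      # or keep the bytes in memory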
| [
"[email protected]"
] | |
0d69b1cca7ac402b2bf0126e0fbc92837503b45f | 0f85db2a2bda863359ad7c81ec6ebba5b42ad939 | /36-challenges/ex115.py | 590d9cf00f800827b703040bb59b5bcfdda09f1e | [] | no_license | ferreret/python-bootcamp-udemy | 894a08ba086bad41ba02f2015112956545f3b581 | ce499458d7da9ff64f9113114cf855afbc6f8163 | refs/heads/master | 2022-11-18T00:12:24.613797 | 2020-07-15T19:46:45 | 2020-07-15T19:46:45 | 257,936,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py |
'''
includes([1, 2, 3], 1) # True
includes([1, 2, 3], 1, 2) # False
includes({ 'a': 1, 'b': 2 }, 1) # True
includes({ 'a': 1, 'b': 2 }, 'a') # False
includes('abcd', 'b') # True
includes('abcd', 'e') # False
'''
def includes(collection, value, start_index=0):
    if isinstance(collection, dict):
return any(x == value for x in collection.values())
return any(x == value for x in collection[start_index:])
print(includes([1, 2, 3], 1)) # True
print(includes([1, 2, 3], 1, 2)) # False
print(includes({'a': 1, 'b': 2}, 1)) # True
print(includes({'a': 1, 'b': 2}, 'a')) # False
print(includes('abcd', 'b')) # True
print(includes('abcd', 'e')) # False
| [
"[email protected]"
] | |
a1f84e1cd613f2db636b17cd80dc027b0d1e3c59 | d43100d78daa1a8167e462e0faaa7d2a0fe97671 | /touchtechnology/common/tests/test_models.py | a7a26375da8cde01ddd7115cf63998b17a077224 | [
"BSD-3-Clause"
] | permissive | goodtune/vitriolic | 1d6ee4758ed41f1674b70311be88c7135b2d1ed0 | d4b3da0a8c9b5ccbda4d898003d82934ccad6a7b | refs/heads/main | 2023-08-22T21:29:05.488534 | 2023-07-23T03:00:16 | 2023-07-23T03:00:16 | 73,355,905 | 0 | 0 | BSD-3-Clause | 2023-09-04T16:25:44 | 2016-11-10T07:06:41 | Python | UTF-8 | Python | false | false | 355 | py | from django.utils.encoding import smart_str
from test_plus import TestCase
from touchtechnology.common.tests import factories
class SitemapNodeTests(TestCase):
def setUp(self):
self.object = factories.SitemapNodeFactory.create()
def test_string_representation(self):
self.assertEqual(self.object.title, smart_str(self.object))
| [
"[email protected]"
] | |
ca12ea91f5c1cfafc228d306af427cdb5e2fd9fe | 5b28005b6ee600e6eeca2fc7c57c346e23da285f | /nomadic_recording_lib/ui/qt/bases/qtsimple.py | b93a45beb012474f93e49d848144358df034b7e6 | [] | no_license | nocarryr/wowza_logparse | c31d2db7ad854c6b0d13495a0ede5f406c2fce3f | d6daa5bf58bae1db48ac30031a845bf975c7d5cc | refs/heads/master | 2021-01-17T07:19:00.347206 | 2017-06-24T16:57:32 | 2017-06-24T16:57:32 | 25,835,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py |
from ...bases import simple
class Color(simple.Color):
pass
class EntryBuffer(simple.EntryBuffer):
pass
class Spin(simple.Spin):
pass
class Radio(simple.Radio):
pass
class Toggle(simple.Toggle):
pass
class Fader(simple.Fader):
pass
class ScaledFader(simple.ScaledFader):
pass
| [
"[email protected]"
] | |
030b8a4210f6316bcd16d65f28a93e647afdd838 | 7d58cb5bb403d394e609a1f4be8f438cfcaa3895 | /queue/arrayqueue.py | 1f9d7b91dcefda86a4f23f09064ecd934caeccc8 | [] | no_license | TechInTech/dataStructure | 7a07a0ca3f4ccf4b60f766df536908a36520dd51 | eb4d6c315867ebb676a1119a5099024aa37988eb | refs/heads/master | 2020-05-03T04:23:55.319238 | 2019-05-24T07:27:47 | 2019-05-24T07:27:47 | 178,420,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,129 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019/2/21 11:04
# @Author : Despicable Me
# @Email :
# @File : arrayqueue.py
# @Software: PyCharm
# @Explain :
from arrays import Array
from abstractqueue import AbstractQueue
class ArrayQueue1(AbstractQueue):
"""移动队尾rear"""
DEFAULT_CAPACITY = 10
    def __init__(self, sourceCollection=None):
        self._items = Array(ArrayQueue1.DEFAULT_CAPACITY)
        # Initialize the pointers before AbstractQueue.__init__, which may
        # call add() while copying items from sourceCollection.
        self._front = 0
        self._rear = 0
        AbstractQueue.__init__(self, sourceCollection)
        self._rear = max(len(self) - 1, 0)
def __iter__(self):
cursor = 0
while cursor < len(self):
yield self._items[cursor]
cursor += 1
    def clear(self):
        self._size = 0
        self._front = 0
        self._rear = 0
        self._items = Array(ArrayQueue1.DEFAULT_CAPACITY)
    def pop(self):
        if self.isEmpty():
            raise KeyError("queue is empty.")
        oldItem = self._items[0]
        # Shift the remaining items one slot toward the front.
        for i in range(0, len(self) - 1):
            self._items[i] = self._items[i + 1]
        self._size -= 1
        self._rear = max(self._rear - 1, 0)
        return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
for i in range(len(self)):
if self._items[i] == item:
return True
return False
    def add(self, item):
        if self.isEmpty():
            self._items[0] = item
            self._size += 1
        elif (self._rear + 1) < ArrayQueue1.DEFAULT_CAPACITY:
            # Advance the rear pointer first so the new item does not
            # overwrite the current rear element.
            self._rear += 1
            self._items[self._rear] = item
            self._size += 1
        else:
            raise KeyError("the queue is full.")
class ArrayQueue2(AbstractQueue):
"""移动队头front"""
DEFAULT_CAPACITY = 10
    def __init__(self, sourceCollection=None):
        self._items = Array(ArrayQueue2.DEFAULT_CAPACITY)
        # Initialize the pointers before AbstractQueue.__init__, which may
        # call add() while copying items from sourceCollection.
        self._front = 0
        self._rear = 0
        AbstractQueue.__init__(self, sourceCollection)
        self._rear = max(len(self) - 1, 0)
def __iter__(self):
cursor = self._front
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
    def clear(self):
        self._size = 0
        self._front = 0
        self._rear = 0
        self._items = Array(ArrayQueue2.DEFAULT_CAPACITY)
def pop(self):
if self.isEmpty():
raise KeyError(" queue is empty.")
oldItem = self._items[self._front]
if self._front < self._rear:
self._front += 1
elif self._front == self._rear:
self._front = 0
self._rear = 0
self._size -= 1
return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
for i in range(self._front, self._rear + 1):
if self._items[i] == item:
return True
return False
    def add(self, item):
        if self.isEmpty():
            self._items[self._front] = item
            self._size += 1
        elif (self._rear + 1) < ArrayQueue2.DEFAULT_CAPACITY:
            # Advance the rear pointer first so the new item does not
            # overwrite the current rear element.
            self._rear += 1
            self._items[self._rear] = item
            self._size += 1
            if self._front > 0 and self._rear == (ArrayQueue2.DEFAULT_CAPACITY - 1):
                # Slide the queue one slot toward index 0 to reclaim the
                # space freed by earlier pops.
                for i in range(self._front, self._rear + 1):
                    self._items[i - 1] = self._items[i]
                self._front -= 1
                self._rear -= 1
        else:
            raise KeyError("the queue is full.")
class ArrayQueue3(AbstractQueue):
"""循环队列"""
DEFAULT_CAPACITY = 10
def __init__(self, sourceCollection=None):
self._items = Array(ArrayQueue3.DEFAULT_CAPACITY)
AbstractQueue.__init__(self, sourceCollection)
self._front = 0
self._rear = max(len(self) - 1, 0)
def __iter__(self):
if self._front <= self._rear:
cursor = self._front
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
else:
cursor = self._front
while cursor <= len(self) - 1:
yield self._items[cursor]
cursor += 1
cursor = 0
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
def clear(self):
self._size = 0
self._items = Array(ArrayQueue3.DEFAULT_CAPACITY)
def pop(self):
if self.isEmpty():
raise KeyError(" queue is empty.")
oldItem = self._items[self._front]
if self._front == (len(self) -1):
self._front = 0
else:
self._front += 1
self._size -= 1
return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
elif self._front <= self._rear:
for i in range(self._front, self._rear + 1):
if self._items[i] == item:
return True
return False
else:
for i in range(0, self._rear + 1):
if self._items[i] == item:
return True
for i in range(self._front, len(self)):
if self._items[i] == item:
return True
return False
def add(self, item):
if self.isEmpty():
self._items[self._rear] = item
self._size += 1
elif len(self) == ArrayQueue3.DEFAULT_CAPACITY:
raise KeyError("the queue is full.")
elif self._rear == len(self) - 1:
self._items[0] = item
self._rear += 1
self._size += 1
else:
self._rear += 1
self._items[self._rear] = item
self._size += 1 | [
"[email protected]"
] | |
36a075bdc76b06cadad18921299df41c9f0f1358 | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030151703.py | 98dd14774d314d94bec33cfc3bedc340f84a5aba | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.models import ClusterableModel
from modelcluster.fields import ParentalKey
from wagtail.core.models import Orderable
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, InlinePanel
class MenuItem(Orderable):
link_title = models.CharField(blank=True, max_length=50)
link_url = models.CharField(max_length=500, blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE,
)
open_in_new_tab = models.BooleanField(
default=False,
blank=True,
)
panels = [
FieldPanel('link_title'),
FieldPanel('link_url'),
PageChooserPanel('link_page'),
FieldPanel('open_in_new_tab',),
]
page = ParentalKey('Menu', related_name='menu_items')
@property
def link(self):
if self.link_
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_from='title',
editable=True,
)
panels = [
FieldPanel('title'),
FieldPanel('slug'),
InlinePanel('menu_items', label='Menu Item'),
]
def __str__(self):
return self.title
| [
"[email protected]"
] | |
00e1101a2d4dffa2fae86d26f0dbd14410a93620 | 5d5b6a7bd7fffe46980d41e452fe92c28a08d5d1 | /UDP--/asyncio-queue.py | 57c5600728289fcd7729ba4f31fd87e3791cea9d | [] | no_license | calllivecn/testing | ce21442f1398b177675ca2b655c4ed3aaf1edcb3 | 05c1d335d54bb12fbbcf3721260763e4537dcaf4 | refs/heads/master | 2023-08-20T21:51:15.511595 | 2023-06-13T13:26:37 | 2023-06-13T13:26:37 | 49,574,572 | 0 | 1 | null | 2022-12-08T09:29:09 | 2016-01-13T13:11:57 | Python | UTF-8 | Python | false | false | 1,523 | py | #!/usr/bin/env python3
# coding=utf-8
# date 2022-09-09 21:21:12
# author calllivecn <[email protected]>
import queue
import asyncio
import threading
async def customer(q):
while (task := await q.get()) is not None:
q.task_done()
print(f"customer: {task}")
async def producter(q):
for i in range(10):
c = f"生产资料:{i}"
await q.put(c)
print(c)
await q.put(None)
def customer2(q):
while (task := q.get()) is not None:
q.task_done()
print(f"customer: {task}")
def producter2(q):
for i in range(10):
c = f"生产资料:{i}"
q.put(c)
print(c)
q.put(None)
class run(threading.Thread):
def __init__(self, queue):
super().__init__()
self.queue = queue
def run(self):
customer2(self.queue)
async def async_main():
q = asyncio.Queue(2)
print("启动消费者")
th = asyncio.create_task(customer(q))
print("启动生产者")
p = asyncio.create_task(producter(q))
# 这样才是并发的
await th
await p
async def async_main2():
q = asyncio.Queue(2)
print("启动消费者")
print("启动生产者")
L = await asyncio.gather(customer(q), producter(q))
print("结果:", L)
def main():
q = queue.Queue(2)
print("启动消费者")
th = run(q)
th.start()
print("启动生产者")
producter2(q)
if __name__ == "__main__":
# asyncio.run(async_main())
asyncio.run(async_main2())
# main() | [
"[email protected]"
] | |
688d5d0aa4a544d0b6ebdd24b9ca3c2c5ebfae91 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/models/iot_dps_properties_description_py3.py | c8f57abf34739bf39900f01dedfbe64c8f796761 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 3,854 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IotDpsPropertiesDescription(Model):
"""the service specific properties of a provisoning service, including keys,
linked iot hubs, current state, and system generated properties such as
hostname and idScope.
Variables are only populated by the server, and will be ignored when
sending a request.
:param state: Current state of the provisioning service. Possible values
include: 'Activating', 'Active', 'Deleting', 'Deleted',
'ActivationFailed', 'DeletionFailed', 'Transitioning', 'Suspending',
'Suspended', 'Resuming', 'FailingOver', 'FailoverFailed'
:type state: str or ~azure.mgmt.iothubprovisioningservices.models.State
:param provisioning_state: The ARM provisioning state of the provisioning
service.
:type provisioning_state: str
:param iot_hubs: List of IoT hubs assosciated with this provisioning
service.
:type iot_hubs:
list[~azure.mgmt.iothubprovisioningservices.models.IotHubDefinitionDescription]
:param allocation_policy: Allocation policy to be used by this
provisioning service. Possible values include: 'Hashed', 'GeoLatency',
'Static'
:type allocation_policy: str or
~azure.mgmt.iothubprovisioningservices.models.AllocationPolicy
:ivar service_operations_host_name: Service endpoint for provisioning
service.
:vartype service_operations_host_name: str
:ivar device_provisioning_host_name: Device endpoint for this provisioning
service.
:vartype device_provisioning_host_name: str
:ivar id_scope: Unique identifier of this provisioning service.
:vartype id_scope: str
:param authorization_policies: List of authorization keys for a
provisioning service.
:type authorization_policies:
list[~azure.mgmt.iothubprovisioningservices.models.SharedAccessSignatureAuthorizationRuleAccessRightsDescription]
"""
_validation = {
'service_operations_host_name': {'readonly': True},
'device_provisioning_host_name': {'readonly': True},
'id_scope': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'iot_hubs': {'key': 'iotHubs', 'type': '[IotHubDefinitionDescription]'},
'allocation_policy': {'key': 'allocationPolicy', 'type': 'str'},
'service_operations_host_name': {'key': 'serviceOperationsHostName', 'type': 'str'},
'device_provisioning_host_name': {'key': 'deviceProvisioningHostName', 'type': 'str'},
'id_scope': {'key': 'idScope', 'type': 'str'},
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRuleAccessRightsDescription]'},
}
def __init__(self, *, state=None, provisioning_state: str=None, iot_hubs=None, allocation_policy=None, authorization_policies=None, **kwargs) -> None:
super(IotDpsPropertiesDescription, self).__init__(**kwargs)
self.state = state
self.provisioning_state = provisioning_state
self.iot_hubs = iot_hubs
self.allocation_policy = allocation_policy
self.service_operations_host_name = None
self.device_provisioning_host_name = None
self.id_scope = None
self.authorization_policies = authorization_policies
| [
"[email protected]"
] | |
1c83b0d6834355f9d556f510537c7ebea1b4ac9f | 3fd8eae327323ea45d31909d537fd7ee8f49c2de | /torch_stft/util.py | e1a92295009ab8e82ce1f2060b0da5cb790e44c7 | [
"BSD-3-Clause"
] | permissive | taeminlee/torch-stft | 4f61c754f8a953d51d492404de602310bfaa38ca | c6236f77af113207a78feff93b4b9cbeeccb9143 | refs/heads/master | 2020-11-29T19:55:50.041494 | 2019-12-26T06:15:14 | 2019-12-26T06:15:14 | 230,204,083 | 0 | 0 | BSD-3-Clause | 2019-12-26T05:58:20 | 2019-12-26T05:58:19 | null | UTF-8 | Python | false | false | 1,657 | py | import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x | [
"[email protected]"
] | |
5c9b1176bbf81a0b9b815632de912c5e83333052 | 2e60bdaf03181f1479701efebbb495f88615df4c | /nlp/ner/lstm/dataset/dataset.py | b55fb2e52e9c0e63d501499eba7771fc0cb3eac0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | whatisnull/tensorflow_nlp | dc67589ee4069f7a71baa1640d796bac3445bb5c | 0ecb1e12bbe1fc3d5a63e68d788547d0ae92aeef | refs/heads/master | 2023-04-23T08:23:55.914154 | 2019-09-15T03:47:55 | 2019-09-15T03:47:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | # -*- coding:utf-8 -*-
import numpy as np
class Dataset(object):
def __init__(self, word_data, tag_data):
self._start = 0
self._cursor = 0
self._num_samples = word_data.shape[0]
self._word_data = word_data
self._tag_data = tag_data
@property
def word_data(self):
return self._word_data
@property
def tag_data(self):
return self._tag_data
@property
def num_samples(self):
return self._num_samples
def has_next(self):
return self._cursor < self._num_samples
def reset(self):
self._cursor = 0
self._start = 0
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
self._start = self._cursor
self._cursor += batch_size
if self._start + batch_size > self._num_samples:
rest_num_samples = self._num_samples - self._start
word_batch = np.zeros((batch_size, self._word_data.shape[1]), dtype=np.int32)
tag_batch = np.zeros((batch_size, self._word_data.shape[1]), dtype=np.int32)
word_batch[0:rest_num_samples] = self._word_data[self._start:self._num_samples]
tag_batch[0:rest_num_samples] = self._tag_data[self._start:self._num_samples]
return word_batch, tag_batch
else:
end = self._cursor
return self._word_data[self._start:end], self._tag_data[self._start:end] | [
"[email protected]"
] | |
5f1748a400b64ef1172d106936b097ddee8ac1b9 | b94aa4896a0cef82f70b9d1d1bfde805daa35103 | /app/tag/apps.py | 982271075d9ed2a57e93f378c330b98673792431 | [] | no_license | engrogerio/icontrol | 818ab659ae30f9270c47743ccb6402ef946ff55a | 53944e4c0381cc5e9a5a31ca36e437ee0d17ee67 | refs/heads/master | 2021-11-02T04:07:40.508205 | 2021-04-26T14:44:19 | 2021-04-26T14:44:19 | 90,092,719 | 1 | 0 | null | 2021-09-22T17:37:32 | 2017-05-03T00:56:17 | HTML | UTF-8 | Python | false | false | 89 | py | # from django.apps import AppConfig
#
#
# class TagConfig(AppConfig):
# name = 'tag'
| [
"[email protected]"
] | |
f1b5952e8a7a9ce23248158e8e80096bd678262e | 60f2e437c1918b7c05193ed1bc344f093a86549c | /ex.2.py | 2029cb73ff052f4656a3a7a55dcf485889851903 | [] | no_license | Furyheadgod/Coding-1 | 1e19d2f3cc42f710fcbaf6c2aeafef4da803e80c | 95b455fec7349e45ab53c5e5b10400c317c90fbd | refs/heads/master | 2022-11-27T19:14:19.044317 | 2020-08-03T06:54:03 | 2020-08-03T06:54:03 | 284,621,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print('Hello,world')
| [
"[email protected]"
] | |
55db3a4364388812f9330dd5cd0b3b3fcbe28cf3 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_25569.py | dd5aa8a33084e83e1ab009429efddba37d8befcf | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,834 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((419.471, 464.824, 407.725), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((474.833, 467.05, 449.77), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((541.551, 478.38, 494.736), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((506.413, 570.983, 397.661), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((681.76, 468.473, 628.552), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((459.492, 461.984, 430.998), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((458.408, 461.676, 429.658), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((455.187, 439.204, 446.898), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((477.5, 429.004, 433.009), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((496.239, 431.146, 412.239), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((505.447, 425.865, 386.089), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((500.046, 418.483, 359.512), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((431.21, 458.372, 430.983), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((567.522, 372.287, 291.484), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((669.2, 379.878, 466.001), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((669.2, 379.878, 466.001), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((642.344, 382.297, 454.476), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((615.166, 386.691, 444.203), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((587.01, 391.669, 437.422), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((558.726, 399.115, 435.802), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((532.258, 410.895, 441.901), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((509.615, 425.836, 453.446), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((724.004, 380.818, 319.628), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((294.663, 484.892, 589.792), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((519.733, 435.963, 498.695), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((519.733, 435.963, 498.695), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((514.256, 463.278, 507.101), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((513.377, 492.372, 503.912), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((529.293, 509.095, 486.132), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((431.931, 521.855, 409.05), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((632.756, 501.699, 556.082), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((464.91, 492.488, 439.311), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((464.891, 492.569, 439.283), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((437.27, 486.959, 438.853), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((424.645, 461.719, 439.572), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((432.052, 441.092, 421.73), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((452.441, 430.381, 405.19), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((469.987, 413.193, 391.514), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((488.127, 395.108, 379.511), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((463.221, 394.097, 461.38), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((514.543, 399.685, 296.142), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((456.861, 424.761, 495.316), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((468.852, 447.296, 489.352), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((496.003, 495.883, 474.514), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((523.22, 544.54, 459.68), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((447.85, 571.189, 447.112), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((616.663, 590.257, 450.723), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((393.813, 451.902, 460.96), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((417.718, 460.693, 474.091), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((439.105, 468.931, 492.443), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((462.158, 478.56, 508.555), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((485.141, 487.649, 525.747), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((510.024, 497.004, 540.4), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((466.722, 471.928, 480.486), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((562.403, 524.492, 601.075), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
84064f13bfdd302ead83e4bef54ded511537fc93 | 87dae6d55c66df1d40d6881272009319a1600cb3 | /Practica_6__Ejercicio_4__Inmigrantes2.py | f10e29a73ebd529401d44818335e229ff2f34447 | [] | no_license | abaldeg/EjerciciosPython | 92a30a82c05ec75aa7f313c8a6fa0dd052a8db11 | c8a3238587ebf6b10dbff32516c81bf00bb01630 | refs/heads/master | 2021-07-09T07:46:11.584855 | 2020-11-09T11:51:50 | 2020-11-09T11:51:50 | 210,438,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | # Practica 6 - Ejercicio 4
try:
entrada = open("Apellidos.txt")
armenia = open("armenia.txt", "wt")
italia = open("italia.txt", "wt")
españa = open("españa.txt", "wt")
print("\nLeyendo datos...")
datos = entrada.readline()
while datos:
if datos.upper().find("IAN,")!=-1:
armenia.write(datos.title()+"\n")
elif datos.upper().find("INI,")!=-1:
italia.write(datos.title()+"\n")
elif datos.upper().find("EZ,")!=-1:
españa.write(datos.title()+"\n")
datos = entrada.readline()
print("Archivos generados correctamente")
except FileNotFoundError:
print("No se encontró el archivo de entrada")
except OSError as error:
print("ERROR:",str(error))
finally:
try:
entrada.close()
armenia.close()
italia.close()
españa.close()
except NameError:
pass | [
"[email protected]"
] | |
a84a3a1979c3cf029fedadcb95908324fb1a010c | fd4fac4c6001dcedee7d5e87327368b5050c45d5 | /htseq/db/archive/fusion-tables-client-python-read-only/src/sql/sqlbuilder.py | 799056bbeb57353eadef5ef07793ae90e6830b53 | [] | no_license | nickloman/omicsmaps | 6b92e4dbe568287af1049d8d2814a5bad934942b | fd17a2f84d3dc4be86539e223c77f5e4bc5880ed | refs/heads/master | 2021-01-23T13:32:07.623246 | 2015-01-04T12:55:11 | 2015-01-04T12:55:11 | 28,738,982 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,551 | py | #!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
""" Builds SQL strings.
Builds SQL strings to pass to FTClient query method.
"""
__author__ = '[email protected] (Kathryn Hurley)'
class SQL:
""" Helper class for building SQL queries """
def showTables(self):
""" Build a SHOW TABLES sql statement.
Returns:
the sql statement
"""
return 'SHOW TABLES'
def describeTable(self, table_id):
""" Build a DESCRIBE <tableid> sql statement.
Args:
table_id: the ID of the table to describe
Returns:
the sql statement
"""
return 'DESCRIBE %d' % (table_id)
def createTable(self, table):
""" Build a CREATE TABLE sql statement.
Args:
table: a dictionary representing the table. example:
{
"tablename":
{
"col_name1":"STRING",
"col_name2":"NUMBER",
"col_name3":"LOCATION",
"col_name4":"DATETIME"
}
}
Returns:
the sql statement
"""
table_name = table.keys()[0]
cols_and_datatypes = ",".join(["'%s': %s" % (col[0], col[1])
for col in sorted(table.get(table_name).items())])
return "CREATE TABLE '%s' (%s)" % (table_name, cols_and_datatypes)
def select(self, table_id, cols=None, condition=None):
""" Build a SELECT sql statement.
Args:
table_id: the id of the table
cols: a list of columns to return. If None, return all
condition: a statement to add to the WHERE clause. For example,
"age > 30" or "Name = 'Steve'". Use single quotes as per the API.
Returns:
the sql statement
"""
stringCols = "*"
if cols: stringCols = ("'%s'" % ("','".join(cols))) \
.replace("\'rowid\'", "rowid") \
.replace("\'ROWID\'", "ROWID")
if condition: select = 'SELECT %s FROM %d WHERE %s' % (stringCols, table_id, condition)
else: select = 'SELECT %s FROM %d' % (stringCols, table_id)
return select
def update(self, table_id, cols, values=None, row_id=None):
""" Build an UPDATE sql statement.
Args:
table_id: the id of the table
cols: list of columns to update
values: list of the new values
row_id: the id of the row to update
OR if values is None and type cols is a dictionary -
table_id: the id of the table
cols: dictionary of column name to value pairs
row_id: the id of the row to update
Returns:
the sql statement
"""
if row_id == None: return None
if type(cols) == type({}):
updateStatement = ""
count = 1
for col,value in cols.iteritems():
if type(value).__name__ == 'int':
updateStatement = '%s%s=%d' % (updateStatement, col, value)
elif type(value).__name__ == 'float':
updateStatement = '%s%s=%f' % (updateStatement, col, value)
else:
updateStatement = "%s%s='%s'" % (updateStatement, col,
value.encode('string-escape'))
if count < len(cols): updateStatement = "%s," % (updateStatement)
count += 1
return "UPDATE %d SET %s WHERE ROWID = '%d'" % (table_id,
updateStatement, row_id)
else:
if len(cols) != len(values): return None
updateStatement = ""
count = 1
for i in range(len(cols)):
updateStatement = "%s'%s' = " % (updateStatement, cols[i])
if type(values[i]).__name__ == 'int':
updateStatement = "%s%d" % (updateStatement, values[i])
elif type(values[i]).__name__ == 'float':
updateStatement = "%s%f" % (updateStatement, values[i])
else:
updateStatement = "%s'%s'" % (updateStatement,
values[i].encode('string-escape'))
if count < len(cols): updateStatement = "%s," % (updateStatement)
count += 1
return "UPDATE %d SET %s WHERE ROWID = '%d'" % (table_id, updateStatement, row_id)
def delete(self, table_id, row_id):
""" Build DELETE sql statement.
Args:
table_id: the id of the table
row_id: the id of the row to delete
Returns:
the sql statement
"""
return "DELETE FROM %d WHERE ROWID = '%d'" % (table_id, row_id)
def insert(self, table_id, values):
""" Build an INSERT sql statement.
Args:
table_id: the id of the table
values: dictionary of column to value. Example:
{
"col_name1":12,
"col_name2":"mystring",
"col_name3":"Mountain View",
"col_name4":"9/10/2010"
}
Returns:
the sql statement
"""
stringValues = ""
count = 1
cols = values.keys()
values = values.values()
for value in values:
if type(value).__name__=='int':
stringValues = '%s%d' % (stringValues, value)
elif type(value).__name__=='float':
stringValues = '%s%f' % (stringValues, value)
else:
stringValues = "%s'%s'" % (stringValues, value.replace("'", "\\'"))
if count < len(values): stringValues = "%s," % (stringValues)
count += 1
str = 'INSERT INTO %d (%s) VALUES (%s)' % \
(int(table_id), ','.join(["'%s'" % col for col in cols]), stringValues)
return str
def dropTable(self, table_id):
""" Build DROP TABLE sql statement.
Args:
table_id: the id of the table
Returns:
the sql statement
"""
return "DROP TABLE %d" % (table_id)
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
4bd4c4e0ff1fbe41880fd8d8a63c1d1e1915ffd3 | 0afd2338fb9ea3f706b305286d05f7496c229797 | /performance_autotest/resultdata.py | 39761b10991397abcbd6b4474cb8b81c919c8c8f | [] | no_license | xuwei13253838782/script | 32ec8ddb9dacde3fcb103c50ba409596204644fc | 051d0f327636d814cdff244decb2078358dbea18 | refs/heads/master | 2022-04-27T23:03:19.797128 | 2020-04-26T11:00:48 | 2020-04-26T11:00:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,908 | py | # -*- coding:utf-8 -*-
# @Time : 2019/8/30 13:56
# @Author : zengln
# @File : resultdata.py
import json
import re
import os.path
from performance_autotest.log import logger
from performance_autotest.customexception import CustomError
class FileAnalyse(object):
"""
文件解析基础接口
"""
def __init__(self):
self.name = None
def file_analyse(self, file):
self.name = os.path.basename(file)
class NmonAnalyse(FileAnalyse):
def __init__(self):
# 初始化变量
super().__init__()
self.cpu = float(0)
self.mem = float(0)
self.disk = float(0)
self.net = float(0)
def set_ip(self, ip):
self.ip = ip
def file_analyse(self, file):
"""
Nmon 文件解析入口
:param file nmon 文件全路径
"""
logger.info("%s 文件数据解析开始" % os.path.basename(file))
super().file_analyse(file)
cpu_line = []
mem_line = []
disk_line = []
net_line = []
# 打开文件, 提取存有关键数据的行
with open(file, "r", encoding='utf8') as nmonfile:
text = nmonfile.readlines()
for line in text:
# cpu
if "CPU_ALL,T" in line:
cpu_line.append(line)
# mem
elif "MEM,T" in line:
mem_line.append(line)
# disk
elif "DISK" in line:
disk_line.append(line)
# net
elif "NET," in line:
net_line.append(line)
# 分别对关键数据进行处理
logger.info("开始提取cpu数据")
self.fetch_cpu(cpu_line)
logger.info("开始提取内存数据")
self.fetch_mem(mem_line)
logger.info("开始提取磁盘数据")
self.fetch_disk(disk_line)
logger.info("开始提取网络数据")
self.fetch_net(net_line)
logger.info("%s 文件数据解析结束" % os.path.basename(file))
def fetch_cpu(self, lines):
"""
:param lines: 带 cpu 关键数据行
"""
cpu_sum = float(0)
for line in lines:
cpus = line.split(",")
# sys% datas[2] user datas[3]
# total = sys + user
cpu_sum += (float(cpus[3]) + float(cpus[2]))
self.cpu = round(cpu_sum / len(lines), 2)
logger.debug("cpu: %.2f%%" % self.cpu)
def fetch_mem(self, lines):
"""
获取 mem 的关键数据包括: 纯物理内存使用率, 包含虚拟内存的内存使用率(无则为0)
:param lines: 带 mem 关键数据行
"""
mem_sum = float(0)
mem_virtual_sum = float(0)
for line in lines:
mems = line.split(",")
if len(mems) == 17:
# (Memtotal - Memfree - cached - buffers)/Memtotal * 100
mem_sum += ((float(mems[2]) - float(mems[6]) - float(mems[11]) - float(mems[14])) / float(
mems[2]) * 100)
elif len(mems) == 8:
# (Real total - Real free)/Real total * 100
mem_sum += ((float(mems[6]) - float(mems[4])) / float(mems[6]) * 100)
# (Real total - Real free + Virtual total - Virtual free) /(Real total + Virtual total) * 100
mem_virtual_sum += ((float(mems[6]) - float(mems[4]) + float(mems[7]) - float(mems[5])) / (
float(mems[6]) + float(mems[7])) * 100)
else:
raise CustomError("暂不支持此内存页面数据读取")
self.mem = (round(mem_sum / len(lines), 2), round(mem_virtual_sum / len(lines), 2))
logger.debug("mem: 不含虚拟内存的使用率 %.2f%%, 包含虚拟内存的使用率 %.2f%%" % (self.mem[0], self.mem[1]))
def fetch_disk(self, lines):
"""
获取 disk 的关键数据包括: disk-read(KB/S),disk-write(KB/S),io(io/s),disk-busy(%)
:param lines: 带 disk 关键数据行
"""
# 累加和
diskread_sum = float(0)
diskwrite_sum = float(0)
diskio_sum = float(0)
# diskbusy 每列均值
diskbusy_avg = []
# diskbusy 最大值
diskbusy_max = float(0)
# 次数统计
diskread_num = 0
diskwrite_num = 0
diskio_num = 0
diskbusy_num = 0
for line in lines:
disks = line.split(",")
if "DISKREAD,T" in line:
# diskread
disk_read_line_sum = float(0)
# 统计每行之和
for diskread_index in range(2, len(disks)):
disk_read_line_sum += float(disks[diskread_index])
# 累加
diskread_sum += disk_read_line_sum
# 计算总行数
diskread_num += 1
elif "DISKWRITE,T" in line:
# diskwrite
disk_write_line_sum = float(0)
# 统计每行之和
for diskwrite_index in range(2, len(disks)):
disk_write_line_sum += float(disks[diskwrite_index])
# 累加
diskwrite_sum += disk_write_line_sum
# 计算总行数
diskwrite_num += 1
elif "DISKXFER,T" in line:
# 每秒 IO 操作次数
disk_io_line_sum = float(0)
# 统计每行之和
for diskio_index in range(2, len(disks)):
disk_io_line_sum += float(disks[diskio_index])
# 累加
diskio_sum += disk_io_line_sum
# 计算总行数
diskio_num += 1
elif "DISKBUSY,T" in line:
# 获取 busi 每列初始值
if len(diskbusy_avg) == 0:
for disk_busy_line_index in range(2, len(disks)):
diskbusy_avg.append(float(disks[disk_busy_line_index]))
else:
diskbusy_num += 1
# 计算 busi 每列均值
for disk_busy_line_index in range(2, len(disks)):
diskbusy_avg[disk_busy_line_index - 2] = (float(
diskbusy_avg[disk_busy_line_index - 2]) * diskbusy_num + float(
disks[disk_busy_line_index])) / (diskbusy_num + 1)
# 获取 busi 最大列的均值
for disk_busy in diskbusy_avg:
if disk_busy > diskbusy_max:
diskbusy_max = disk_busy
self.disk = (round(diskread_sum / diskread_num, 2), round(diskwrite_sum / diskwrite_num, 2),
round(diskio_sum / diskio_num, 2), round(diskbusy_max, 2))
logger.debug("disk: diskread %.2f, diskwrite %.2f, diskio %.2f, diskbusy %.2f%%" % (
self.disk[0], self.disk[1], self.disk[2], self.disk[3]))
def fetch_net(self, lines):
"""
获取 net read(KB/s) 和 write(KB/s) 均值
:param lines:包含 net 关键数据的行
:return:
"""
# read 列索引
net_read_index = []
# write 列索引
net_write_index = []
# 所有 raad 列均值
net_read = []
# 所有 write 列均值
net_write = []
# read 列均值最大值
net_read_max = float(0)
# write 列均值最大值
net_write_max = float(0)
for line in lines:
disks = line.split(",")
if not "NET,T" in line:
for net_name_index in range(2, len(disks)):
net_name = disks[net_name_index]
# 获取 read 所在列
if "read" in net_name:
avg_read = 0
net_read_index.append(net_name_index)
# 获取 write 所在列
elif "write" in net_name:
avg_write = 0
net_write_index.append(net_name_index)
else:
# 获取每个 read 列的均值
if not len(net_read_index) == 0:
avg_read += 1
net_read_len_index = 0
for net_read_num_index in net_read_index:
if avg_read == 1:
net_read.append(float(disks[net_read_num_index]))
else:
net_read[net_read_len_index] = (float(net_read[net_read_len_index]) * (avg_read - 1) + float(
disks[net_read_num_index])) / avg_read
net_read_len_index += 1
# 获取每个 write 列的均值
if not len(net_write_index) == 0:
avg_write += 1
net_write_len_index = 0
for net_write_num_index in net_write_index:
if avg_write == 1:
net_write.append(float(disks[net_write_num_index]))
else:
net_write[net_write_len_index] = (float(net_write[net_write_len_index]) * (
avg_write - 1) + float(disks[net_write_num_index])) / avg_write
net_write_len_index += 1
for net_read_avg in net_read:
if net_read_avg > net_read_max:
net_read_max = net_read_avg
for net_write_avg in net_write:
if net_write_avg > net_write_max:
net_write_max = net_write_avg
self.net = (round(net_read_max, 2), round(net_write_max, 2))
logger.debug("net: 网络读取最大值 %.2f, 网络写入最大值 %.2f" % (self.net[0], self.net[1]))
class JmeterAnalyse(FileAnalyse):
def __init__(self):
# 保存解析结果
super().__init__()
self.result_dict = {}
def file_analyse(self, file):
"""
解析jmeter报告
:param file: jmeter报告所在目录
"""
logger.info("开始解析%s jmeter结果文件" % os.path.basename(file))
super().file_analyse(file)
file_all_path = file + r"\content\js\dashboard.js"
with open(file_all_path, "r", encoding="utf8") as jmeterfile:
text = jmeterfile.read()
static_data_match_result = re.match(r'[\s\S]*statisticsTable"\),(.*?), function', text)
if static_data_match_result is not None:
static_json_data = static_data_match_result.group(1).strip()
logger.debug("取到 %s 的压测结果数据为: %s" % (os.path.basename(file), static_json_data))
static_data = json.loads(static_json_data)
logger.debug("转化成json格式:%s" % static_data)
if "items" not in static_data.keys():
raise CustomError("%s获取压测结果失败,提取到的数据中未找到item标签" % os.path.basename(file))
static_items_data = static_data["items"]
logger.debug("提取到的数据为: %s" % static_items_data)
for static_item_data in static_items_data:
tmp_data = static_item_data['data']
# list: [Transaction, TPS, Error%, Response Time(average), Response Time(min), Response Time(max)]
tmp_list = [tmp_data[1], round(float(tmp_data[10]), 2), tmp_data[3], round(float(tmp_data[4]), 2),
round(float(tmp_data[5]), 2), round(float(tmp_data[6]), 2)]
# dict: {name:list}
self.result_dict[tmp_data[0]] = tmp_list
logger.debug("%s 提取结果 %s" % (os.path.basename(file), self.result_dict))
else:
raise CustomError("%s获取压测结果失败,未找到匹配数据" % os.path.basename(file))
logger.info("jmeter 结果文件解析结束")
class LoadRunnerAnalyse(FileAnalyse):
def __init__(self):
super().__init__()
self.result_dict = {}
def file_analyse(self, file):
"""
解析 Loadrunner 报告
:param file: loadrunner 报告所在路径
"""
logger.info("开始解析 %s loadrunner 报告" % os.path.basename(file))
super().file_analyse(file)
tps_list = []
resp_avg_list = []
resp_min_list = []
resp_max_list = []
summary_html_path = file + r'\An_Report1\summary.html'
content_html_path = file + r'\An_Report1\contents.html'
with open(summary_html_path, "r", encoding='utf8') as summary_html_file:
summary_str = summary_html_file.read()
transaction_name_list = re.findall(r'headers="LraTransaction Name".*?8">(.*?)</td>', summary_str)
logger.debug("trasaction_name_list is None: %s" % str(False if(transaction_name_list is not None) else True))
pass_list = re.findall(r'headers="LraPass".*?8">(.*?)</td>', summary_str)
logger.debug("pass_list is None: %s" % str(False if (pass_list is not None) else True))
fail_list = re.findall(r'headers="LraFail".*?8">(.*?)</td>', summary_str)
logger.debug("fail_list is None: %s" % str(False if (fail_list is not None) else True))
if not pass_list or not fail_list or not transaction_name_list:
raise CustomError("%s 有未匹配到的数据" % self.name)
# TPS 从 TPS html 页面中获取, 先从 contents.html 获取到 TPS html 名称
# Respnse Time 从 Response Time html 页面中获取,先从 contents.html 获取到 Response Time html 名称
with open(content_html_path, "r", encoding='utf8') as content_html_file:
content_str = content_html_file.read()
tps_html_name_match = re.match(r'[\s\S]*href="(.*?)" Target.*?>Transactions per Second', content_str)
response_time_html_name_match = re.match(r'[\s\S]*href="(.*?)" Target.*?>Average Transaction Response Time'
, content_str)
if tps_html_name_match is None:
raise CustomError("%s 未找到 tps html 报告" % self.name)
elif response_time_html_name_match is None:
raise CustomError("%s 未找到 Respnse Time html 报告" % self.name)
tps_html_name = tps_html_name_match.group(1)
logger.debug("%s tps html name %s " % (os.path.basename(file), tps_html_name))
tps_html_path = file + r'\An_Report1' + os.path.sep + tps_html_name
logger.debug("%s tps html path %s " % (os.path.basename(file), tps_html_path))
response_time_html_name = response_time_html_name_match.group(1)
logger.debug("%s response time html name %s" % (os.path.basename(file), response_time_html_name))
response_time_html_path = file + r'\An_Report1' + os.path.sep + response_time_html_name
logger.debug("%s response time html path %s" % (os.path.basename(file), response_time_html_path))
self.fetch_tps(tps_html_path, tps_list)
self.fetch_resp_time(response_time_html_path, resp_avg_list, resp_min_list, resp_max_list)
# 长整数取到的数字带有逗号,例如1024是1,024,在取数字时,先将逗号去掉
for index in range(0, len(transaction_name_list)):
transaction_name = transaction_name_list[index]
logger.debug("transaction name %s" % transaction_name)
tps = tps_list[index]
logger.debug("tps %s" % tps)
pass_tsc = pass_list[index].replace(",", "")
logger.debug("pass transaction: %s" % pass_tsc)
fail_tsc = fail_list[index].replace(",", "")
logger.debug("fail transaction: %s" % fail_tsc)
# 时间转化成 ms 单位
resp_avg = resp_avg_list[index]
logger.debug("resp average time : %sms" % resp_avg)
resp_max = resp_max_list[index]
logger.debug("resp max time: %sms" % resp_max)
resp_min = resp_min_list[index]
logger.debug("resp min time: %sms" % resp_min)
all_tsc = str(int(fail_tsc) + int(pass_tsc))
error = round(int(fail_tsc)/int(all_tsc) * 100, 2)
# list: [Transaction, TPS, Error%, Response Time(average), Response Time(min), Response Time(max)]
data_list = [all_tsc, tps, error, resp_avg, resp_min, resp_max]
# dict:{transaction name:list}
self.result_dict[transaction_name] = data_list
logger.info("loadrunner 报告解析结束")
def fetch_tps(self, file_path, tps_list):
"""
提取 tps html 中 tps 的值
:param file_path: tps html 绝对路径
:param tps_list: 保存 tps 值的 list
"""
logger.debug("%s 开始提取 tps 数据" % self.name)
with open(file_path, "r", encoding='utf8') as tps_html_file:
tps_str = tps_html_file.read()
tps_table_list = re.findall(r'<tr class="legendRow">([\s\S]*?)</tr>', tps_str)
if not tps_table_list:
raise CustomError("%s 未匹配到 tps 数据" % self.name)
logger.debug("%s 共匹配到 %d 条tps记录" % (self.name, len(tps_table_list)))
for index in range(0, len(tps_table_list)):
tps_table_str = tps_table_list[index].replace("\n", "")
tps_data_list = tps_table_str.split("<td>", 5)
# 判断是否为成功记录,成功记录提取数据, 失败记录跳过
if tps_data_list[2][:-5].split(":")[1] != "Pass":
continue
logger.debug("%s 交易 transaction %s tps %s" % (
self.name, tps_data_list[2][:-5].split(":")[0], tps_data_list[4][:-5]))
tps_list.append(tps_data_list[4][:-5])
def fetch_resp_time(self, file_path, resp_avg_list, resp_min_list, resp_max_list):
"""
提取 response time html 中 各 response time 的值
:param file_path: response time html 绝对路径
:param resp_avg_list: 保存 response time average 值
:param resp_min_list: 保存 response time min 值
:param resp_max_list: 保存 response time max 值
"""
logger.debug("%s 开始提取 response time 数据" % self.name)
with open(file_path, "r", encoding='utf8') as response_time_html_file:
response_time_str = response_time_html_file.read()
response_time_table_list = re.findall(r'<tr class="legendRow">([\s\S]*?)</tr>', response_time_str)
if not response_time_table_list:
raise CustomError("%s 未匹配到 response time 数据" % self.name)
logger.debug("%s 共匹配到 %d 条 response time 记录" % (self.name, len(response_time_table_list)))
for index in range(0, len(response_time_table_list)):
response_time_table_str = response_time_table_list[index].replace("\n", "")
response_time_data_list = response_time_table_str.split("<td>", 6)
trasaction_name = response_time_data_list[2][:-5]
# 单位转化为 ms
response_time_average = round(float(response_time_data_list[4][:-5]) * 1000, 2)
logger.debug("%s 交易 transcation %s response time average: %.2fms" % (
self.name, trasaction_name, response_time_average))
resp_avg_list.append(response_time_average)
response_time_min = round(float(response_time_data_list[3][:-5]) * 1000, 2)
logger.debug("%s 交易 transcation %s response time min: %.2fms" % (
self.name, trasaction_name, response_time_min))
resp_min_list.append(response_time_min)
response_time_max = round(float(response_time_data_list[5][:-5]) * 1000, 2)
logger.debug("%s 交易 transcation %s response time max: %.2fms" % (
self.name, trasaction_name, response_time_max))
resp_max_list.append(response_time_max)
if __name__ == "__main__":
# nmonfile = r'D:\work\工具\nmon\71Vusr.nmon'
# nmonfile = r'C:\Users\zengjn22046\Desktop\大额贷记往账1Vuser.nmon'
# nmon = NmonAnalyse()
# nmon.file_analyse(nmonfile)
# print(nmon.cpu)
# print(nmon.mem)
# print(nmon.disk)
# print(nmon.net)
jmetrfile = r"C:\Users\zengjn22046\Desktop\yecxwz50"
jmeter = JmeterAnalyse()
jmeter.file_analyse(jmetrfile)
# loadrunner_file = r'C:\Users\zengjn\Desktop\Get\scenario\res'
# loadrunner = LoadRunnerAnalyse()
# loadrunner.file_analyse(loadrunner_file)
# print(loadrunner.result_dict) | [
"[email protected]"
] | |
83a367b329229b92477b1a88fa5d6ed59bce7bf3 | 9577a61a677142067b9c9f9b60e192e66904d8aa | /docs/images/plot.py | 7879ac19e771ef5dbef1c2416913ed4a67c88706 | [
"Apache-2.0"
] | permissive | QuantumBFS/YaoAD.jl | 9e9a8409e927564ebecaaae1218b9757e17c6abc | e591ccbe6927a907d6454458676e6035a966f09b | refs/heads/master | 2020-05-26T10:01:25.824430 | 2020-03-11T02:09:47 | 2020-03-11T02:09:47 | 188,196,347 | 2 | 1 | Apache-2.0 | 2020-02-23T21:54:54 | 2019-05-23T08:49:21 | Julia | UTF-8 | Python | false | false | 534 | py | #!/usr/bin/env python
import fire
from plotlib import *
class PLT(object):
def fig1(self, tp='pdf'):
nsite = 4
data = np.loadtxt("../data/loss_history_%d.dat"%nsite)
EG = -8
with DataPlt(figsize=(5,4), filename="fig1.%s"%tp) as dp:
plt.plot(np.arange(len(data)), data/nsite, lw=2)
plt.xlabel("Training Step")
plt.ylabel("Energy/Site")
plt.axhline(EG/nsite, ls='--', lw=2)
plt.tight_layout()
plt.xlim(0,200)
fire.Fire(PLT())
| [
"[email protected]"
] | |
80f65c753fd32363d2f48a628df5baae43e116fe | 22e6dcbebad329b32579e531af8b33bc657088c9 | /AtCoder/ABC111/ProbC.py | 7857a4a06712c062e6574447a30cb76569137f91 | [] | no_license | daigo0927/ProgrammingContest | a63b74bb79ece46181b03dc359bf665604b11ea5 | f54aa8f485ebfd30d5ee84fd74fa9e0661c2a7df | refs/heads/master | 2021-06-21T09:26:23.699668 | 2019-06-22T18:51:32 | 2019-06-22T18:51:32 | 132,655,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | n = int(input())
v = list(map(int, input().split()))
ans = 0
even = v[::2]
nums_e = {}
for e in even:
if not e in nums_e.keys():
nums_e[e] = 1
else:
nums_e[e] += 1
e_sorted = []
for key, value in nums_e.items():
e_sorted.append([value, key])
e_sorted = sorted(e_sorted, reverse = True) + [[0, 0-1]]
odd = v[1::2]
nums_o = {}
for o in odd:
if not o in nums_o.keys():
nums_o[o] = 1
else:
nums_o[o] += 1
o_sorted = []
for key, value in nums_o.items():
o_sorted.append([value, key])
o_sorted = sorted(o_sorted, reverse = True) + [[-1, 0]]
if e_sorted[0][1] != o_sorted[0][1]:
print(n - e_sorted[0][0] - o_sorted[0][0])
else:
ans1 = n - e_sorted[1][0] - o_sorted[0][0]
ans2 = n - e_sorted[0][0] - o_sorted[1][0]
print(min(ans1, ans2))
| [
"[email protected]"
] | |
9cc470b1f05b841aacd134bb19085958e4ce42f5 | ea1373d9a58ad198c15d35a6daddb4e06d21aa39 | /netScripts/3DCNN_Voxel/pyScripts/train_3DCNN_Atrous.py | 49f410789f33494add4cf245cea132bbeacb5b00 | [] | no_license | screnary/VoxSegNet | bb2778dfc460dfafdbd923f79755f7f0776dc36f | 264f2efc0a589018a1fc68c111626beacbe095a5 | refs/heads/master | 2020-09-04T03:41:47.067129 | 2019-11-07T06:34:29 | 2019-11-07T06:34:29 | 219,649,857 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,507 | py | """ 3DCNN, Unet and others train """
""" WZJ:20171013-Feature Extraction Network-(Pretrain 3DCNN)"""
""" WZJ:20180122-Feature Extraction Network-(Pretrain 3DCNN_Unet_extract)"""
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import json
import os
import sys
import pdb
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
# sys.path.append(os.path.dirname(BASE_DIR))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
Common_SET_DIR = os.path.join(BASE_DIR, '../CommonFile')
sys.path.append(Common_SET_DIR)
import globals as g_
# Network Training Parameters Setting
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='3DCNN_Atrous', help='Model name: baseline segmentation network [default: 3DCNN_Atrous]')
parser.add_argument('--log_dir', default='log-3DCNN_Atrous', help='Log dir [default: log-3DCNN_Atrous]')
# parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--vox_size', type=int, default=48, help='Voxel space size [32/48/128] [default: 48]')
parser.add_argument('--max_epoch', type=int, default=100, help='Epochs to run [default: 100]')
parser.add_argument('--batch_size', type=int, default=4, help='Batch size during training [default: 4]')  # for extraction, default 8; if using RoI, maybe less
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--wd', type=float, default=0.0005, help='Weight decay [default: 0.0005]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=8000, help='Decay step (in samples) for lr decay [default: 8000]')
parser.add_argument('--decay_rate', type=float, default=0.75, help='Decay rate for lr decay [default: 0.75]')
parser.add_argument('--Continue_MODE', type=bool, default=False, help='Resume training from a checkpoint [default: False, train from scratch]')  # note: type=bool treats any non-empty string as True
parser.add_argument('--Continue_Epoch', type=int, default=100, help='Epoch to continue training from [default: 100]')
parser.add_argument('--clsname', default='', help='ShapeNet category name, e.g. Motorbike')
parser.add_argument('--atrous_block_num', type=int, default=3, help='Number of stacked Atrous blocks [1/2/3] [default: 3]')
parser.add_argument('--withEmpty', dest='ignore_Empty', action='store_false')
parser.add_argument('--noEmpty', dest='ignore_Empty', action='store_true')
parser.set_defaults(ignore_Empty=False)
# parser.add_argument('--ignore_Empty', type=bool, default=False)
FLAGS = parser.parse_args()
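
# Example invocation (sketch; the category name must exist in g_.part_dict and
# the corresponding hdf5_data file lists must already be prepared):
#   python train_3DCNN_Atrous.py --gpu 0 --clsname Motorbike --vox_size 48 --atrous_block_num 3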
# CLASSNAME = FLAGS.clsname
# print(CLASSNAME)
# # CLASSNAME = g_.CATEGORY_NAME
# part_num = g_.NUM_CLASSES - 1  # part num, Motorbike is 6; Earphone is 3; Rocket is 3;
CLASSNAME = FLAGS.clsname
part_num = g_.part_dict[CLASSNAME]
print('class name is ', CLASSNAME, '\tpart num is ', part_num,
'\tignore_Empty:', FLAGS.ignore_Empty,
'\tContinue_MODE:', FLAGS.Continue_MODE)
if CLASSNAME == 'Rocket' or CLASSNAME == 'Airplane':
upright = 'z'
else:
upright = 'y'
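# 'upright' records the category's canonical up axis (Rocket/Airplane are
# z-up in this dataset); presumably consumed elsewhere in the pipeline.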
BATCH_SIZE = FLAGS.batch_size
VOX_SIZE = FLAGS.vox_size
# NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model + '.py')
LOG_DIR = os.path.join(FLAGS.log_dir+g_.Data_suf, str(VOX_SIZE), CLASSNAME)
if not FLAGS.ignore_Empty:
LOG_DIR = os.path.join(FLAGS.log_dir+g_.Data_suf, str(VOX_SIZE),
CLASSNAME+'-withBG'+'-ABlock'+str(FLAGS.atrous_block_num))
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)  # creates the log root and vox-size parent dirs as needed
MODEL_STORAGE_PATH = os.path.join(LOG_DIR, "trained_models")
if not os.path.exists(MODEL_STORAGE_PATH):
os.mkdir(MODEL_STORAGE_PATH)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_3DCNN_Atrous.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
Continue_MODE = FLAGS.Continue_MODE # "train_from_scratch"--False or "train"--True
Continue_Epoch = FLAGS.Continue_Epoch
checkpoint_dir = os.path.join(FLAGS.log_dir+g_.Data_suf, str(VOX_SIZE),
CLASSNAME+'-withBG'+'-ABlock'+str(FLAGS.atrous_block_num), "trained_models")
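# NOTE: checkpoint_dir always uses the '-withBG-ABlock' naming, so resuming
# a --noEmpty run (whose LOG_DIR omits that suffix) would need this adjusted.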
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
# ShapeNet official train/val split
TRAIN_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf+'/'+'0.'+CLASSNAME+'_filelistset/'+'train_hdf5_file_list.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf+'/'+'0.'+CLASSNAME+'_filelistset/'+'test_hdf5_file_list.txt'))
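# Each list file enumerates the HDF5 chunks for CLASSNAME; they are expected
# to hold voxelized shapes plus per-voxel part labels matching the volume and
# segmentation placeholders built in train() below.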
# Category-name parsing
color_map_file = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf+'/part_color_mapping.json')
color_map = json.load(open(color_map_file, 'r'))
all_obj_cats_file = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf+'/all_object_categories.txt')
fin = open(all_obj_cats_file, 'r')
lines = [line.rstrip() for line in fin.readlines()]
all_obj_cats = [(line.split()[0], line.split()[1]) for line in lines]
fin.close()
all_cats = json.load(open(os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf+'/overallid_to_catid_partid.json'), 'r'))
NUM_CATEGORIES = 16
NUM_PART_CATS = len(all_cats)
# --------------- function definitions ---------------
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
# decayed_learning_rate = learning_rate * decay_rate^(global_step / decay_step)
def get_learning_rate(batch):
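    """Staircased exponential learning-rate decay, clipped below at 1e-5.

    Sketch with the defaults (BASE_LEARNING_RATE=0.001, BATCH_SIZE=4,
    DECAY_STEP=8000, DECAY_RATE=0.75): the rate is multiplied by 0.75
    once every 2000 optimizer steps, i.e. every 8000 training samples.
    """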
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE,
batch * BATCH_SIZE, # current index into the dataset
DECAY_STEP,
DECAY_RATE,
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # clip the learning rate
return learning_rate
def get_bn_decay(batch):
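    """Batch-norm momentum schedule.

    bn_momentum starts at BN_INIT_DECAY (0.5) and halves every
    BN_DECAY_DECAY_STEP samples, so bn_decay = 1 - bn_momentum ramps
    from 0.5 toward 1.0 and is clipped at BN_DECAY_CLIP (0.99).
    """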
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch * BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def augment_to_target_num(fea, t_num):
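    """Tile `fea` along axis 0 until it reaches `t_num` rows, then truncate.

    E.g. a (3, C) array with t_num=7 becomes [fea; fea; fea][:7, ...].
    Requires fea.shape[0] <= t_num (asserted below).
    """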
assert(fea.shape[0] <= t_num)
cur_len = fea.shape[0]
res = np.array(fea)
while cur_len < t_num:
res = np.concatenate((res, fea)) # axis=0
cur_len += fea.shape[0]
return res[:t_num, ...]
def train():
with tf.Graph().as_default():
with tf.device('/gpu:' + str(GPU_INDEX)):
volumes_ph, seg_ph = MODEL.placeholder_inputs(BATCH_SIZE, VOX_SIZE)
is_training_ph = tf.placeholder(tf.bool, shape=())
print(is_training_ph)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0) # in the train_op operation, increased by 1
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
if FLAGS.ignore_Empty:
pred, feature = MODEL.get_model(volumes_ph, is_training_ph,
part_num, bn_decay=bn_decay, weight_decay=FLAGS.wd)
loss = MODEL.get_loss(pred, seg_ph, part_num)
else:
if FLAGS.atrous_block_num == 1:
# 1 block [1,2,3]
pred, feature = MODEL.get_model_1block(volumes_ph, is_training_ph,
part_num+1, bn_decay=bn_decay, weight_decay=FLAGS.wd)
elif FLAGS.atrous_block_num == 2:
# 2 block [1,2,3] [1,3,5]
pred, feature = MODEL.get_model_2block_v1(volumes_ph, is_training_ph,
part_num+1, bn_decay=bn_decay, weight_decay=FLAGS.wd)
elif FLAGS.atrous_block_num == 3:
# 3 block [1,2,3] [1,3,5] [2,3,7]
pred, feature = MODEL.get_model_3block(volumes_ph, is_training_ph,
part_num+1, bn_decay=bn_decay, weight_decay=FLAGS.wd)
loss = MODEL.get_loss_withback(pred, seg_ph, part_num+1)
tf.summary.scalar('loss', loss)
if FLAGS.ignore_Empty:
seg_new = tf.subtract(seg_ph, tf.constant(1, dtype=tf.float32))
ignore_void = tf.constant(-1, dtype=tf.float32)
mask_valid = tf.cast(tf.not_equal(seg_new, ignore_void), dtype=tf.float32)
correct = tf.equal(tf.argmax(pred, -1), tf.to_int64(seg_new)) # argmax value >= 0, so ignore -1
accuracy_per_instance = tf.reduce_sum(
tf.cast(correct, tf.float32), axis=[1,2,3])/\
tf.reduce_sum(mask_valid, axis=[1,2,3])
accuracy = tf.reduce_mean(accuracy_per_instance)
else:
seg_new = seg_ph
ignore_void = tf.constant(0, dtype=tf.float32)
mask_valid = tf.cast(tf.not_equal(seg_new, ignore_void), dtype=tf.float32)
correct = tf.cast(
tf.equal(tf.argmax(pred, -1), tf.to_int64(seg_new)), tf.float32)\
*mask_valid # argmax value >= 0, so ignore -1
accuracy_per_instance = tf.reduce_sum(
tf.cast(correct, tf.float32), axis=[1,2,3])/\
tf.reduce_sum(mask_valid, axis=[1,2,3])
accuracy = tf.reduce_mean(accuracy_per_instance)
tf.summary.scalar('accuracy', accuracy)
# pdb.set_trace()
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables
saver = tf.train.Saver(max_to_keep=50)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_ph: True})
# Unet_vars = tf.trainable_variables()
# for v in Unet_vars:
# print('var_name is : ', v.name) # len=43
# pdb.set_trace()
# all_vars = tf.all_variables()
# for v in all_vars:
# print('var_name is : ', v.name) # len=149
# pdb.set_trace()
# continue training from last training
if Continue_MODE:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print("Continue training from the model {}".format(
ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
ops = {'volumes_ph': volumes_ph,
'seg_ph': seg_ph,
'is_training_ph': is_training_ph,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
if not os.path.exists(MODEL_STORAGE_PATH):
os.mkdir(MODEL_STORAGE_PATH)
epoch_start = 0
if Continue_MODE:
epoch_start = Continue_Epoch # set by human
# eval_one_epoch(sess, ops, test_writer)
IoU_acc_max = 0.0
epoch_use = 0
for epoch in range(MAX_EPOCH):
log_string('\n>>>>>>>>>> Training for the epoch %d/%d ...' % (epoch+epoch_start+1, MAX_EPOCH+epoch_start))
train_one_epoch(sess, ops, train_writer)
if (epoch+epoch_start+1) % 1 == 0:
log_string('<<<<<<<<<< Testing on the test dataset ...')
sys.stdout.flush()
IoU_acc = eval_one_epoch(sess, ops, test_writer)
save_Flag = False
if IoU_acc >= IoU_acc_max:
IoU_acc_max = IoU_acc
epoch_use = epoch+epoch_start+1
save_Flag = True
# Save the variables to disk
# if (save_Flag is True) or ((epoch+epoch_start+1) >= 5 and (epoch+epoch_start+1) % 5 == 0):
if (save_Flag is True) or epoch+epoch_start+1==MAX_EPOCH:
save_path = saver.save(sess, os.path.join(MODEL_STORAGE_PATH, "model_epoch_" + str(epoch+epoch_start+1) + ".ckpt"))
log_string("Model saved in file: %s" % save_path)
log_string("use model: %s, iou is %.4f" % ('epoch_num_'+str(epoch_use), IoU_acc_max))
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
for fn in range(len(TRAIN_FILES)):
cur_train_filename = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf, str(VOX_SIZE), TRAIN_FILES[train_file_idxs[fn]])
log_string('--- Loading train file ' + TRAIN_FILES[train_file_idxs[fn]] + '---')
current_data, current_seg, _ = provider.load_h5_volumes_data(cur_train_filename)
# pdb.set_trace()
num_data = current_data.shape[0]
num_batches = num_data // BATCH_SIZE
# # shuffle the training data
idx = np.arange(num_data)
np.random.shuffle(idx)
current_data = current_data[idx, ...]
current_seg = current_seg[idx, ...] # shape is [b, vsize, vsize, vsize, 1]
current_seg = np.squeeze(current_seg) # to the same dim of placeholder [b, vsize, vsize, vsize]
current_seg = current_seg.astype(np.float32)
total_accuracy = 0.0
total_seen = 0.0
loss_sum = 0.0
total_acc_iou = 0.0
if FLAGS.ignore_Empty:
# from 0 to part_num
iou_oids = range(part_num) # for Motorbike part detection
else:
# from 0 to part_num+1
iou_oids = range(1, part_num+1, 1)
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
if start_idx % 100 == 0:
print('%d/%d ...' % (start_idx, num_data))
input_data = current_data[start_idx:end_idx, ...]
input_seg = current_seg[start_idx:end_idx, ...]
# augment data by rotation along upright axis
#input_data, input_seg = provider.rotate_voxel_data(
# input_data, input_seg, axis=upright)
# pdb.set_trace()
feed_dict = {ops['volumes_ph']: input_data,
ops['seg_ph']: input_seg,
ops['is_training_ph']: is_training}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.reshape(np.argmax(pred_val, -1), (BATCH_SIZE, VOX_SIZE, VOX_SIZE, VOX_SIZE))
if FLAGS.ignore_Empty:
cur_seg_new = input_seg - 1.0
else:
cur_seg_new = input_seg
# pred is from 0 value, but seg gt is from 1 value (0 for the back)
cur_voxel = np.reshape(input_data,
(BATCH_SIZE, VOX_SIZE, VOX_SIZE, VOX_SIZE))
mask = cur_voxel>0
mask = mask.astype(np.float32)
correct = np.sum((pred_val == cur_seg_new)*mask, axis=(1,2,3))
seen_per_instance = np.sum(mask, axis=(1,2,3))
acc_per_instance = np.array(correct) / np.array(seen_per_instance)
total_accuracy += np.sum(acc_per_instance)
total_seen += BATCH_SIZE
loss_sum += loss_val
iou_log = '' # iou details string
intersect_mask = np.int32((pred_val == cur_seg_new)*mask) # [B,V,V,V]
# pdb.set_trace()
for bid in range(BATCH_SIZE):
# bid # batch id
total_iou = 0.0 # for this 3D shape.
intersect_mask_bid = intersect_mask[bid, ...]
mask_bid = mask[bid, ...]
pred_val_bid = pred_val[bid, ...]
cur_seg_bid = cur_seg_new[bid, ...]
for oid in iou_oids:
n_pred = np.sum((pred_val_bid == oid) * mask_bid) # only the valid grids' pred
# n_pred = np.sum(seg_pred_val == oid)
n_gt = np.sum(cur_seg_bid == oid)
n_intersect = np.sum(np.int32(cur_seg_bid == oid) * intersect_mask_bid)
n_union = n_pred + n_gt - n_intersect
iou_log += '_pred:' + str(n_pred) + '_gt:' + str(n_gt) + '_intersect:' + str(n_intersect) + '_union:' + str(n_union) + '_'
if n_union == 0:
total_iou += 1
iou_log += '_:1\n'
else:
total_iou += n_intersect * 1.0 / n_union # sum across parts
iou_log += '_:'+str(n_intersect*1.0/n_union)+'\n'
avg_iou = total_iou / len(iou_oids) # average iou across parts, for one object
# pdb.set_trace()
total_acc_iou += avg_iou
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_accuracy / float(total_seen)))
log_string('train IoU accuracy: %f\n-----------------------------'
% (total_acc_iou / float(total_seen)))
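# Worked IoU example for the per-part loop above (numbers hypothetical): with
# n_pred=30, n_gt=50 and n_intersect=20 the union is 30+50-20 = 60, so that
# part contributes 20/60 ~= 0.333 to total_iou; a part absent from both
# prediction and ground truth (n_union == 0) contributes 1.0 by convention.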
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
total_accuracy = 0.0
total_seen = 0.0
loss_sum = 0.0
total_acc_iou = 0.0
if FLAGS.ignore_Empty:
# from 0 to part_num
iou_oids = range(part_num)
else:
# from 0 to part_num+1
iou_oids = range(1, part_num+1, 1)
# total_seen_class = np.zeros((NUM_CATEGORIES)).astype(np.float32)
# total_accuracy_class = np.zeros((NUM_CATEGORIES)).astype(np.float32)
for fn in range(len(TEST_FILES)):
cur_test_filename = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf, str(VOX_SIZE), TEST_FILES[fn])
log_string('----Loading Validation file ' + TEST_FILES[fn] + '----')
# pdb.set_trace()
current_data, current_seg, _ = provider.load_h5_volumes_data(cur_test_filename)
# current_label = np.squeeze(current_label)
current_seg = np.squeeze(current_seg)
current_seg = current_seg.astype(np.float32)
BATCH_SIZE_eval = BATCH_SIZE
num_data = current_data.shape[0]
        num_batches = int(np.ceil(num_data / float(BATCH_SIZE_eval)))
# pdb.set_trace()
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE_eval
end_idx = (batch_idx + 1) * BATCH_SIZE_eval
if min(num_data-start_idx, end_idx-start_idx) < BATCH_SIZE_eval:
input_data = augment_to_target_num(current_data[start_idx:min(end_idx, num_data), ...], BATCH_SIZE_eval)
input_seg = augment_to_target_num(current_seg[start_idx:min(end_idx, num_data), ...], BATCH_SIZE_eval)
else:
input_data = current_data[start_idx:end_idx, ...]
input_seg = current_seg[start_idx:end_idx, ...]
feed_dict = {ops['volumes_ph']: input_data,
ops['seg_ph']: input_seg,
ops['is_training_ph']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
if math.isnan(loss_val):
print('Detected NaN')
# pdb.set_trace()
pred_val = np.reshape(np.argmax(pred_val, -1), (BATCH_SIZE_eval, VOX_SIZE, VOX_SIZE, VOX_SIZE)) # B*Vox_size*Vox_size*Vox_size
if FLAGS.ignore_Empty:
cur_seg_new = input_seg - 1.0
else:
cur_seg_new = input_seg
mask = np.reshape(input_data > 0,
(BATCH_SIZE, VOX_SIZE, VOX_SIZE, VOX_SIZE))
mask = mask.astype(np.float32) # [B,V,V,V]
correct = np.sum((pred_val == cur_seg_new)*mask, axis=(1,2,3))
seen_per_instance = np.sum(mask, axis=(1,2,3))
acc_per_instance = np.array(correct) / np.array(seen_per_instance)
total_accuracy += np.sum(acc_per_instance)
total_seen += BATCH_SIZE_eval
loss_sum += (loss_val * BATCH_SIZE_eval)
iou_log = '' # iou details string
intersect_mask = np.int32((pred_val == cur_seg_new)*mask) # [B,V,V,V]
# pdb.set_trace()
for bid in range(BATCH_SIZE_eval):
# bid # batch id
total_iou = 0.0 # for this 3D shape.
intersect_mask_bid = intersect_mask[bid, ...]
mask_bid = mask[bid, ...]
pred_val_bid = pred_val[bid, ...]
cur_seg_bid = cur_seg_new[bid, ...]
for oid in iou_oids:
n_pred = np.sum((pred_val_bid == oid) * mask_bid) # only the valid grids' pred
# n_pred = np.sum(seg_pred_val == oid)
n_gt = np.sum(cur_seg_bid == oid)
n_intersect = np.sum(np.int32(cur_seg_bid == oid) * intersect_mask_bid)
n_union = n_pred + n_gt - n_intersect
iou_log += '_pred:' + str(n_pred) + '_gt:' + str(n_gt) + '_intersect:' + str(n_intersect) + '_union:' + str(n_union) + '_'
if n_union == 0:
total_iou += 1
iou_log += '_:1\n'
else:
total_iou += n_intersect * 1.0 / n_union # sum across parts
iou_log += '_:'+str(n_intersect*1.0/n_union)+'\n'
avg_iou = total_iou / len(iou_oids) # average iou across parts, for one object
# pdb.set_trace()
total_acc_iou += avg_iou
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f' % (total_accuracy / float(total_seen)))
log_string('eval IoU accuracy: %f' % (total_acc_iou / float(total_seen)))
return total_acc_iou / float(total_seen)
if __name__ == "__main__":
train()
LOG_FOUT.close()
| [
"[email protected]"
] | |
e02ff52237bff0755f85da47d9f1621c003504e9 | 6e9077236ad68b60e1076c3f1ae4a4664235f5b1 | /src/collocate.py | ad7070f75cf2d8e34478d4727dd1ac6f48ed3228 | [] | no_license | julieweeds/Compounds | 084fc203c95a7ef5943c2093e8bced73b79a0c02 | 16b9bdc4a8aafee1e5b041e59b8bcf19954a48b8 | refs/heads/master | 2020-06-04T09:42:05.368744 | 2014-06-26T16:22:18 | 2014-06-26T16:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,082 | py | __author__ = 'juliewe'
import sys,os,random
from operator import itemgetter
from conf import configure
class TaggingException(Exception):
pass
def untag(astring,achar='/'):
parts=astring.split(achar)
if len(parts)==2:
return(parts[0],parts[1])
elif len(parts)>1:
tag=parts.pop()
word=parts[0]
for part in parts[1:]:
word = word+'-'+part
return (word,tag)
else:
raise TaggingException
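# e.g. untag('cat/N') -> ('cat', 'N'); with extra separators the tail becomes
# the tag and the rest is re-joined with '-', so untag('ice/cream/N') ->
# ('ice-cream', 'N'); a string with no separator raises TaggingException.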
class Collocates:
def __init__(self,config):
self.clist=[]
self.fdict={}
#self.midict={}
self.moddict={}
self.modfdict={}
self.entrylist=[]
self.entrydict={}
self.noundict={}
self.freqthresh=config['freqthresh']
self.entrythresh=config['entrythresh']
self.featurematch=config['featurematch']
self.tagmatch=config['tagmatch']
self.stopwordlimit=config['stopwordlimit']
self.testing=config['testing']
self.sample=config['sample']
self.parameters=config
if self.testing:
self.linestop=10
else:
self.linestop=100
random.seed(42)
self.sorted=False
# def makemoddict(self):
# filepath=os.path.join(self.parameters['parentdir'],self.parameters['altdatadir'],self.parameters['freqfile'])
# with open(filepath,'r') as instream:
# print "Reading "+filepath
# linesread=0
# for line in instream:
# linesread+=1
# fields=line.rstrip().split('\t')
# try:
# noun = fields[0]
# self.moddict[noun]=1
# if self.noundict.get(noun,0)>0:
# while len(fields[1:])>0:
# freq=float(fields.pop())
# feature=fields.pop()
# if freq>self.freqthresh:
# parts=feature.split(':')
# if parts[0]==self.parameters['inversefeatures'][self.featurematch] and self.entrydict.get(parts[1],0)>0:
# label=parts[1]+self.featurematch+':'+noun
# self.modfdict[label]=freq
# except TaggingException:
# print "Ignoring "+line
# if linesread%self.linestop==0:
# print "Read "+str(linesread)+" lines"
# if self.testing: break
# print "Size of mod freq dict is "+str(len(self.modfdict.keys()))
def processfreqfile(self):
self.inversedict={}
if len(self.moddict.keys())>0:
usemoddict=True
else:
usemoddict=False
filepath = os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['freqfile'])
outpath = filepath+".cached"
print "Reading "+filepath
linesread=0
#print self.parameters['inversefeatures'][self.featurematch]
with open(filepath,'r') as instream:
with open(outpath,'w')as outstream:
for line in instream:
fields=line.rstrip().split('\t')
entry=fields[0]
if entry in self.entrylist:
outstream.write(line)
while len(fields[1:])>0:
freq=float(fields.pop())
feature=fields.pop()
if freq> self.freqthresh:
parts=feature.split(':')
if parts[0]==self.featurematch:
tag=parts[1].split('/')[1]
if tag==self.tagmatch:
label=entry+':'+feature
self.fdict[label]=freq
self.noundict[parts[1]]=1
#print parts[0]
#if parts[0]==self.parameters['inversefeatures'][self.featurematch]: #amod-DEP has not been included as possible feature of adjective so cannot pick up these internal modifiers here
# self.inversedict[entry]=self.inversedict.get(entry,0)+freq
linesread+=1
if linesread%self.linestop==0:
print "Read "+str(linesread)+" lines"
if self.testing:
break
print "Size of freq dict is "+str(len(self.fdict))
print "Size of noun dict is "+str(len(self.noundict))
# print self.noundict
# print self.inversedict
def processassocfile(self):
filepath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['assocfile'])
outpath=filepath+".cached"
print "Reading "+filepath
linesread=0
with open(filepath,'r') as instream:
with open(outpath,'w') as outstream:
for line in instream:
fields=line.rstrip().split('\t')
entry=fields[0]
if entry in self.entrylist:
outstream.write(line)
while len(fields[1:])>0:
score=float(fields.pop())
feature=fields.pop()
parts=feature.split(':')
if parts[0]==self.featurematch:
label=entry+':'+feature
freq=self.fdict.get(label,0)
if freq>self.freqthresh:
# altfreq=self.modfdict.get(label,0)
# if altfreq>self.freqthresh:
#self.midict[label]=score
self.clist.append((label,freq,score))
else:
pass
#print "Ignoring low frequency "+label+" f1 = "+str(freq)
linesread+=1
if linesread%self.linestop==0:
print "Read "+str(linesread)+" lines"
if self.testing:
break
def processentries(self):
filepath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['entryfile'])
print "Reading "+filepath
linesread=0
mylist=[]
with open(filepath,'r') as instream:
for line in instream:
try:
fields=line.rstrip().split('\t')
if float(fields[1])>self.entrythresh:
entry=fields[0]
if len(entry)>self.stopwordlimit:
mylist.append(entry)
                except (TaggingException, ValueError, IndexError):
print "Warning: ignoring ",line
continue
linesread+=1
#if linesread%10000==0:
#print "Read "+str(linesread)+" lines"
print str(len(mylist))+" words over entry threshold"
random.shuffle(mylist)
self.entrylist=list(mylist[0:self.sample])
return
def viewlist(self):
total= len(self.clist)
print total
if not self.sorted:
self.clist.sort(key=itemgetter(2),reverse=True)
self.sorted=True
print "Top 10: ", self.clist[0:10]
print "Bottom 10: ", self.clist[total-10:total]
def outputlist(self):
headdict={}
headover={}
thresh=self.parameters['upperfreqthresh']
if not self.sorted:
self.clist.sort(key=itemgetter(2),reverse=True)
self.sorted=True
filepath=os.path.join(parameters['parentdir'],parameters['datadir'],parameters['collocatefile'])
with open(filepath,'w') as outstream:
for tuple in self.clist:
label=tuple[0]
head=label.split(':')[0]
outstream.write(label+'\t'+str(tuple[1])+'\t'+str(tuple[2])+'\n')
headdict[head]=headdict.get(head,0)+1
if tuple[1] > thresh-1:
headover[head]=headover.get(head,0)+1
print "Number of phrases per head"
print len(headdict.keys()),headdict
print "Number of phrases with frequency over "+str(thresh)+" per head"
print len(headover.keys()),headover
soverlist=[]
for adj in headdict.keys():
if headdict[adj]>100:
soverlist.append(adj)
print "Adjectives with more than 100 phrases with frequency over "+str(self.freqthresh)+": " +str(len(soverlist))
print soverlist
soverlist=[]
for adj in headover.keys():
if headover[adj]>100:
soverlist.append(adj)
print "Adjectives with more than 100 phrases with frequency over 100: " +str(len(soverlist))
print soverlist
opath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],'adjectives')
with open(opath,'w') as outstream:
for adj in soverlist:
outstream.write(adj+'\n')
class SourceCollocates(Collocates):
def processboledaline(self,line):
fields=line.rstrip().split('\t')
phrase=fields[0]
type=fields[1]
parts=phrase.split('_')
mod=parts[0]
head=parts[1]
try:
noun=untag(head,'-')[0]
adj=untag(mod,'-')[0]
if self.parameters['featurematch']=='amod-DEP':
label=noun+'/N:'+self.parameters['featurematch']+':'+adj+'/J'
if not self.parameters['allheads']:
self.entrylist.append(adj)
self.entrylist.append(noun)
elif self.parameters['featurematch']=='amod-HEAD':
label=adj+'/J:'+self.parameters['featurematch']+':'+noun+'/N'
if not self.parameters['allheads']:
self.entrylist.append(noun)
self.entrylist.append(adj)
#print label
#self.srcdict[label]=type
self.srcposdict[label]=self.linesread
self.revposdict[self.linesread]=label
self.srctypedict[label]=type
#self.modlist.append(adj)
except TaggingException:
print "Ignoring "+line
def processlistline(self,line):
fields=line.rstrip().split(' ')
try:
adj=fields[0]
type=fields[1]
self.entrylist.append(adj)
self.entrydict[adj]=1
self.srcposdict[adj]=self.linesread
self.revposdict[self.linesread]=adj
self.srctypedict[adj]=type
        except (TaggingException, IndexError):
print "Ignoring "+line
def processsource(self):
self.srctypedict={}
self.srcposdict={}
self.revposdict={}
self.entrylist=[]
#self.modlist=[]
filepath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['source'])
with open(filepath,'r') as instream:
print "Reading "+filepath
self.linesread=0
for line in instream:
self.linesread+=1
if self.parameters['adjlist']:
self.processlistline(line)
else:
self.processboledaline(line)
self.largest=self.linesread
def mergelists(self):
reslist=[]
for (label,freq,pmi) in self.clist:
pos = self.srcposdict.get(label,0)
if pos > 0:
type = self.srctypedict[label]
reslist.append((pos,label,freq,pmi,type))
reslist.sort()
filepath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['mergefile'])
counter=1
missinglist=[]
under100=[]
with open(filepath,'w') as outstream:
for tuple in reslist:
while counter<tuple[0]:
label=self.revposdict[counter]
if self.testing:
print label, self.srctypedict[label],0,0
outstream.write(label+'\t'+self.srctypedict[label]+'\t0\t0\n')
counter+=1
missinglist.append(label)
outstream.write(tuple[1]+'\t'+tuple[4]+'\t'+str(tuple[2])+'\t'+str(tuple[3])+'\n')
if tuple[2] < 100:
                    under100.append(tuple[1])
if self.testing:
print tuple[1],tuple[4],tuple[2],tuple[3]
counter+=1
while counter<self.largest+1:
label=self.revposdict[counter]
if self.testing:
print label, self.srctypedict[label],0,0
outstream.write(label+'\t'+self.srctypedict[label]+'\t0\t0\n')
counter+=1
missinglist.append(label)
print "Missing phrases: "+str(len(missinglist))
print missinglist
print "Frequency under 100: "+str(len(under100))
print under100
def divide(self):
#self.clist contains (label,freq,pmi) where label is adj:rel:noun where adj is in list
#convert to matrix
cmat=[]
row=[]
adj=''
for (label,freq,pmi) in self.clist:
parts=label.split(':')
thisadj=parts[0]
if thisadj != adj and len(row)>0:
cmat.append(row)
row=[]
adj=thisadj
row.append((label,freq,pmi))
if len(row)>0:
cmat.append(row)
print len(cmat)
trainingpath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['collocatefile'][0])
testingpath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['collocatefile'][1])
sparepath=os.path.join(self.parameters['parentdir'],self.parameters['datadir'],self.parameters['collocatefile'][2])
with open(trainingpath,'w') as training:
with open(testingpath,'w') as testing:
with open(sparepath,'w') as sparestream:
for row in cmat:
print len(row), row[0]
random.shuffle(row)
self.writetofile(row[0:50],training)
self.writetofile(row[50:100],testing)
self.writetofile(row[100:],sparestream)
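    # e.g. an adjective with 130 surviving collocate rows contributes its
    # first 50 shuffled rows to the training file, the next 50 to the testing
    # file and the remaining 30 to the spare file.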
def writetofile(self,alist,outstream):
if len(alist)==0:
return
else:
try:
adj=alist[0][0].split(':')[0]
type = self.srctypedict[adj]
for (label,freq,pmi) in alist:
outstream.write(label+'\t'+str(freq)+'\t'+str(pmi)+'\t'+type+'\n')
            except (TaggingException, KeyError):
print "Tagging error ",alist
def go(parameters):
mycollocates = Collocates(parameters)
mycollocates.processentries()
print mycollocates.entrylist
#exit()
mycollocates.processfreqfile()
mycollocates.processassocfile()
mycollocates.viewlist()
mycollocates.outputlist()
def analyse(parameters):
mycollocates=SourceCollocates(parameters)
mycollocates.processsource()
mycollocates.processfreqfile()
# if mycollocates.parameters['adjlist']:
# mycollocates.makemoddict()
mycollocates.processassocfile()
if mycollocates.parameters['adjlist']:
mycollocates.divide()
else:
mycollocates.viewlist()
mycollocates.mergelists()
mycollocates.outputlist()
if __name__=='__main__':
parameters=configure(sys.argv)
if parameters['usesource']:
analyse(parameters)
else:
go(parameters)
| [
"[email protected]"
] | |
88f48c50255fd52358931c5475225ecf37aff576 | 8994e080d0ec6ef7a680ccc7de8acec475e8f06a | /project_euler/problems/problems1_10.py | 93ecd6bdaaafb14d6d42dae4e2e7cd6f388b9e48 | [] | no_license | lffsantos/codigos_demos | 8bc209ba4378082dfa7e831e2f7d39d4f7cb0239 | bd32a12554d26bccf7f135d939698b332461f50f | refs/heads/master | 2021-01-10T08:43:12.457516 | 2015-11-16T17:15:59 | 2015-11-16T17:15:59 | 45,993,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | """
``sum_multiples_3_5`` sums the multiples of 3 or 5 below the given limit::
>>> sum_values = P1(1000)
>>> result = sum_values.sum_multiples_3_5()
>>> print result
233168
"""
from libutils import check_primos_number
class P1:
def __init__(self, number):
self.number = number
def sum_multiples_3_5(self):
return sum([i for i in xrange(self.number) if i%3==0 or i%5==0])
class P2:
def sum_values_fibonacci(self, max_value):
penult, last_value = 1, 1
soma = 0
while last_value < max_value:
if last_value % 2 == 0:
soma += last_value
penult, last_value = last_value, last_value + penult
return soma
class P3:
    def largest_value_primos_factor(self, number):
        # Trial division: divide each factor out completely before advancing,
        # so repeated prime factors (e.g. 8 = 2*2*2) are handled correctly.
        largest_value = 2
        i = 2
        while number > 1:
            if number % i == 0:
                largest_value = i
                number //= i
            else:
                i += 1
        return largest_value
class P4:
def largest_palindrome_product(self):
last_n = 999
largest_palindrome = 0
for i in xrange(999, 99,-1):
for j in xrange(last_n, 99,-1):
if self.check_palindrome(i * j) and largest_palindrome < i*j:
largest_palindrome = i*j
last_n -= 1
return largest_palindrome
def check_palindrome(self, number):
str_number = str(number)
if str_number == str_number[::-1]:
return True
else:
return False
class P5:
    # Brute force; assumes divisible >= 2 so the answer is even and odd
    # candidates can be skipped.
    def smallest_multiple(self, divisible):
        number = divisible
        i = 1
        while True:
            if number % i == 0:
                if i == divisible:  # divisible by every i in 1..divisible
                    return number
                i += 1
            else:
                number += 2
                i = 1
class P6:
def sum_square_difference(self, max_value):
soma_1, soma_2 = 0, 0
for i in xrange(max_value+1):
soma_1 += pow(i, 2)
soma_2 += i
soma_2 = pow(soma_2,2)
diff = soma_2 - soma_1
return diff
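# e.g. sum_square_difference(10): (1+...+10)**2 - (1**2+...+10**2)
# = 3025 - 385 = 2640.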
class P7:
def value_n_prime(self, position):
cont = 1
number = 1
while True:
if check_primos_number(number):
cont +=1
if cont == position:
break
number +=2
return number
class P8:
def __init__(self):
self.serie = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
def largest_product_in_serie(self, number):
serie_list = []
i = 0
max_product = 0
for s in self.serie:
serie_list.append(s)
i+=1
if len(serie_list) > number-1:
temp_product = eval('*'.join(serie_list[i-number:i]))
if max_product < temp_product:
max_product = temp_product
return max_product
| [
"[email protected]"
] | |
8be6496c26ae7209b534e5a00a5f87083b90ed55 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1674486_0/Python/Epcylon/diamonds.py | 2e2157e6604aa88f7abc29801ac943c856abe3e6 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | #!/usr/bin/python
"""
Author: Morten Lied Johansen - [email protected]
Google CodeJam 2012
Round: 1C
Problem: Diamond inheritance
"""
import collections
import os
import sys
import time
from pprint import pformat
from cStringIO import StringIO
import unittest
import logging
from logging import info, debug, error
from multiprocessing import Pool
# Set up basic logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
def yield_line_of_items(reader):
for x in reader.readline().strip().split():
yield x
def read_line_of_items(reader):
return list(yield_line_of_items(reader))
def yield_line_of_ints(reader):
for i in yield_line_of_items(reader):
yield int(i)
def read_line_of_ints(reader):
return list(yield_line_of_ints(reader))
def yield_lines_of_items(reader, num=1):
for i in range(num):
yield read_line_of_items(reader)
def read_lines_of_items(reader, num=1):
    return list(yield_lines_of_items(reader, num))
def yield_lines_of_ints(reader, num=1):
for i in range(num):
yield read_line_of_ints(reader)
def read_lines_of_ints(reader, num=1):
return list(yield_lines_of_ints(reader, num))
def run_in_process(case_solver):
return case_solver.solve()
class Solver(object):
def __init__(self, input_name, use_mp=False):
self.input_name = input_name
self.output_name = self._make_output_name()
self.use_mp = use_mp
def _make_output_name(self):
basename, ext = os.path.splitext(self.input_name)
output_name = basename + ".out"
return output_name
def open_output(self):
return open(self.output_name, "w")
def open_input(self):
return open(self.input_name, "r")
def main(self):
input = self.open_input()
output = self.open_output()
self.solve(input, output)
def solve(self, input, output):
number_of_cases = read_line_of_ints(input)[0]
solvers = list()
for casenr in xrange(number_of_cases):
solvers.append(CaseSolver(casenr+1, *self.read_case_input(input)))
if self.use_mp:
p = Pool()
solutions = p.map(run_in_process, solvers)
else:
solutions = map(run_in_process, solvers)
for casenr, result in sorted(solutions):
output.write("Case #%d: %s\n" % (casenr, result))
output.flush()
def read_case_input(self, input):
number_of_classes = read_line_of_ints(input)[0]
debug("Number of classes: %d", number_of_classes)
nodes = [Node(0, [])]
roots = set(range(1, number_of_classes+1))
for id in xrange(1, number_of_classes+1):
node_list = read_line_of_ints(input)
if node_list[0]:
parent_ids = node_list[1:]
node = Node(id, parent_ids)
roots.difference_update(parent_ids)
else:
node = Node(id, list())
nodes.append(node)
debug("Roots: %r", roots)
debug("Nodes: %r", nodes)
return nodes, roots
class Node(object):
def __init__(self, id, parent_ids):
self.id = id
self.parent_ids = parent_ids
self.visited_by_root = collections.defaultdict(bool)
def __repr__(self):
return "<Node(%d, %r)>" % (self.id, self.parent_ids)
class DiamondFound(Exception):
pass
class CaseSolver(object):
def __init__(self, casenr, nodes, roots):
self.casenr = casenr
self.nodes = nodes
self.roots = roots
def solve(self):
info("Solving case %d", self.casenr)
result = "No"
try:
for id in self.roots:
self.process(id, id)
except DiamondFound:
result = "Yes"
debug("Result: %s", result)
return self.casenr, result
def process(self, id, root):
node = self.nodes[id]
for parent_id in node.parent_ids:
parent = self.nodes[parent_id]
if parent.visited_by_root[root]:
raise DiamondFound()
parent.visited_by_root[root] = True
for parent_id in node.parent_ids:
self.process(parent_id, root)
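    # e.g. if class 1 inherits from 2 and 3, and both 2 and 3 inherit from 4,
    # the walk from root 1 first marks node 4 via class 2 and then reaches it
    # again via class 3, raising DiamondFound so the case prints "Yes".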
# === Verify correctness of sample data
class SampleTester(unittest.TestCase):
def setUp(self):
self.data = open("sample.correct", "r").read()
def test_sample(self):
output = StringIO()
solver = Solver("sample.in")
input = solver.open_input()
solver.solve(input, output)
self.assertEqual(self.data, output.getvalue())
if __name__ == "__main__":
if "--debug" in sys.argv:
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
use_mp = False
if "--use-mp" in sys.argv:
use_mp = True
input_name = sys.argv[1]
if input_name == "test":
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTester)
unittest.TextTestRunner(verbosity=2).run(suite)
else:
start = time.time()
solver = Solver(input_name, use_mp)
solver.main()
end = time.time()
info("Time spent: %s" % time.strftime("%M minutes, %S seconds", time.gmtime(end-start)))
| [
"[email protected]"
] | |
336de6e435da28ab320f65a19b891c0a296eeb6b | 39398e12e41dd9574488af872c2a06546ddca4ad | /factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/notifications/consumers.py | 2638d7ce68bdebcf6c1321c2bb511b38216a4d8d | [
"MIT"
] | permissive | piyushka17/azure-intelligent-edge-patterns | 612dc2ff9442fe37343844ca642308cf3d892240 | 0d088899afb0022daa2ac434226824dba2c997c1 | refs/heads/master | 2022-12-01T03:04:14.881931 | 2020-08-11T14:38:10 | 2020-08-11T14:38:10 | 286,632,773 | 0 | 0 | MIT | 2020-08-11T03:07:46 | 2020-08-11T03:07:46 | null | UTF-8 | Python | false | false | 1,408 | py | # -*- coding: utf-8 -*-
"""Notification Consumer
"""
# pylint: disable=unused-import
import asyncio
# pylint: enable=unused-import
import logging
from channels.generic.websocket import AsyncJsonWebsocketConsumer
# from .models import Notification
logger = logging.getLogger(__name__)
class NotificationConsumer(AsyncJsonWebsocketConsumer):
"""NotificationConsumer
"""
async def websocket_connect(self, message):
"""websocket connect
"""
# Auth here
await self.accept()
#self.channel_name = "notification"
await self.channel_layer.group_add("notification", self.channel_name)
logger.info("connect %s", message)
async def websocket_receive(self, message):
"""websocket receive
"""
logger.info("recieve %s", message)
await self.send("Connected")
await self.channel_layer.group_send("link", {
"type": "link.send",
"message": "msg from websocket",
})
    async def websocket_disconnect(self, message):
        """websocket disconnect
        """
        logger.info("disconnect %s", message)
        await self.close()
        await self.channel_layer.group_discard("notification", self.channel_name)
async def notification_send(self, event):
"""websocket send
"""
logger.info("notification_send!!!!!!!!!!")
await self.send_json(event)
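# A minimal routing sketch for this consumer (Channels 2 style; the URL path
# and module layout here are assumptions, not part of this project):
#
#   from django.urls import path
#   from channels.routing import ProtocolTypeRouter, URLRouter
#
#   application = ProtocolTypeRouter({
#       "websocket": URLRouter([
#           path("ws/notifications/", NotificationConsumer),
#       ]),
#   })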
| [
"[email protected]"
] | |
69b428753f435a7479a61e841418797a33179325 | 70ad3badf3fa6e2edf1889d8640f25a7ec0d9db1 | /catkin_ws/devel/lib/python2.7/dist-packages/imu_sequenced/msg/_ImuSequenced.py | 99d968d430ecbcb1e2469ea2c47ee3bce764a400 | [] | no_license | MathieuHwei/OldGaitMaven | 758a937dfda2cf4f1aee266dbbf682ef34989199 | 873f7d9089c5d1c0772bd3447e2b0a31dac68b70 | refs/heads/main | 2023-06-17T18:40:06.230823 | 2021-07-19T23:08:20 | 2021-07-19T23:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,765 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from imu_sequenced/ImuSequenced.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
import sensor_msgs.msg
class ImuSequenced(genpy.Message):
_md5sum = "009eb952c3a8276f5b47ebd92ca7e51e"
_type = "imu_sequenced/ImuSequenced"
_has_header = False #flag to mark the presence of a Header object
_full_text = """sensor_msgs/Imu imu
uint32 seq
================================================================================
MSG: sensor_msgs/Imu
# This is a message to hold data from an IMU (Inertial Measurement Unit)
#
# Accelerations should be in m/s^2 (not in g's), and rotational velocity should be in rad/sec
#
# If the covariance of the measurement is known, it should be filled in (if all you know is the
# variance of each measurement, e.g. from the datasheet, just put those along the diagonal)
# A covariance matrix of all zeros will be interpreted as "covariance unknown", and to use the
# data a covariance will have to be assumed or gotten from some other source
#
# If you have no estimate for one of the data elements (e.g. your IMU doesn't produce an orientation
# estimate), please set element 0 of the associated covariance matrix to -1
# If you are interpreting this message, please check for a value of -1 in the first element of each
# covariance matrix, and disregard the associated estimate.
Header header
geometry_msgs/Quaternion orientation
float64[9] orientation_covariance # Row major about x, y, z axes
geometry_msgs/Vector3 angular_velocity
float64[9] angular_velocity_covariance # Row major about x, y, z axes
geometry_msgs/Vector3 linear_acceleration
float64[9] linear_acceleration_covariance # Row major x, y z
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z"""
__slots__ = ['imu','seq']
_slot_types = ['sensor_msgs/Imu','uint32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
imu,seq
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ImuSequenced, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.imu is None:
self.imu = sensor_msgs.msg.Imu()
if self.seq is None:
self.seq = 0
else:
self.imu = sensor_msgs.msg.Imu()
self.seq = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.imu.header.seq, _x.imu.header.stamp.secs, _x.imu.header.stamp.nsecs))
_x = self.imu.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_4d().pack(_x.imu.orientation.x, _x.imu.orientation.y, _x.imu.orientation.z, _x.imu.orientation.w))
buff.write(_get_struct_9d().pack(*self.imu.orientation_covariance))
_x = self
buff.write(_get_struct_3d().pack(_x.imu.angular_velocity.x, _x.imu.angular_velocity.y, _x.imu.angular_velocity.z))
buff.write(_get_struct_9d().pack(*self.imu.angular_velocity_covariance))
_x = self
buff.write(_get_struct_3d().pack(_x.imu.linear_acceleration.x, _x.imu.linear_acceleration.y, _x.imu.linear_acceleration.z))
buff.write(_get_struct_9d().pack(*self.imu.linear_acceleration_covariance))
buff.write(_get_struct_I().pack(self.seq))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.imu is None:
self.imu = sensor_msgs.msg.Imu()
end = 0
_x = self
start = end
end += 12
(_x.imu.header.seq, _x.imu.header.stamp.secs, _x.imu.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.imu.header.frame_id = str[start:end].decode('utf-8')
else:
self.imu.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.imu.orientation.x, _x.imu.orientation.y, _x.imu.orientation.z, _x.imu.orientation.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 72
self.imu.orientation_covariance = _get_struct_9d().unpack(str[start:end])
_x = self
start = end
end += 24
(_x.imu.angular_velocity.x, _x.imu.angular_velocity.y, _x.imu.angular_velocity.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.imu.angular_velocity_covariance = _get_struct_9d().unpack(str[start:end])
_x = self
start = end
end += 24
(_x.imu.linear_acceleration.x, _x.imu.linear_acceleration.y, _x.imu.linear_acceleration.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.imu.linear_acceleration_covariance = _get_struct_9d().unpack(str[start:end])
start = end
end += 4
(self.seq,) = _get_struct_I().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.imu.header.seq, _x.imu.header.stamp.secs, _x.imu.header.stamp.nsecs))
_x = self.imu.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_4d().pack(_x.imu.orientation.x, _x.imu.orientation.y, _x.imu.orientation.z, _x.imu.orientation.w))
buff.write(self.imu.orientation_covariance.tostring())
_x = self
buff.write(_get_struct_3d().pack(_x.imu.angular_velocity.x, _x.imu.angular_velocity.y, _x.imu.angular_velocity.z))
buff.write(self.imu.angular_velocity_covariance.tostring())
_x = self
buff.write(_get_struct_3d().pack(_x.imu.linear_acceleration.x, _x.imu.linear_acceleration.y, _x.imu.linear_acceleration.z))
buff.write(self.imu.linear_acceleration_covariance.tostring())
buff.write(_get_struct_I().pack(self.seq))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.imu is None:
self.imu = sensor_msgs.msg.Imu()
end = 0
_x = self
start = end
end += 12
(_x.imu.header.seq, _x.imu.header.stamp.secs, _x.imu.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.imu.header.frame_id = str[start:end].decode('utf-8')
else:
self.imu.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.imu.orientation.x, _x.imu.orientation.y, _x.imu.orientation.z, _x.imu.orientation.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 72
self.imu.orientation_covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
_x = self
start = end
end += 24
(_x.imu.angular_velocity.x, _x.imu.angular_velocity.y, _x.imu.angular_velocity.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.imu.angular_velocity_covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
_x = self
start = end
end += 24
(_x.imu.linear_acceleration.x, _x.imu.linear_acceleration.y, _x.imu.linear_acceleration.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.imu.linear_acceleration_covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
start = end
end += 4
(self.seq,) = _get_struct_I().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_9d = None
def _get_struct_9d():
global _struct_9d
if _struct_9d is None:
_struct_9d = struct.Struct("<9d")
return _struct_9d
| [
"[email protected]"
] | |
455d28a828552783f50ad986c42b3151379aa912 | 1e19cab9c19562477cf561a88949faeee3731015 | /spider/spider/spiders/a360doc.py | a19ef4b2474d1f0ec38f40bef7ecb33f13f2e203 | [] | no_license | sugyli/a_dou | 62f5c3090f4001b68613a0b7c30526a58f512aa7 | 4c3121495416361d7f4bfe97e3ed15c61c28f1e3 | refs/heads/master | 2021-06-24T12:30:44.018193 | 2019-12-02T05:27:41 | 2019-12-02T05:27:41 | 205,197,259 | 0 | 0 | null | 2021-02-08T20:36:17 | 2019-08-29T15:45:23 | JavaScript | UTF-8 | Python | false | false | 11,909 | py | # -*- coding: utf-8 -*-
import scrapy,traceback,json,emoji,html,copy
import logging
logger = logging.getLogger(__name__)
from urllib import parse
import helpers,re
from bigdbs.models import BigDb
from ..items import BigDbSpiderItem
from .. import help
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
class A360docSpider(scrapy.Spider):
name = '360doc'
#allowed_domains = ['www.360doc.com','www.360doc.cn','image109.360doc.cn']
wapheaders = {
"HOST": "www.360doc.cn",
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36"
}
    # pipeline priority: 9 runs first, 10 runs after
custom_settings = {
"IMAGES_URLS_FIELD": 'images',
"UserAgentList": settings['UserAgentList'],
"ITEM_PIPELINES": {
'spider.pipelines.BigDbImagePipeline': 9,
'spider.pipelines.BigDbSpiderPipeline': 10,
}
}
noneedurl=[
'http://www.360doc.cn/article/14020892_871182943.html',
'http://www.360doc.cn/article/54623748_871222082.html',
'http://www.360doc.cn/article/28625038_871239766.html',
'http://www.360doc.cn/article/273090_240724762.html',
'http://www.360doc.cn/article/52901360_871157169.html',
'http://www.360doc.cn/article/6748870_871057964.html',
'http://www.360doc.cn/article/1427138_873834882.html',
'http://www.360doc.cn/article/10813888_870045548.html',
'http://www.360doc.cn/article/34614342_760046274.html'
]
def start_requests(self):
try:
parameter = [
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '社会',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '9',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '文化',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '7',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '人生',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '163',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '生活',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '2',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '健康',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '6',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '教育',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '10',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '职场',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '3',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '财经',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '440',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '娱乐',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '5',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '艺术',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1',
'classid': '1',
'subclassid': '0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getReadRooms.ashx',
'category': '上网',
'num': 30,
'data': {
'topnum':'20',
'pagenum':'1',
'classid':'12',
'subclassid':'0'
}
},
{
'url': 'http://www.360doc.cn/ajax/index/getOriginal.ashx',
'category': '综合',
'num': 30,
'data': {
'topnum': '20',
'pagenum': '1'
}
}
]
for row in parameter:
for i in range(1, row['num']):
# url =row['url'].replace('@@@@@',str(i))
#
# if row['type'] == 'web':
# yield scrapy.Request(url
# , meta={
# 'category':row['category'],
# 'type': row['type']
# }
# , headers=self.headers
# , callback=self.parse_list)
#
headers = copy.deepcopy(self.wapheaders)
headers['Origin'] = 'http://www.360doc.cn'
headers['Referer']='http://www.360doc.cn/index.html?classid=13&subclassid=0'
headers['X-Requested-With']='XMLHttpRequest'
headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
row['data']['pagenum'] = str(i)
yield scrapy.FormRequest(row['url']
, method='POST'
, meta={
'category': row['category'],
}
, headers=headers
#, body=json.dumps(row['data'])
, formdata=row['data']
, callback=self.parse_list)
except Exception:
raise Exception('start_requests 开头',traceback.format_exc())
    # Article URLs have been extracted from the list page; a convenient place to debug.
def parse_list(self, response):
try:
category=response.meta.get("category")
if response.status ==200:
rs=json.loads(response.body.decode('utf-8'))
if len(rs['data'])>0:
for row in rs['data']:
name = row['articletitle']
wapurl = row['arttitleurl']
if wapurl not in self.noneedurl:
obj=BigDb.objects.filter(norm=wapurl)
if not obj:
yield scrapy.Request(wapurl
, meta={
"norm": wapurl,
"name": name,
"category": category
}
, headers=self.wapheaders
, callback=self.parse)
else:
print(f"{name} 已经存在不添加")
else:
print(f"{wapurl} 设置不采集")
else:
logger.error(f"出错来源 {response.url} 请求失败 状态码 {response.status}")
except Exception:
            raise Exception('parse_list failed', traceback.format_exc())
def parse(self, response):
"""
tempstr = "hello you hello python are you ok"
import re
rex = r'(hello|Use)'
print re.sub(rex,"Bye",tempstr)
"""
try:
htmlstr = response.css('#artcontentdiv').extract()[0]
compile_br = re.compile(r'<\s*br[^>]*>', re.I)
htmllist = compile_br.split(htmlstr)
if len(htmllist)>1:
htmlstr = \
''.join(['<p>{}</p>'.format(s) for s in htmllist if s.strip()])
images = response.css('#artcontentdiv img::attr(src)').extract()
            # Make sure image URLs are complete (absolute)
for i in range(len(images)):
if not re.match(r'^https?:/{2}\w.+$', images[i]):
images[i] = parse.urljoin(response.url,images[i])
rep_image="%%%%%%%%%%"
rex=r"""(<img\s.*?\s?src\s*=\s*['|"]?[^\s'"]+.*?>)"""
            # compiled with re.I so the <img> match is case-insensitive
compile_img=re.compile(rex, re.I)
htmlstr=compile_img.sub(rep_image, htmlstr)
htmlstr = help.handle_content(htmlstr)
if htmlstr or len(images) > 0:
item = BigDbSpiderItem()
item['images'] = images
item['image_headers']={
}
item['bigdb'] = {
'name': response.meta.get("name"),
'norm': response.meta.get("norm"),
'content': htmlstr,
'normslug': '360doc',
'status': 'P'
}
item['category'] = response.meta.get("category")
item['rep'] = {
'rep_image':rep_image
}
item['image_prefix']= '360doc'
yield item
else:
logger.error(f"{response.meta.get('name')} 内容为空 不入库 {response.meta.get('norm')}")
except Exception:
            raise Exception('parse failed', traceback.format_exc())
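    # Illustration of the sentinel substitution above (values hypothetical):
    # compile_img.sub(rep_image, '<p>hi<img src="a.jpg"></p>') yields
    # '<p>hi%%%%%%%%%%</p>'; item['rep'] carries the sentinel so a downstream
    # pipeline can presumably swap each placeholder for the stored image.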
| [
"“[email protected]”"
] | |
54cc4616d2c462da1ff90769d605ca3f52d839c6 | d6a0cc09c0fd86d95bc0ee8034acf09334a2c377 | /plant/크롤링2.py | 847846c5eb4d4617364ebcee452fe8201ec28bfc | [] | no_license | Kimuksung/bigdata | e2b0e04e817a7113cba8c5d4acdd8cf20b664147 | e7cce223eb55709d9ebcb631e39360b347a02764 | refs/heads/master | 2021-07-13T10:16:54.498961 | 2021-03-08T02:27:28 | 2021-03-08T02:27:28 | 241,530,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
# Humidity  - https://www.weather.go.kr/weather/observation/currentweather.jsp?tm=2020.06.19.01:00&type=t13&mode=0&reg=100&auto_man=m&stn=129
# Dew point - https://www.weather.go.kr/weather/observation/currentweather.jsp?tm=2020.06.19.01:00&type=t12&mode=0&reg=100&auto_man=m&stn=129
import datetime
date1 = '2016-05-01'
date2 = '2019-04-18'
start = datetime.datetime.strptime(date1, '%Y-%m-%d')
end = datetime.datetime.strptime(date2, '%Y-%m-%d')
step = datetime.timedelta(days=30)
tmp = {}
while start <= end:
print (str(start.date()).replace("-" , "."))
basic_path = "https://www.weather.go.kr/weather/observation/currentweather.jsp?type=t13&mode=2&stn=129®=101&auto_man=m&tm="
basic_path2 = ".01:00&dtm=0"
date = str(start.date()).replace("-" , ".")
url = basic_path+date+ basic_path2
html = urlopen(url)
source = html.read()
#print(html)
soup = BeautifulSoup(source, "html5lib")
#print(soup)
tr=soup.find_all("tr")
#print(tr)
html.close()
# =============================================================================
# tr[1].find("a")
# tr[1].a.attrs
# tr[1].a.string
# tr[1].find_all("td")[1:]
# =============================================================================
    for i in range(1, 32):  # len(tr)
#print(tr[i].a.string)
tmp2 = []
for j in tr[i].find_all("td")[1:]:
#print(j.string)
tmp2.append(j.string)
tmp[str(start.year)+"."+tr[i].a.string] = tmp2
start += step
print(tmp.keys())
answer={}
#for i in tmp.keys():
for i in tmp.keys():
print(i)
for j in range(0,8):
answer[(datetime.datetime.strptime(i, '%Y.%m.%d') + datetime.timedelta(hours= (j+1)*3)).strftime('%Y-%m-%d %H')] = tmp[i][j]
#print((datetime.datetime.strptime(i, '%Y.%m.%d') + datetime.timedelta(hours= (j+1)*3)).strftime('%Y-%m-%d %H'))
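# e.g. the 8 readings stored under key '2016.05.01' are re-keyed to
# '2016-05-01 03', '2016-05-01 06', ..., '2016-05-02 00' ((j+1)*3-hour offsets).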
type(answer)
answer['2016-05-02 00']
'''
import pandas as pd
df = pd.DataFrame(answer.values() , index = answer.keys() , columns = ["dew_point"])
df
df.to_csv("dewpoint.csv")
print(df)
df2 = pd.read_csv("dewpoint.csv")
type(df2)
df2 = df2.set_index('Unnamed: 0')
df2
''' | [
"[email protected]"
] | |
0e38d2a2bdbcdc5299f5b0bd9fd4047035b1acff | d88868386b529a7adecb1f0caf1db6cdf743951f | /triematcher.py | 2bbfb3bf0e5e2006e2004960a8ad0aa5c738a320 | [] | no_license | hldai/ks-studio-el | 4b87476846a8aa987203d882e51562b1847445f9 | 995ae4af67c360744170125da63472e940bed87d | refs/heads/master | 2020-12-24T21:21:25.911325 | 2016-12-01T10:56:38 | 2016-12-01T10:56:38 | 58,045,365 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | class TrieNode:
def __init__(self):
self.ch = None
self.child = None
self.next_child = None
self.rid = None
def add_child(self, ch):
if not self.child:
self.child = TrieNode()
self.child.ch = ch
return self.child
cur_child = self.child
while cur_child.ch != ch and cur_child.next_child:
cur_child = cur_child.next_child
if cur_child.ch == ch:
return cur_child
cur_child.next_child = TrieNode()
cur_child.next_child.ch = ch
return cur_child.next_child
def find_child(self, ch):
cur_child = self.child
while cur_child:
if cur_child.ch == ch:
return cur_child
cur_child = cur_child.next_child
return None
class MeshDetect:
word_sep = [',', '.', '"', '\'', '(', ')', '/', '-', '\n', ';']
def __init__(self, dict_file, exclude_words_file):
exclude_words_set = None
if exclude_words_file:
exclude_words_set = MeshDetect.load_exclude_words(exclude_words_file)
self.trie_root = TrieNode()
print 'Loading mesh dict ...'
fin = open(dict_file, 'rb')
fin.readline()
cur_name = None
line_idx = 0
for line_idx, line in enumerate(fin):
line = line.strip()
if cur_name:
cur_rid = line
if cur_name.isupper():
self.add_term(cur_name, cur_rid)
cur_name = None
continue
cur_name_lc = cur_name.lower()
if not exclude_words_set or cur_name_lc not in exclude_words_set:
self.add_term(cur_name, cur_rid)
if cur_name_lc != cur_name:
self.add_term(cur_name_lc, cur_rid)
cur_name = None
else:
cur_name = line.decode('utf-8')
fin.close()
print line_idx, 'lines'
def add_term(self, term, rid):
cur_node = self.trie_root
for ch in term:
cur_node = cur_node.add_child(ch)
if not cur_node.rid:
cur_node.rid = rid
    def match(self, text, beg_pos):
        # longest-match lookup starting at beg_pos; returns ([beg, end], rid)
        # for the longest term found, or None
cur_node = self.trie_root
pos = beg_pos
hit_node = None
hit_pos = -1
result_span = [beg_pos, -1]
while pos < len(text) and cur_node:
cur_node = cur_node.find_child(text[pos])
if cur_node and cur_node.rid:
hit_node = cur_node
hit_pos = pos
pos += 1
if hit_node:
result_span[1] = hit_pos
return result_span, hit_node.rid
return None
    def find_all_terms(self, doc_text):
        # scan doc_text left to right, keeping every dictionary term that ends
        # at a word separator or at the end of the text
span_list = list()
id_list = list()
# results = list()
pos = 0
text_len = len(doc_text)
while pos < text_len:
# print doc_text[pos:]
result = self.match(doc_text, pos)
if result and (result[0][1] == text_len - 1 or MeshDetect.is_word_sep(doc_text[result[0][1] + 1])):
# results.append(result)
span_list.append(result[0])
id_list.append(result[1])
pos = result[0][1] + 1
else:
while pos < text_len and not MeshDetect.is_word_sep(doc_text[pos]):
pos += 1
pos += 1
return span_list, id_list
@staticmethod
def is_word_sep(ch):
if ch.isspace():
return True
return ch in MeshDetect.word_sep
@staticmethod
def load_exclude_words(file_name):
fin = open(file_name, 'rb')
fin.readline()
words_set = set()
for line in fin:
words_set.add(line.strip())
fin.close()
return words_set
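
if __name__ == '__main__':
    # A minimal self-test sketch of the trie machinery alone; building a full
    # MeshDetect needs the external dict/exclude-word files, which are not
    # assumed here. The insertion loop mirrors MeshDetect.add_term.
    root = TrieNode()
    for term, rid in (('cat', 'D001'), ('cow', 'D002')):
        node = root
        for ch in term:
            node = node.add_child(ch)
        if not node.rid:
            node.rid = rid
    probe = root
    for ch in 'cow':
        probe = probe.find_child(ch)
    print probe.rid  # expected: D002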
| [
"[email protected]"
] | |
da5596d54ac00268281f5b368ce5bdb61bbf3e85 | 1575d5acc07eb67cb4e3cd523a24bb1d39efcb84 | /pattenRecognition/mnist/demo-p2/train.py | 6fa6b5694e1fb2743ed1bdcf92262344f744728e | [] | no_license | ChenLiangbo/DeepLearning | 4bd80ddb2a41b883ef70947a8b1fdb3b19656df0 | 3464c27116dc00bd597d2b9c25313964e1d89797 | refs/heads/master | 2020-12-24T12:39:27.666215 | 2017-05-09T13:49:44 | 2017-05-09T13:49:44 | 72,974,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | #!usr/bin/env/python
# -*- coding: utf-8 -*-
import numpy as np
import input_data
from cnnModel import MyCNNmodel
trainNumber = 55000   # size of the MNIST training split loaded below
mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)
x_train, y_train, x_test, y_test = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
print "x_train.shape = ",x_train.shape
x_train = x_train[0:trainNumber]
y_train = y_train[0:trainNumber]
x_train = x_train.reshape(-1, 28, 28, 1)   # flat 784-vectors -> 28x28 single-channel images
print "x_train.shape = ",(x_train.shape,y_train.shape)
myModel = MyCNNmodel()
myModel.iterTimes = 200
myModel.batchSize = 178
myModel.train(x_train,y_train)
import os
import json

# record this run's hyper-parameters, then hand off to the prediction script
content = {"iterTimes":myModel.iterTimes,"batchSize":myModel.batchSize,"trainNumber":trainNumber}
command = 'python predict.py'
os.system(command)

fp = open('./file/mnist_result.txt','ab')   # append one JSON record per run
fp.write(json.dumps(content))
fp.write('\r\n')
fp.close()
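
def load_run_log(path='./file/mnist_result.txt'):
    # A sketch, not part of the original script: read the JSON-per-line log
    # written above back into a list of dicts.
    with open(path, 'rb') as f:
        return [json.loads(line) for line in f if line.strip()]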
| [
"[email protected]"
] | |
c2d224c8012c4eab20df0ed6980600ceaa4e0906 | e1f941604b7a8bf4b86b700a2a0fd302dbe70434 | /add_extinction_column_rejected.py | 8f288639ed8d6c93a2bbc7373188bbcdf20a6e05 | [] | no_license | frenchd24/gt | fcc0bfee6b7f3c68871218f237e31c395889b5de | e0840800d8a3f906844b30701b294f102b030243 | refs/heads/master | 2021-06-03T18:05:10.797021 | 2020-07-06T04:22:37 | 2020-07-06T04:22:37 | 109,777,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,074 | py | #!/usr/bin/env python
'''
By David French ([email protected])
$Id: add_extinction_column_rejected.py, v 2.1 09/08/17
Does the same thing as add_extinction_column.py, but on rejected_results_redo.csv
Based on:
Id: add_extinction_column.py, v 2.0 04/24/17
Add extinction column to return_basic_full.csv based on new IRSA values
Returns: return_basic_full_extinc.csv
** Successfully made /usr/data/moosejaw/frenchd/GT_update2/return_basic_full_extinc.csv
on 04/24/17
**
ADAPTED FROM: add_extinction_column.py, v 1.0 02/06/14 =
"
Adds a column in NewGalaxyTable2.csv for E(B-V) based on values from IRSA.
**Successfully made NewGalaxyTable3.csv on 02/13/15**
"
'''
import sys
import os
import csv
# import string
# import warnings
# import urllib
# import numpy
from pylab import *
# import atpy
import math
import getpass
import itertools
from utilities import *
# import scipy.optimize as optimization
# import pickle
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.ticker import NullFormatter
def schechter(m, phi, mstar, alpha):
    # Schechter luminosity function: number density at absolute magnitude m for
    # normalization phi, characteristic magnitude mstar, and faint-end slope alpha
    s = 0.4 * math.log(10) * phi * (10**(0.4*(mstar-m)))**(alpha + 1) * exp(-10**(0.4*(mstar-m)))
return s
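
# Sanity check of the form above: at m == mstar both magnitude factors collapse
# to 1, so schechter(mstar, phi, mstar, alpha) == 0.4*ln(10)*phi/e ≈ 0.339*phi
# for any alpha.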
def absoluteMag(m,d):
    # m is apparent magnitude, d is distance in Mpc; this is the standard
    # distance modulus M = m - 5*log10(d_pc/10)
    M = float(m) - 5*log10((float(d)*10**6)/10)
return M
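
# Worked example: absoluteMag(15.0, 10.0) = 15 - 5*log10(1e7/10) = 15 - 30 = -15.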
def lstarValue(mstar,m):
# calculate and return L/Lstar
lratio = 10**(-0.4*(m-mstar))
return lratio
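
# Worked example: lstarValue(-20.0, -18.0) = 10**(-0.4*2) ≈ 0.158, i.e. a galaxy
# 2 mag fainter than M* emits about 16% of L*; lstarValue(mstar, mstar) == 1.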
def readlines_into_lists(file, header):
    # Take in a file object; read each line and split it on whitespace. Returns
    # a list of rows, each of which is a list of values.
    # header is a list of characters marking header lines that should be skipped,
    # e.g. header = ['\\', '|']
outList = []
lines = file.readlines()
for l in lines:
isHeader = False
for c in header:
# if a header character, 'c', is found at the start of the line, 'l'
if str(l)[0] == c:
isHeader = True
print 'was header: ',str(l)
if not isHeader:
splitLine = l.split()
outList.append(splitLine)
return outList
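
# Example: readlines_into_lists(open('extinc.txt', 'rU'), ['\\', '|']) skips
# IRSA-style header lines (those beginning with '\' or '|') and splits the data
# rows on whitespace ('extinc.txt' is an illustrative filename).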
def main():
# open and read the galaxy table
if getpass.getuser() == 'frenchd':
inputFilename = '/usr/data/moosejaw/frenchd/GT_update2/rejected_results_redo.csv'
outputFilename = '/usr/data/moosejaw/frenchd/GT_update2/rejected_results_redo_extinc.csv'
extincTableDirectory = '/usr/data/moosejaw/frenchd/GT_update2/extincTables/'
else:
print 'Could not determine username. Exiting.'
sys.exit()
# open the galaxy file
inputFile = open(inputFilename,'rU')
reader = csv.DictReader(inputFile)
# new fieldnames for updated galaxy table
fieldnames = ('preferredName',\
'oldName',\
'redshift',\
'degreesJ2000RA_Dec',\
'J2000RA_Dec',\
'galacticLong_Lat',\
'rIndependentDistMean_sd_min_max (Mpc)',\
'morphology',\
'distanceIndicator',\
'luminosityClass',\
'EBminusV',\
'radialVelocity (km/s)',\
'vcorr (km/s)',\
'angDiameters (arcsec)',\
'linDiameters (kpc)',\
'distvcorr (Mpc)',\
'inclination (deg)',\
'photometry',\
'alternativeNames')
writerOutFile = open(outputFilename,'wt')
writer = csv.DictWriter(writerOutFile, fieldnames=fieldnames)
headers = dict((n,n) for n in fieldnames)
writer.writerow(headers)
header = ['\\','|']
    # open the IRSA extinction results table
file1 = open('{0}/rejected_results_redo_extinc.txt'.format(extincTableDirectory),'rU')
lines1 = readlines_into_lists(file1, header)
allLines = lines1
count = 0
# for gline, eline in zip(reader, itertools.chain(lines1,lines2,lines3,lines4,lines5,lines6)):
for gline, eline in zip(reader, allLines):
ra,dec = eval(gline['degreesJ2000RA_Dec'])
era,edec = eline[0],eline[1]
EBminusV_SF = eline[3]
meanEBminusV_SF = eline[4]
        # convert ra,dec to the fixed-width format used by the extinction table:
        # zero-pad the truncated RA string out to 9 characters
        rat = trunc(ra,9)
        lenRat = len(rat)
        if lenRat == 3:
            rat = str(rat)+'000000'
        elif lenRat == 4:
            rat = str(rat)+'00000'
        elif lenRat == 5:
            rat = str(rat)+'0000'
        elif lenRat == 6:
            rat = str(rat)+'000'
        elif lenRat == 7:
            rat = str(rat)+'00'
        elif lenRat == 8:
            rat = str(rat)+'0'
        else:
            rat = str(rat)
dect = trunc(dec,9)
dect2 = trunc(str(dect),5)
rat2 = trunc(str(rat),5)
erat = trunc(str(era),5)
edect = trunc(str(edec),5)
        # first pass (now disabled): the ra/dec pairs were written out here in
        # 20000-row chunks, one file per chunk, as input for the IRSA extinction query
count+=1
if erat == rat2 and edect == dect2:
# print 'match ',count
objectInfoList = (\
gline['preferredName'],\
gline['oldName'],\
gline['redshift'],\
gline['degreesJ2000RA_Dec'],\
gline['J2000RA_Dec'],\
gline['galacticLong_Lat'],\
gline['rIndependentDistMean_sd_min_max (Mpc)'],\
gline['morphology'],\
gline['distanceIndicator'],\
gline['luminosityClass'],\
meanEBminusV_SF,\
gline['radialVelocity (km/s)'],\
gline['vcorr (km/s)'],\
gline['angDiameters (arcsec)'],\
gline['linDiameters (kpc)'],\
gline['distvcorr (Mpc)'],\
gline['inclination (deg)'],\
gline['photometry'],\
gline['alternativeNames'])
row = dict((f,o) for f,o in zip(fieldnames,objectInfoList))
writer.writerow(row)
# update counter
            percentComplete = round((float(count)/130759)*100,2)   # 130759 = expected number of rows
sys.stdout.write('Percent complete: {0}\r'.format(percentComplete))
sys.stdout.flush()
else:
print 'no match: {0},{1} != {2},{3}'.format(erat, edect, rat2, dect2)
print 'count = ',count
file1.close()
inputFile.close()
writerOutFile.close()
if __name__=="__main__":
main()
| [
"[email protected]"
] |