import unittest

import numpy as np
import pandas as pd

from snorkel.labeling import filter_unlabeled_dataframe


class TestAnalysis(unittest.TestCase):
    def test_filter_unlabeled_dataframe(self) -> None:
        X = pd.DataFrame(dict(A=["x", "y", "z"], B=[1, 2, 3]))
        y = np.array(
            [[0.25, 0.25, 0.25, 0.25], [1.0, 0.0, 0.0, 0.0], [0.2, 0.3, 0.5, 0.0]]
        )
        L = np.array([[0, 1, -1], [-1, -1, -1], [1, 1, 0]])
        X_filtered, y_filtered = filter_unlabeled_dataframe(X, y, L)
        # Rows whose label matrix entries are all -1 (abstain) should be dropped.
        # The original discarded the result of np.array_equal; assert it instead.
        self.assertTrue(
            np.array_equal(
                X_filtered.values, np.array([["x", 1], ["z", 3]], dtype=object)
            )
        )
        np.testing.assert_array_almost_equal(
            y_filtered, np.array([[0.25, 0.25, 0.25, 0.25], [0.2, 0.3, 0.5, 0.0]])
        )
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Iterable


def truncate(
    string: str,
    max_line_len: int = 60,
    max_lines: int = 1,
    tail: bool = False,
) -> str:
    """Truncate a string using ellipsis.

    For multi-line inputs, each line is truncated independently.

    For example:

        >>> truncate("abcdefghijklmnop\n1234", max_line_len=10, max_lines=2)
        "abcdefg...\n1234"

    :param string: The string to truncate.
    :param max_line_len: The maximum number of characters in each line.
    :param max_lines: The maximum number of lines in the output string.
    :param tail: If True, keep the last `max_lines` lines instead of the first.
    :return: A (possibly truncated) string.
    """
    return truncate_lines(
        str(string).split("\n"),
        max_line_len=max_line_len,
        max_lines=max_lines,
        tail=tail,
    )


def truncate_lines(
    lines: Iterable[str],
    max_line_len: int = 60,
    max_lines: int = 1,
    tail: bool = False,
) -> str:
    """Truncate a sequence of lines, one string per line, using ellipsis.

    Each line is truncated independently and combined into a single multi-line
    string.

    For example:

        >>> truncate_lines(["abcdefghijklmnop", "1234"], max_line_len=10, max_lines=2)
        "abcdefg...\n1234"

    :param lines: The lines to truncate.
    :param max_line_len: The maximum number of characters in each line.
    :param max_lines: The maximum number of lines in the output string.
    :param tail: If True, keep the last `max_lines` lines instead of the first.
    :return: A (possibly truncated) string.
    """
    if max_line_len <= 3:
        raise ValueError("Lines must be greater than 3 characters long.")

    def _truncate_line(line: str):
        if len(line) > max_line_len:
            return f"{line[:max_line_len - 3]}..."
        return line

    def _consume(iterable, n):
        """Consume the first or last `n` elements from iterable."""
        if tail:
            yield from deque(iterable, n)
        else:
            for _ in range(n):
                try:
                    yield next(iterable)
                except StopIteration:
                    return

    lines = iter(lines)
    truncated_lines = [_truncate_line(str(ln)) for ln in _consume(lines, max_lines)]

    # Truncate the final line if required.
    try:
        next(lines)
        truncated_lines[-1] = _truncate_line(f"{truncated_lines[-1]}...")
    except StopIteration:
        pass

    return "\n".join(truncated_lines)
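# A minimal usage sketch of the helpers above (not part of the original
# module); the sample text is made up for illustration.
if __name__ == "__main__":
    text = "abcdefghijklmnop\n1234\n5678"
    # Keep the first two lines: the long one is ellipsized and the dropped
    # third line is signalled by a trailing ellipsis.
    print(truncate(text, max_line_len=10, max_lines=2))
    # Keep the *last* two lines instead.
    print(truncate_lines(text.split("\n"), max_line_len=10, max_lines=2, tail=True))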
# -*- coding: utf-8 -*-
"""
    proxy.py
    ~~~~~~~~
    ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
    Network monitoring, controls & Application development, testing, debugging.

    :copyright: (c) 2013-present by <NAME> and contributors.
    :license: BSD, see LICENSE for more details.
"""
from typing import Optional, Tuple, Callable

import paramiko


class Tunnel:
    """Establishes a tunnel between local (machine where Tunnel is running) and remote host.

    Once a tunnel has been established, the remote host can route HTTP(s) traffic to
    localhost over the tunnel.
    """

    def __init__(
            self,
            ssh_username: str,
            remote_addr: Tuple[str, int],
            private_pem_key: str,
            remote_proxy_port: int,
            conn_handler: Callable[[paramiko.channel.Channel], None]) -> None:
        self.remote_addr = remote_addr
        self.ssh_username = ssh_username
        self.private_pem_key = private_pem_key
        self.remote_proxy_port = remote_proxy_port
        self.conn_handler = conn_handler

    def run(self) -> None:
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
        try:
            ssh.connect(
                hostname=self.remote_addr[0],
                port=self.remote_addr[1],
                username=self.ssh_username,
                key_filename=self.private_pem_key,
            )
            print('SSH connection established...')
            transport: Optional[paramiko.transport.Transport] = ssh.get_transport()
            assert transport is not None
            transport.request_port_forward('', self.remote_proxy_port)
            print('Tunnel port forward setup successful...')
            while True:
                # accept() returns None when the 1 second timeout expires with
                # no incoming channel; poll again rather than asserting on it.
                conn: Optional[paramiko.channel.Channel] = transport.accept(timeout=1)
                e = transport.get_exception()
                if e:
                    raise e
                if conn is None:
                    continue
                self.conn_handler(conn)
        except KeyboardInterrupt:
            pass
        finally:
            ssh.close()
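# A hypothetical usage sketch (not part of proxy.py itself): the host name,
# key path and ports below are placeholders, and the handler simply closes
# each forwarded channel where a real one would relay bytes to a local socket.
def _close_channel(channel: paramiko.channel.Channel) -> None:
    channel.close()


if __name__ == '__main__':
    Tunnel(
        ssh_username='ubuntu',
        remote_addr=('remote.example.com', 22),
        private_pem_key='/path/to/key.pem',
        remote_proxy_port=8899,
        conn_handler=_close_channel,
    ).run()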
<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>numpy genfromtxt<line_sep>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>path=os.path.join(cur_dir "results_arima_forecasts.csv")<with_stmt>open(path "rb")<as>fd<block_start>forecast_results=genfromtxt(fd names=<true> delimiter="," dtype=float)<block_end># NOTE: # stata gives no indication of no convergence for 112 CSS but gives a # different answer than x12arima, gretl simply fails to converge # redid stata with starting parameters from x12arima # it looks like stata uses a different formula for the CSS likelihood # they appear to be using a larger sample than R, gretl, or us. # CSS results are therefore taken from R and gretl <class_stmt>ARIMA111(object)<block_start><def_stmt>__init__ self method="mle"<block_start>self.k_ar=1<line_sep>self.k_diff=1<line_sep>self.k_ma=1<if_stmt>method<eq>"mle"# from stata <block_start><import_from_stmt>.arima111_results results<line_sep># unpack stata results self.__dict__.update(results)<line_sep>self.resid=self.resid[1:]<line_sep>self.params=self.params[:-1]<line_sep>self.sigma2=self.sigma<power>2<line_sep>self.aic=self.icstats[4]<line_sep>self.bic=self.icstats[5]<line_sep>self.fittedvalues=self.xb[1:]# no idea why this initial value self.linear=self.y[1:]<line_sep># stata bse are OPG # self.bse = np.diag(self.cov_params) ** .5 # from gretl self.arroots=[1.0640+0j]<line_sep>self.maroots=[1.2971+0j]<line_sep>self.hqic=496.8653<line_sep>self.aic_gretl=491.5112<line_sep>self.bic_gretl=504.7442<line_sep>self.tvalues=[4.280 20.57 -8.590]<line_sep>self.pvalues=[1.87e-5 5.53e-94 8.73e-18]<line_sep>self.cov_params=[[0.0423583 -0.00167449 0.00262911] [-0.00167449 0.00208858 -0.0035068] [0.00262911 -0.0035068 0.00805622]]<line_sep>self.bse=np.sqrt(np.diag(self.cov_params))<line_sep># these bse are approx [.205811, .0457010, .0897565] # from stata # forecast = genfromtxt(open(cur_dir+"/arima111_forecasts.csv"), # delimiter=",", skip_header=1, usecols=[1,2,3,4,5]) # self.forecast = forecast[203:,1] # self.fcerr = forecast[203:,2] # self.fc_conf_int = forecast[203:,3:] # from gretl self.forecast=forecast_results['fc111c'][-25:]<line_sep>self.forecasterr=forecast_results['fc111cse'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc111cdyn']<line_sep>self.forecasterr_dyn=forecast_results['fc111cdynse']<block_end><else_stmt># coefs, bse, tvalues, and pvalues taken from R because gretl # uses mean not constant <block_start>self.bse=[0.21583833 0.03844939 0.08566390]<line_sep>self.params=[1.0087257 0.9455393 -0.8021834]<line_sep>self.sigma2=0.6355913<line_sep>self.tvalues=[4.673524 24.591788 -9.364311]<line_sep>self.pvalues=[5.464467e-06 0 0]<line_sep>self.cov_params=np.array([[0.046586183 0.002331183 -0.004647432] [0.002331183 0.001478356 -0.002726201] [-0.004647432 -0.002726201 0.007338304]])<line_sep># from gretl self.llf=-239.6601<line_sep>self.aic=487.3202<line_sep>self.bic=500.5334<line_sep>self.hqic=492.6669<line_sep>self.arroots=[1.0578+0j]<line_sep>self.maroots=[1.2473+0j]<line_sep># cov_params = np.array([[0.00369569, -0.00271777, 0.00269806], # [0, 0.00209573, -0.00224559], # [0, 0, 0.00342769]]) # self.cov_params = cov_params + cov_params.T - \ # np.diag(np.diag(cov_params)) # self.bse = np.sqrt(np.diag(self.cov_params)) self.resid=[-0.015830 -0.236884 -0.093946 -0.281152 -0.089983 -0.226336 -0.351666 -0.198703 -0.258418 -0.259026 -0.149513 -0.325703 -0.165703 -0.279229 -0.295711 -0.120018 -0.289870 -0.154243 -0.348403 -0.273902 -0.240894 -0.182791 -0.252930 -0.152441 -0.296412 
-0.128941 0.024068 -0.243972 -0.011436 -0.392437 -0.217022 -0.118190 -0.133489 -0.045755 -0.169953 0.025010 -0.107754 -0.119661 0.070794 -0.065586 -0.080390 0.007741 -0.016138 -0.235283 -0.121907 -0.125546 -0.428463 -0.087713 -0.298131 -0.277757 -0.261422 -0.248326 -0.137826 -0.043771 0.437100 -0.150051 0.751890 0.424180 0.450514 0.277089 0.732583 0.225086 -0.403648 -0.040509 -0.132975 -0.112572 -0.696214 0.003079 -0.003491 -0.108758 0.401383 -0.162302 -0.141547 0.175094 0.245346 0.607134 0.519045 0.248419 0.920521 1.097613 0.755983 1.271156 1.216969 -0.121014 0.340712 0.732750 0.068915 0.603912 0.060157 -0.803110 -1.044392 1.040311 -0.984497 -1.611668 -0.258198 -0.112970 -0.091071 0.226487 0.097475 -0.311423 -0.061105 -0.449488 0.317277 -0.329734 -0.181248 0.443263 -2.223262 0.096836 -0.033782 0.456032 0.476052 0.197564 0.263362 0.021578 0.216803 0.284249 0.343786 0.196981 0.773819 0.169070 -0.343097 0.918962 0.096363 0.298610 1.571685 -0.236620 -1.073822 -0.194208 -0.250742 -0.101530 -0.076437 -0.056319 0.059811 -0.041620 -0.128404 -0.403446 0.059654 -0.347208 -0.095257 0.217668 -0.015057 0.087431 0.275062 -0.263580 -0.122746 0.195629 0.367272 -0.184188 0.146368 0.127777 -0.587128 -0.498538 0.172490 -0.456741 -0.694000 0.199392 -0.140634 -0.029636 0.364818 -0.097080 0.510745 0.230842 0.595504 0.709721 0.012218 0.520223 -0.445174 -0.168341 -0.935465 -0.894203 0.733417 -0.279707 0.258861 0.417969 -0.443542 -0.477955 0.288992 0.442126 0.075826 0.665759 0.571509 -0.204055 0.835901 -0.375693 3.292828 -1.469299 -0.122206 0.617909 -2.250468 0.570871 1.166013 0.079873 0.463372 1.981434 -0.142869 3.023376 -3.713161 -6.120150 -0.007487 1.267027 1.176930]<line_sep>self.linear=[29.3658 29.6069 29.6339 29.8312 29.8400 30.0663 30.1617 30.1187 30.2384 30.2990 30.3595 30.5457 30.5457 30.7192 30.7757 30.8100 31.0399 31.0942 31.2984 31.2939 31.3609 31.4628 31.6329 31.7324 31.9464 32.0089 32.2559 32.6940 32.8614 33.2924 33.3170 33.5182 33.8335 34.1458 34.5700 34.8750 35.4078 35.8197 36.2292 36.8656 37.3804 37.8923 38.5161 39.1353 39.5219 40.0255 40.5285 40.6877 41.1981 41.4778 41.7614 42.0483 42.3378 42.7438 43.2629 44.3501 44.8481 46.3758 47.6495 49.0229 50.2674 52.0749 53.4036 54.0405 55.0330 55.9126 56.7962 56.9969 57.9035 58.8088 59.5986 60.9623 61.7415 62.5249 63.6547 64.8929 66.5810 68.2516 69.6795 71.9024 74.4440 76.7288 79.6830 82.7210 84.3593 86.4672 89.0311 90.8961 93.3398 95.2031 96.0444 96.4597 99.0845 99.5117 99.0582 99.9130 100.8911 101.8735 103.2025 104.4114 105.1611 106.1495 106.6827 108.0297 108.6812 109.4567 110.9233 109.4032 110.2338 110.9440 112.2239 113.6024 114.7366 115.9784 116.9832 118.2158 119.5562 121.0030 122.3262 124.3309 125.7431 126.5810 128.8036 130.2014 131.8283 134.9366 136.1738 136.3942 137.4507 138.4015 139.4764 140.5563 141.6402 142.8416 143.9284 144.9034 145.5403 146.6472 147.2953 148.1823 149.4151 150.4126 151.5249 152.8636 153.6227 154.5044 155.7327 157.1842 158.0536 159.2722 160.4871 160.8985 161.3275 162.4567 162.8940 163.0006 164.0406 164.7296 165.5352 166.7971 167.5893 169.0692 170.3045 171.9903 173.8878 175.0798 176.8452 177.5683 178.5355 178.5942 178.5666 180.2797 180.9411 182.1820 183.6435 184.1780 184.6110 185.8579 187.3242 188.4342 190.2285 192.0041 192.9641 195.0757 195.9072 200.8693 200.8222 202.0821 204.1505 203.0031 204.7540 207.2581 208.6696 210.5136 214.1399 215.5866 220.6022 218.2942 212.6785 213.2020 215.2081]<line_sep># forecasting is not any different for css # except you lose the first p+1 observations for in-sample # these results are from x-12 
arima self.forecast=forecast_results['fc111c_css'][-25:]<line_sep>self.forecasterr=forecast_results['fc111cse_css'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc111cdyn_css']<line_sep>self.forecasterr_dyn=forecast_results['fc111cdynse_css']<block_end><block_end><block_end><class_stmt>ARIMA211(object)<block_start><def_stmt>__init__ self method="mle"<block_start><if_stmt>method<eq>'mle'# from stata <block_start><import_from_stmt>.arima111_results results<line_sep>self.__dict__.update(results)<line_sep>self.resid=self.resid[1:]<line_sep>self.params=self.params[:-1]<line_sep>self.sigma2=self.sigma<power>2<line_sep>self.aic=self.icstats[4]<line_sep>self.bic=self.icstats[5]<line_sep>self.fittedvalues=self.xb[1:]# no idea why this initial value self.linear=self.y[1:]<line_sep>self.k_diff=1<line_sep># stata bse are OPG # self.bse = np.diag(self.cov_params) ** .5 # from gretl self.arroots=[1.027+0j 5.7255+0j]<line_sep>self.maroots=[1.1442+0j]<line_sep>self.hqic=496.5314<line_sep>self.aic_gretl=489.8388<line_sep>self.bic_gretl=506.3801<line_sep>self.tvalues=[3.468 11.14 -1.941 12.55]<line_sep>self.pvalues=[.0005 8.14e-29 .0522 3.91e-36]<line_sep>cov_params=np.array([[0.0616906 -0.00250187 0.0010129 0.00260485] [0 0.0105302 -0.00867819 -0.00525614] [0 0 0.00759185 0.00361962] [0 0 0 0.00484898]])<line_sep>self.cov_params=(cov_params+cov_params.T-np.diag(np.diag(cov_params)))<line_sep>self.bse=np.sqrt(np.diag(self.cov_params))<line_sep># these bse are approx [0.248376, 0.102617, 0.0871312, 0.0696346] self.forecast=forecast_results['fc211c'][-25:]<line_sep>self.forecasterr=forecast_results['fc211cse'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc211cdyn'][-25:]<line_sep>self.forecasterr_dyn=forecast_results['fc211cdynse'][-25:]<block_end><else_stmt><block_start><import_from_stmt>.arima211_css_results results<line_sep>self.__dict__.update(results)<line_sep>self.resid=self.resid[1:]<line_sep>self.params=self.params[:-1]<line_sep>self.sigma2=self.sigma<power>2<line_sep>self.aic=self.icstats[4]<line_sep>self.bic=self.icstats[5]<line_sep>self.fittedvalues=self.xb[1:]# no idea why this initial value self.linear=self.y[1:]<line_sep>self.k_diff=1<line_sep># from gretl self.arroots=[1.0229+0j 4.4501+0j]<line_sep>self.maroots=[1.0604+0j]<line_sep>self.hqic=489.3225<line_sep>self.aic_gretl=482.6486<line_sep>self.bic_gretl=499.1402<line_sep>self.tvalues=[.7206 22.54 -19.04]<line_sep>self.pvalues=[.4712 1.52e-112 2.19e-10 8.00e-81]<line_sep>cov_params=np.array([[8.20496e-04 -0.0011992 4.57078e-04 0.00109907] [0 0.00284432 -0.0016752 -0.00220223] [0 0 0.00119783 0.00108868] [0 0 0 0.00245324]])<line_sep>self.cov_params=(cov_params+cov_params.T-np.diag(np.diag(cov_params)))<line_sep>self.bse=np.sqrt(np.diag(self.cov_params))<line_sep># forecasting is not any different for css # except you lose the first p+1 observations for in-sample self.forecast=forecast_results['fc111c_css'][-25:]<line_sep>self.forecasterr=forecast_results['fc111cse_css'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc111cdyn_css']<line_sep>self.forecasterr_dyn=forecast_results['fc111cdynse_css']<block_end><block_end><block_end><class_stmt>ARIMA112(object)<block_start><def_stmt>__init__ self method="mle"<block_start>self.df_model=3<line_sep>self.k=5<line_sep>self.k_ar=1<line_sep>self.k_ma=2<line_sep>self.k_exog=1<line_sep>self.k_diff=1<if_stmt>method<eq>"mle"<block_start><import_from_stmt>.arima112_results results<line_sep># from gretl self.arroots=[1.0324+0j]<line_sep>self.maroots=[1.1447+0j 
-4.8613+0j]<line_sep>self.hqic=495.5852<line_sep>self.aic_gretl=488.8925<line_sep>self.bic_gretl=505.4338<line_sep>self.tvalues=[3.454 31.10 -7.994 -2.127]<line_sep>self.pvalues=[0.0006 2.1e-212 1.31e-15 .0334]<line_sep>cov_params=np.array([[0.0620096 -0.00172172 0.00181301 0.00103271] [0 9.69682e-04 -9.70767e-04 -8.99814e-04] [0 0 0.00698068 -0.00443871] [0 0 0 0.00713662]])<line_sep>self.cov_params=(cov_params+cov_params.T-np.diag(np.diag(cov_params)))<line_sep>self.bse=np.sqrt(np.diag(self.cov_params))<line_sep># from gretl self.forecast=forecast_results['fc112c'][-25:]<line_sep>self.forecasterr=forecast_results['fc112cse'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc112cdyn']<line_sep>self.forecasterr_dyn=forecast_results['fc112cdynse']<line_sep># unpack stata results self.__dict__=results<line_sep>self.resid=self.resid[1:]<line_sep>self.params=self.params[:-1]<line_sep>self.sigma2=self.sigma<power>2<line_sep>self.aic=self.icstats[4]<line_sep>self.bic=self.icstats[5]<line_sep>self.fittedvalues=self.xb[1:]# no idea why this initial value self.linear=self.y[1:]<line_sep># stata bse are OPG # self.bse = np.diag(self.cov_params) ** .5 <block_end><else_stmt># NOTE: this looks like a "hard" problem # unable to replicate stata's results even with their starting # values # unable to replicate x12 results in stata using their starting # values. x-12 has better likelihood and we can replicate so # use their results # taken from R using X12-arima values as init params <block_start>self.bse=[0.07727588 0.09356658 0.10503567 0.07727970]<line_sep>self.params=[0.9053219 -0.692412 1.0736728 0.1720008]<line_sep>self.sigma2=0.6820727<line_sep>self.tvalues=[11.715452 -7.400215 10.221983 2.225692]<line_sep>self.pvalues=[0 3.791634e-12 0 2.716275e-02]<line_sep>self.cov_params=np.array([[0.0059715623 0.001327824 -0.001592129 -0.0008061933] [0.0013278238 0.008754705 -0.008024634 -0.0045933413] [-0.0015921293 -0.008024634 0.011032492 0.0072509641] [-0.0008061933 -0.004593341 0.007250964 0.0059721516]])<line_sep># from x12arima via gretl # gretl did not converge for this model... 
self.llf=-246.7534<line_sep>self.nobs=202<line_sep># self.params = [.905322, -.692425, 1.07366, 0.172024] # self.sigma2 = 0.682072819129 # self.bse = [0.0756430, 0.118440, 0.140691, 0.105266] self.resid=[-1.214477 -0.069772 -1.064510 -0.249555 -0.874206 -0.322177 -1.003579 -0.310040 -0.890506 -0.421211 -0.715219 -0.564119 -0.636560 -0.580912 -0.717440 -0.424277 -0.747835 -0.424739 -0.805958 -0.516877 -0.690127 -0.473072 -0.694766 -0.435627 -0.736474 -0.388060 -0.429596 -0.557224 -0.342308 -0.741842 -0.442199 -0.491319 -0.420884 -0.388057 -0.466176 -0.257193 -0.429646 -0.349683 -0.205870 -0.335547 -0.290300 -0.216572 -0.234272 -0.427951 -0.255446 -0.338097 -0.579033 -0.213860 -0.556756 -0.389907 -0.510060 -0.409759 -0.396778 -0.258727 0.160063 -0.467109 0.688004 -0.021120 0.503044 0.031500 0.878365 -0.003548 -0.079327 0.038289 0.032773 -0.050780 -0.560124 0.185655 -0.111981 -0.020714 0.363254 -0.218484 -0.006161 0.165950 0.252365 0.599220 0.488921 0.347677 1.079814 1.102745 0.959907 1.570836 1.454934 0.343521 1.125826 1.154059 0.666141 1.269685 0.551831 -0.027476 -0.305192 1.715665 -0.990662 -0.548239 -0.011636 0.197796 -0.050128 0.480031 0.061198 -0.049562 0.064436 -0.300420 0.494730 -0.411527 0.109242 0.375255 -2.184482 0.717733 -0.673064 0.751681 -0.092543 0.438016 -0.024881 0.250085 0.096010 0.452618 0.265491 0.374299 0.820424 0.238176 -0.059646 1.214061 0.028679 0.797567 1.614444 -0.094717 -0.408067 0.299198 -0.021561 0.231915 0.084190 0.199192 0.201132 0.148509 0.035431 -0.203352 0.264744 -0.319785 0.150305 0.184628 0.074637 0.148340 0.357372 -0.241250 0.119294 0.204413 0.458730 -0.190477 0.416587 0.084216 -0.363361 -0.310339 0.309728 -0.549677 -0.449092 0.183025 -0.259015 -0.000883 0.267255 -0.188068 0.577697 0.049310 0.746401 0.565829 0.178270 0.709983 -0.348012 0.273262 -0.873288 -0.403100 0.720072 -0.428076 0.488246 0.248152 -0.313214 -0.323137 0.414843 0.308909 0.134180 0.732275 0.535639 -0.056128 1.128355 -0.449151 3.879123 -2.303860 1.712549 -0.074407 -1.162052 0.848316 1.262031 0.009320 1.017563 1.978597 -0.001637 3.782223 -4.119563 -3.666488 0.345244 0.869998 0.635321]<line_sep>self.linear=[30.5645 29.4398 30.6045 29.7996 30.6242 30.1622 30.8136 30.2300 30.8705 30.4612 30.9252 30.7841 31.0166 31.0209 31.1974 31.1143 31.4978 31.3647 31.7560 31.5369 31.8101 31.7531 32.0748 32.0156 32.3865 32.2681 32.7096 33.0072 33.1923 33.6418 33.5422 33.8913 34.1209 34.4881 34.8662 35.1572 35.7296 36.0497 36.5059 37.1355 37.5903 38.1166 38.7343 39.3280 39.6554 40.2381 40.6790 40.8139 41.4568 41.5899 42.0101 42.2098 42.5968 42.9587 43.5399 44.6671 44.9120 46.8211 47.5970 49.2685 50.1216 52.3035 53.0793 53.9617 54.8672 55.8508 56.6601 56.8143 58.0120 58.7207 59.6367 61.0185 61.6062 62.5340 63.6476 64.9008 66.6111 68.1523 69.5202 71.8973 74.2401 76.4292 79.4451 82.2565 83.5742 86.0459 88.4339 90.2303 92.8482 94.4275 95.3052 95.7843 99.0907 98.4482 98.8116 99.6022 100.8501 101.6200 103.2388 104.1496 105.0356 106.0004 106.5053 108.1115 108.3908 109.5247 110.8845 108.7823 110.8731 110.6483 112.7925 113.3620 115.0249 115.7499 117.1040 118.0474 119.6345 120.8257 122.2796 124.2618 125.4596 126.2859 128.8713 129.7024 131.7856 134.7947 135.5081 135.9008 137.2216 138.0681 139.3158 140.3008 141.4989 142.6515 143.7646 144.7034 145.3353 146.6198 147.0497 148.2154 149.3254 150.3517 151.4426 152.8413 153.3807 154.4956 155.6413 157.1905 157.7834 159.3158 160.2634 160.7103 161.1903 162.5497 162.6491 163.0170 164.1590 164.7009 165.6327 166.8881 167.5223 169.2507 170.1536 172.1342 173.7217 174.8900 176.7480 
177.1267 178.4733 178.1031 178.5799 180.4281 180.7118 182.3518 183.5132 184.0231 184.4852 185.9911 187.2658 188.3677 190.2644 191.8561 192.6716 195.1492 195.3209 201.7039 198.9875 202.7744 203.0621 202.7257 204.6580 207.3287 208.1154 210.5164 213.9986 214.8278 221.0086 215.8405 212.3258 213.5990 215.7497]<line_sep>self.yr=[]<line_sep>self.arroots=[-1.4442+0j]<line_sep>self.maroots=[-1.1394+0j -5.1019+0j]<line_sep>self.hqic=510.1902<line_sep>self.aic=503.5069<line_sep>self.bic=520.0234<line_sep># TODO: Document source for these non-used results # (and why they are not used) # self.tvalues = [11.97, -5.846, 7.631, 1.634] # self.pvalues = [5.21e-33, 5.03e-9, 2.32e-14, .1022] # cov_params = np.array([ # [0.0620096, -0.00172172, 0.00181301, 0.00103271], # [0, 9.69682e-04, -9.70767e-04, -8.99814e-04], # [0, 0, 0.00698068, -0.00443871], # [0, 0, 0, 0.00713662]]) # self.cov_params = cov_params + cov_params.T - \ # np.diag(np.diag(cov_params)) # self.bse = np.sqrt(np.diag(self.cov_params)) self.forecast=forecast_results['fc112c_css'][-25:]<line_sep>self.forecasterr=forecast_results['fc112cse_css'][-25:]<line_sep>self.forecast_dyn=forecast_results['fc112cdyn_css']<line_sep>self.forecasterr_dyn=forecast_results['fc112cdynse_css']<block_end><block_end><block_end>
from clpy.manipulation.basic import *  # NOQA
import logging


def get_logger():
    # Use the root logger
    return logging.getLogger()
''' show_config.py IOSXE parsers for the following show command * show configuration lock '''<line_sep># Python <import_stmt>re<line_sep># Metaparser <import_from_stmt>genie.metaparser MetaParser<import_from_stmt>genie.metaparser.util.schemaengine Schema Optional Any<line_sep># ================================================== # Parser for 'show configuration lock' # ================================================== <class_stmt>ShowConfigurationLockSchema(MetaParser)<block_start>""" Schema for show configuration lock """<line_sep>schema={Optional('config_session_lock'):{Optional('owner_pid'):{Any():{'tty_number':int 'tty_username':str 'user_debug_info':str 'lock_active_time_in_sec':int }}} Optional('parser_configure_lock'):{Optional('owner_pid'):{Any():{Optional('user'):str Optional('tty'):int Optional('type'):str Optional('state'):str Optional('class'):str Optional('count'):int Optional('pending_requests'):int Optional('user_debug_info'):str Optional('session_idle_state'):str Optional('num_of_exec_cmds_executed'):int Optional('num_of_exec_cmds_blocked'):int Optional('config_wait_for_show_completion'):str Optional('remote_ip_address'):str Optional('lock_active_time_in_sec'):int Optional('lock_expiration_timer_in_sec'):int }}}}<block_end><class_stmt>ShowConfigurationLock(ShowConfigurationLockSchema)<block_start>""" Parser for show configuration lock"""<line_sep>cli_command='show configuration lock'<def_stmt>cli self output=<none><block_start><if_stmt>output<is><none># execute command to get output <block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end># initial variables ret_dict={}<line_sep>parser_lock_found=<false><line_sep># Owner PID : -1 # Owner PID : 543 # Owner PID :10 p1=re.compile(r'^\s*Owner +PID +: *(?P<owner_pid>(\-)?\d+)$')<line_sep># TTY number : 2 p2=re.compile(r'^\s*TTY +number +: +(?P<tty_number>\d+)$')<line_sep># TTY username : Test1 p3=re.compile(r'^\s*TTY +username +: +(?P<tty_username>\S+)$')<line_sep># User debug info : CLI Session Lock p4=re.compile(r'^\s*User +debug +info +: '<concat>'+(?P<user_debug_info>(\w+ *)+)$')<line_sep># Look Active time (in Sec) : 63 p5=re.compile(r'^\s*Lock +(a|A)ctive +time +\(in +Sec\) +: '<concat>'+(?P<lock_active_time_in_sec>\d+)$')<line_sep># Parser Configure Lock p6=re.compile(r'^\s*Parser +Configure +Lock$')<line_sep># User : User1 # User:User1 p7=re.compile(r'^\s*User *: *(?P<user>\S+)$')<line_sep># TTY : 3 # TTY:3 p8=re.compile(r'^\s*TTY *: *(?P<tty>(\-)?\d+)$')<line_sep># Type : EXCLUSIVE # Type:EXCLUSIVE p9=re.compile(r'^\s*Type *: *(?P<type>[\w\W]+)$')<line_sep># State : LOCKED # State:LOCKED p10=re.compile(r'^\s*State *: *(?P<state>\S+)$')<line_sep># Class : Exposed # Class:Exposed p11=re.compile(r'^\s*Class *: *(?P<class_name>\S+)$')<line_sep># Count : 0 # Count:0 p12=re.compile(r'^\s*Count *: *(?P<count>\d+)$')<line_sep># Pending Requests : 0 # Pending Requests:0 p13=re.compile(r'^\s*Pending +Requests *: '<concat>'*(?P<pending_requests>\d+)$')<line_sep># User debug info : 0 # User debug info:0 p14=re.compile(r'^\s*User +debug +info *: '<concat>'*(?P<user_debug_info>[\w\W]+)$')<line_sep># Session idle state : TRUE p15=re.compile(r'^Session +idle +state *: *(?P<session_idle_state>[\w]+)$')<line_sep># No of exec cmds getting executed : 0 p16=re.compile(r'^No +of +exec +cmds +getting +executed *: *(?P<num_of_exec_cmds_executed>\d+)$')<line_sep># No of exec cmds blocked : 0 p17=re.compile(r'^No +of +exec +cmds +blocked *: *(?P<num_of_exec_cmds_blocked>\d+)$')<line_sep># 
Config wait for show completion : FALSE p18=re.compile(r'^Config +wait +for +show +completion *: *(?P<config_wait_for_show_completion>[\w]+)$')<line_sep># Remote ip address : Unknown p19=re.compile(r'^Remote +ip +address *: *(?P<remote_ip_address>[\w]+)$')<line_sep># Lock Expiration timer (in Sec) : 593 p20=re.compile(r'^Lock +Expiration +timer +\(in +Sec\) *: *(?P<lock_expiration_timer_in_sec>[\w]+)$')<for_stmt>line out.splitlines()<block_start>line=line.strip()<if_stmt><not>parser_lock_found# Owner PID : 513 <block_start>m=p1.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>config_session_lock=ret_dict.setdefault('config_session_lock' {}).setdefault('owner_pid' {}).setdefault(int(group['owner_pid']) {})<line_sep><continue><block_end># TTY number : 2 m=p2.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>config_session_lock.update({'tty_number':int(group['tty_number'])})<line_sep><continue><block_end># TTY username : Test1 m=p3.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>config_session_lock.update({'tty_username':group['tty_username']})<line_sep><continue><block_end># User debug info : CLI Session Lock m=p4.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>config_session_lock.update({'user_debug_info':group['user_debug_info']})<line_sep><continue><block_end># Lock Active time (in Sec) : 63 m=p5.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>config_session_lock.update({'lock_active_time_in_sec':int(group['lock_active_time_in_sec'])})<line_sep><continue><block_end># Parser Configure Lock m=p6.match(line)<if_stmt>m<block_start>parser_lock_found=<true><line_sep><continue><block_end><block_end><else_stmt># Owner PID : 10 <block_start>m=p1.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock=ret_dict.setdefault('parser_configure_lock' {}).setdefault('owner_pid' {}).setdefault(int(group['owner_pid']) {})<line_sep><continue><block_end># User : User1 m=p7.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'user':group['user']})<line_sep><continue><block_end># TTY : 3 m=p8.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'tty':int(group['tty'])})<line_sep><continue><block_end># Type : Exclusive m=p9.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'type':group['type']})<line_sep><continue><block_end># State : Locked m=p10.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'state':group['state']})<line_sep><continue><block_end># Class : Exposed m=p11.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'class':group['class_name']})<line_sep><continue><block_end># Count : 0 m=p12.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'count':int(group['count'])})<line_sep><continue><block_end># Pending Requests : 0 m=p13.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'pending_requests':int(group['pending_requests'])})<line_sep><continue><block_end># User debug info : 0 m=p14.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'user_debug_info':group['user_debug_info']})<line_sep><continue><block_end># Session idle state : TRUE 
m=p15.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'session_idle_state':group['session_idle_state']})<line_sep><continue><block_end># No of exec cmds getting executed : 0 m=p16.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'num_of_exec_cmds_executed':int(group['num_of_exec_cmds_executed'])})<line_sep><continue><block_end># No of exec cmds blocked : 0 m=p17.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'num_of_exec_cmds_blocked':int(group['num_of_exec_cmds_blocked'])})<line_sep><continue><block_end># Config wait for show completion : FALSE m=p18.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'config_wait_for_show_completion':group['config_wait_for_show_completion']})<line_sep><continue><block_end># Remote ip address : Unknown m=p19.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'remote_ip_address':group['remote_ip_address']})<line_sep><continue><block_end># Lock Expiration timer (in Sec) : 593 m=p20.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'lock_expiration_timer_in_sec':int(group['lock_expiration_timer_in_sec'])})<line_sep><continue><block_end># Lock Active time (in Sec) : 63 m=p5.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>parser_configure_lock.update({'lock_active_time_in_sec':int(group['lock_active_time_in_sec'])})<line_sep><continue><block_end><block_end><block_end><return>ret_dict<block_end><block_end>
import FWCore.ParameterSet.Config as cms

l1tBmtfAlgoSelector = cms.EDProducer(
    'L1TBMTFAlgoSelector',
    # verbose = cms.untracked.bool(False),
    bmtfKalman=cms.InputTag("simKBmtfDigis:BMTF"),
    bmtfLegacy=cms.InputTag("simBmtfDigis:BMTF"),
    feds=cms.InputTag("rawDataCollector"),
)
# Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>inspect isclass<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>pyro.distributions.util broadcast_shape<class_stmt>Exp(nn.Module)<block_start>""" a custom module for exponentiation of tensors """<def_stmt>__init__ self<block_start>super().__init__()<block_end><def_stmt>forward self val<block_start><return>torch.exp(val)<block_end><block_end><class_stmt>ConcatModule(nn.Module)<block_start>""" a custom module for concatenation of tensors """<def_stmt>__init__ self allow_broadcast=<false><block_start>self.allow_broadcast=allow_broadcast<line_sep>super().__init__()<block_end><def_stmt>forward self *input_args# we have a single object <block_start><if_stmt>len(input_args)<eq>1# regardless of type, # we don't care about single objects # we just index into the object <block_start>input_args=input_args[0]<block_end># don't concat things that are just single objects <if_stmt>torch.is_tensor(input_args)<block_start><return>input_args<block_end><else_stmt><block_start><if_stmt>self.allow_broadcast<block_start>shape=broadcast_shape(*[s.shape[:-1]<for>s input_args])+(-1 )<line_sep>input_args=[s.expand(shape)<for>s input_args]<block_end><return>torch.cat(input_args dim=-1)<block_end><block_end><block_end><class_stmt>ListOutModule(nn.ModuleList)<block_start>""" a custom module for outputting a list of tensors from a list of nn modules """<def_stmt>__init__ self modules<block_start>super().__init__(modules)<block_end><def_stmt>forward self *args **kwargs# loop over modules in self, apply same args <block_start><return>[mm.forward(*args **kwargs)<for>mm self]<block_end><block_end><def_stmt>call_nn_op op<block_start>""" a helper function that adds appropriate parameters when calling an nn module representing an operation like Softmax :param op: the nn.Module operation to instantiate :return: instantiation of the op module with appropriate parameters """<if_stmt>op<in>[nn.Softmax nn.LogSoftmax]<block_start><return>op(dim=1)<block_end><else_stmt><block_start><return>op()<block_end><block_end><class_stmt>MLP(nn.Module)<block_start><def_stmt>__init__ self mlp_sizes activation=nn.ReLU output_activation=<none> post_layer_fct=<lambda>layer_ix total_layers layer:<none> post_act_fct=<lambda>layer_ix total_layers layer:<none> allow_broadcast=<false> use_cuda=<false> # init the module object <block_start>super().__init__()<assert_stmt>len(mlp_sizes)<ge>2 "Must have input and output layer sizes defined"<line_sep># get our inputs, outputs, and hidden input_size,hidden_sizes,output_size=(mlp_sizes[0] mlp_sizes[1:-1] mlp_sizes[-1] )<line_sep># assume int or list <assert_stmt>isinstance(input_size (int list tuple)) "input_size must be int, list, tuple"<line_sep># everything in MLP will be concatted if it's multiple arguments last_layer_size=input_size<if>type(input_size)<eq>int<else>sum(input_size)<line_sep># everything sent in will be concatted together by default all_modules=[ConcatModule(allow_broadcast)]<line_sep># loop over l <for_stmt>layer_ix,layer_size enumerate(hidden_sizes)<block_start><assert_stmt>type(layer_size)<eq>int "Hidden layer sizes must be ints"<line_sep># get our nn layer module (in this case nn.Linear by default) cur_linear_layer=nn.Linear(last_layer_size layer_size)<line_sep># for numerical stability -- initialize the layer properly cur_linear_layer.weight.data.normal_(0 0.001)<line_sep>cur_linear_layer.bias.data.normal_(0 0.001)<line_sep># use GPUs to share data during training (if 
available) <if_stmt>use_cuda<block_start>cur_linear_layer=nn.DataParallel(cur_linear_layer)<block_end># add our linear layer all_modules.append(cur_linear_layer)<line_sep># handle post_linear post_linear=post_layer_fct(layer_ix+1 len(hidden_sizes) all_modules[-1])<line_sep># if we send something back, add it to sequential # here we could return a batch norm for example <if_stmt>post_linear<is><not><none><block_start>all_modules.append(post_linear)<block_end># handle activation (assumed no params -- deal with that later) all_modules.append(activation())<line_sep># now handle after activation post_activation=post_act_fct(layer_ix+1 len(hidden_sizes) all_modules[-1])<line_sep># handle post_activation if not null # could add batch norm for example <if_stmt>post_activation<is><not><none><block_start>all_modules.append(post_activation)<block_end># save the layer size we just created last_layer_size=layer_size<block_end># now we have all of our hidden layers # we handle outputs <assert_stmt>isinstance(output_size (int list tuple)) "output_size must be int, list, tuple"<if_stmt>type(output_size)<eq>int<block_start>all_modules.append(nn.Linear(last_layer_size output_size))<if_stmt>output_activation<is><not><none><block_start>all_modules.append(call_nn_op(output_activation)<if>isclass(output_activation)<else>output_activation)<block_end><block_end><else_stmt># we're going to have a bunch of separate layers we can spit out (a tuple of outputs) <block_start>out_layers=[]<line_sep># multiple outputs? handle separately <for_stmt>out_ix,out_size enumerate(output_size)# for a single output object, we create a linear layer and some weights <block_start>split_layer=[]<line_sep># we have an activation function split_layer.append(nn.Linear(last_layer_size out_size))<line_sep># then we get our output activation (either we repeat all or we index into a same sized array) act_out_fct=(output_activation<if><not>isinstance(output_activation (list tuple))<else>output_activation[out_ix])<if_stmt>act_out_fct# we check if it's a class. if so, instantiate the object # otherwise, use the object directly (e.g. pre-instaniated) <block_start>split_layer.append(call_nn_op(act_out_fct)<if>isclass(act_out_fct)<else>act_out_fct)<block_end># our outputs is just a sequential of the two out_layers.append(nn.Sequential(*split_layer))<block_end>all_modules.append(ListOutModule(out_layers))<block_end># now we have all of our modules, we're ready to build our sequential! # process mlps in order, pretty standard here self.sequential_mlp=nn.Sequential(*all_modules)<block_end># pass through our sequential for the output! <def_stmt>forward self *args **kwargs<block_start><return>self.sequential_mlp.forward(*args **kwargs)<block_end><block_end>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from compas.geometry import Point

from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface

from ._primitives import point_to_rhino
from ._primitives import point_to_compas


def surface_to_compas_data(surface):
    """Convert a Rhino surface to a COMPAS surface.

    Parameters
    ----------
    surface: :class:`Rhino.Geometry.Surface`

    Returns
    -------
    :obj:`dict`
    """
    surface = surface.ToNurbsSurface()
    points = []
    weights = []
    for j in range(surface.Points.VCount):
        _points = []
        _weights = []
        for i in range(surface.Points.UCount):
            point = surface.Points.GetPoint(i, j)
            weight = surface.Points.GetWeight(i, j)
            _points.append(point_to_compas(point))
            _weights.append(weight)
        points.append(_points)
        weights.append(_weights)
    u_knots = []
    u_mults = []
    for index in range(surface.KnotsU.Count):
        u_knots.append(surface.KnotsU.Item[index])
        u_mults.append(surface.KnotsU.KnotMultiplicity(index))
    v_knots = []
    v_mults = []
    for index in range(surface.KnotsV.Count):
        v_knots.append(surface.KnotsV.Item[index])
        v_mults.append(surface.KnotsV.KnotMultiplicity(index))
    u_degree = surface.OrderU - 1
    v_degree = surface.OrderV - 1
    is_u_periodic = False
    is_v_periodic = False
    return {
        'points': [[point.data for point in row] for row in points],
        'weights': weights,
        'u_knots': u_knots,
        'v_knots': v_knots,
        'u_mults': u_mults,
        'v_mults': v_mults,
        'u_degree': u_degree,
        'v_degree': v_degree,
        'is_u_periodic': is_u_periodic,
        'is_v_periodic': is_v_periodic,
    }


def data_to_rhino_surface(data):
    """Convert a COMPAS surface to a Rhino surface.

    Parameters
    ----------
    data: :obj:`dict`

    Returns
    -------
    :class:`Rhino.Geometry.NurbsSurface`
    """
    points = [[Point.from_data(point) for point in row] for row in data['points']]
    nu = len(points[0])
    nv = len(points)
    nurbs = RhinoNurbsSurface.Create(
        3, False, data['u_degree'] + 1, data['v_degree'] + 1, nu, nv
    )
    for i in range(nu):
        for j in range(nv):
            nurbs.Points.SetPoint(i, j, point_to_rhino(points[j][i]))
            nurbs.Points.SetWeight(i, j, data['weights'][j][i])
    u_knotvector = []
    for knot, mult in zip(data['u_knots'], data['u_mults']):
        for i in range(mult):
            u_knotvector.append(knot)
    for index, knot in enumerate(u_knotvector):
        nurbs.KnotsU.Item[index] = knot
    v_knotvector = []
    for knot, mult in zip(data['v_knots'], data['v_mults']):
        for i in range(mult):
            v_knotvector.append(knot)
    for index, knot in enumerate(v_knotvector):
        nurbs.KnotsV.Item[index] = knot
    return nurbs
import logging
from typing import List

import pytorch_lightning as pl
from hydra.utils import instantiate
from omegaconf import DictConfig

from nuplan.planning.script.builders.utils.utils_type import validate_type

logger = logging.getLogger(__name__)


def build_callbacks(cfg: DictConfig) -> List[pl.Callback]:
    """
    Build callbacks based on config.
    :param cfg: Dict config.
    :return List of callbacks.
    """
    logger.info('Building callbacks...')

    instantiated_callbacks = []

    for callback_type in cfg.values():
        callback: pl.Callback = instantiate(callback_type)
        validate_type(callback, pl.Callback)
        instantiated_callbacks.append(callback)

    logger.info('Building callbacks...DONE!')

    return instantiated_callbacks
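# A hedged illustration (not from the nuPlan codebase): build_callbacks walks
# the values of a DictConfig and instantiates each entry via its _target_.
# The callback class used is a stock PyTorch Lightning one; the config key
# name is made up for the example.
if __name__ == '__main__':
    from omegaconf import OmegaConf

    example_cfg = OmegaConf.create(
        {
            'learning_rate_monitor': {
                '_target_': 'pytorch_lightning.callbacks.LearningRateMonitor',
                'logging_interval': 'step',
            },
        }
    )
    callbacks = build_callbacks(example_cfg)
    print(callbacks)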
# -*- coding: utf-8 -*- <import_stmt>math<import_stmt>random<import_stmt>gym<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>memory EpisodicReplayMemory<import_from_stmt>model ActorCritic<import_from_stmt>utils state_to_tensor<line_sep># Knuth's algorithm for generating Poisson samples <def_stmt>_poisson lmbd<block_start>L,k,p=math.exp(-lmbd) 0 1<while_stmt>p<g>L<block_start>k<augadd>1<line_sep>p<augmul>random.uniform(0 1)<block_end><return>max(k-1 0)<block_end># Transfers gradients from thread-specific model to shared model <def_stmt>_transfer_grads_to_shared_model model shared_model<block_start><for_stmt>param,shared_param zip(model.parameters() shared_model.parameters())<block_start><if_stmt>shared_param.grad<is><not><none><block_start><return><block_end>shared_param._grad=param.grad<block_end><block_end># Adjusts learning rate <def_stmt>_adjust_learning_rate optimiser lr<block_start><for_stmt>param_group optimiser.param_groups<block_start>param_group['lr']=lr<block_end><block_end># Updates networks <def_stmt>_update_networks args T model shared_model shared_average_model loss optimiser# Zero shared and local grads <block_start>optimiser.zero_grad()<line_sep>""" Calculate gradients for gradient descent on loss functions Note that math comments follow the paper, which is formulated for gradient ascent """<line_sep>loss.backward()<line_sep># Gradient L2 normalisation nn.utils.clip_grad_norm_(model.parameters() args.max_gradient_norm)<line_sep># Transfer gradients to shared model and update _transfer_grads_to_shared_model(model shared_model)<line_sep>optimiser.step()<if_stmt>args.lr_decay# Linearly decay learning rate <block_start>_adjust_learning_rate(optimiser max(args.lr<times>(args.T_max-T.value())/args.T_max 1e-32))<block_end># Update shared_average_model <for_stmt>shared_param,shared_average_param zip(shared_model.parameters() shared_average_model.parameters())<block_start>shared_average_param=args.trust_region_decay<times>shared_average_param+(1-args.trust_region_decay)<times>shared_param<block_end><block_end># Computes an "efficient trust region" loss (policy head only) based on an existing loss and two distributions <def_stmt>_trust_region_loss model distribution ref_distribution loss threshold g k<block_start>kl=-(ref_distribution<times>(distribution.log()-ref_distribution.log())).sum(1).mean(0)<line_sep># Compute dot products of gradients k_dot_g=(k<times>g).sum(1).mean(0)<line_sep>k_dot_k=(k<power>2).sum(1).mean(0)<line_sep># Compute trust region update <if_stmt>k_dot_k.item()<g>0<block_start>trust_factor=((k_dot_g-threshold)/k_dot_k).clamp(min=0).detach()<block_end><else_stmt><block_start>trust_factor=torch.zeros(1)<block_end># z* = g - max(0, (k^T∙g - δ) / ||k||^2_2)∙k trust_loss=loss+trust_factor<times>kl<line_sep><return>trust_loss<block_end># Trains model <def_stmt>_train args T model shared_model shared_average_model optimiser policies Qs Vs actions rewards Qret average_policies old_policies=<none><block_start>off_policy=old_policies<is><not><none><line_sep>action_size=policies[0].size(1)<line_sep>policy_loss,value_loss=0 0<line_sep># Calculate n-step returns in forward view, stepping backwards from the last state t=len(rewards)<for_stmt>i reversed(range(t))# Importance sampling weights ρ ← π(∙|s_i) / µ(∙|s_i); 1 for on-policy <block_start><if_stmt>off_policy<block_start>rho=policies[i].detach()/old_policies[i]<block_end><else_stmt><block_start>rho=torch.ones(1 action_size)<block_end># Qret ← r_i + γQret 
Qret=rewards[i]+args.discount<times>Qret<line_sep># Advantage A ← Qret - V(s_i; θ) A=Qret-Vs[i]<line_sep># Log policy log(π(a_i|s_i; θ)) log_prob=policies[i].gather(1 actions[i]).log()<line_sep># g ← min(c, ρ_a_i)∙∇θ∙log(π(a_i|s_i; θ))∙A single_step_policy_loss=-(rho.gather(1 actions[i]).clamp(max=args.trace_max)<times>log_prob<times>A.detach()).mean(0)# Average over batch # Off-policy bias correction <if_stmt>off_policy# g ← g + Σ_a [1 - c/ρ_a]_+∙π(a|s_i; θ)∙∇θ∙log(π(a|s_i; θ))∙(Q(s_i, a; θ) - V(s_i; θ) <block_start>bias_weight=(1-args.trace_max/rho).clamp(min=0)<times>policies[i]<line_sep>single_step_policy_loss<augsub>(bias_weight<times>policies[i].log()<times>(Qs[i].detach()-Vs[i].expand_as(Qs[i]).detach())).sum(1).mean(0)<block_end><if_stmt>args.trust_region# KL divergence k ← ∇θ0∙DKL[π(∙|s_i; θ_a) || π(∙|s_i; θ)] <block_start>k=-average_policies[i].gather(1 actions[i])/(policies[i].gather(1 actions[i])+1e-10)<if_stmt>off_policy<block_start>g=(rho.gather(1 actions[i]).clamp(max=args.trace_max)<times>A/(policies[i]+1e-10).gather(1 actions[i])+(bias_weight<times>(Qs[i]-Vs[i].expand_as(Qs[i]))/(policies[i]+1e-10)).sum(1)).detach()<block_end><else_stmt><block_start>g=(rho.gather(1 actions[i]).clamp(max=args.trace_max)<times>A/(policies[i]+1e-10).gather(1 actions[i])).detach()<block_end># Policy update dθ ← dθ + ∂θ/∂θ∙z* policy_loss<augadd>_trust_region_loss(model policies[i].gather(1 actions[i])+1e-10 average_policies[i].gather(1 actions[i])+1e-10 single_step_policy_loss args.trust_region_threshold g k)<block_end><else_stmt># Policy update dθ ← dθ + ∂θ/∂θ∙g <block_start>policy_loss<augadd>single_step_policy_loss<block_end># Entropy regularisation dθ ← dθ + β∙∇θH(π(s_i; θ)) policy_loss<augsub>args.entropy_weight<times>-(policies[i].log()<times>policies[i]).sum(1).mean(0)# Sum over probabilities, average over batch # Value update dθ ← dθ - ∇θ∙1/2∙(Qret - Q(s_i, a_i; θ))^2 Q=Qs[i].gather(1 actions[i])<line_sep>value_loss<augadd>((Qret-Q)<power>2/2).mean(0)# Least squares loss # Truncated importance weight ρ¯_a_i = min(1, ρ_a_i) truncated_rho=rho.gather(1 actions[i]).clamp(max=1)<line_sep># Qret ← ρ¯_a_i∙(Qret - Q(s_i, a_i; θ)) + V(s_i; θ) Qret=truncated_rho<times>(Qret-Q.detach())+Vs[i].detach()<block_end># Update networks _update_networks(args T model shared_model shared_average_model policy_loss+value_loss optimiser)<block_end># Acts and trains model <def_stmt>train rank args T shared_model shared_average_model optimiser<block_start>torch.manual_seed(args.seed+rank)<line_sep>env=gym.make(args.env)<line_sep>env.seed(args.seed+rank)<line_sep>model=ActorCritic(env.observation_space env.action_space args.hidden_size)<line_sep>model.train()<if_stmt><not>args.on_policy# Normalise memory capacity by number of training processes <block_start>memory=EpisodicReplayMemory(args.memory_capacity<floordiv>args.num_processes args.max_episode_length)<block_end>t=1# Thread step counter done=<true># Start new episode <while_stmt>T.value()<le>args.T_max# On-policy episode loop <block_start><while_stmt><true># Sync with shared model at least every t_max steps <block_start>model.load_state_dict(shared_model.state_dict())<line_sep># Get starting timestep t_start=t<line_sep># Reset or pass on hidden state <if_stmt>done<block_start>hx,avg_hx=torch.zeros(1 args.hidden_size) torch.zeros(1 args.hidden_size)<line_sep>cx,avg_cx=torch.zeros(1 args.hidden_size) torch.zeros(1 args.hidden_size)<line_sep># Reset environment and done flag state=state_to_tensor(env.reset())<line_sep>done,episode_length=<false> 
0<block_end><else_stmt># Perform truncated backpropagation-through-time (allows freeing buffers after backwards call) <block_start>hx=hx.detach()<line_sep>cx=cx.detach()<block_end># Lists of outputs for training policies,Qs,Vs,actions,rewards,average_policies=[] [] [] [] [] []<while_stmt><not>done<and>t-t_start<l>args.t_max# Calculate policy and values <block_start>policy,Q,V,(hx cx)=model(state (hx cx))<line_sep>average_policy,_,_,(avg_hx avg_cx)=shared_average_model(state (avg_hx avg_cx))<line_sep># Sample action action=torch.multinomial(policy 1)[0 0]<line_sep># Step next_state,reward,done,_=env.step(action.item())<line_sep>next_state=state_to_tensor(next_state)<line_sep>reward=args.reward_clip<and>min(max(reward -1) 1)<or>reward# Optionally clamp rewards done=done<or>episode_length<ge>args.max_episode_length# Stop episodes at a max length episode_length<augadd>1# Increase episode counter <if_stmt><not>args.on_policy# Save (beginning part of) transition for offline training <block_start>memory.append(state action reward policy.detach())# Save just tensors <block_end># Save outputs for online training [arr.append(el)<for>arr,el zip((policies Qs Vs actions rewards average_policies) (policy Q V torch.LongTensor([[action]]) torch.Tensor([[reward]]) average_policy))]<line_sep># Increment counters t<augadd>1<line_sep>T.increment()<line_sep># Update state state=next_state<block_end># Break graph for last values calculated (used for targets, not directly as model outputs) <if_stmt>done# Qret = 0 for terminal s <block_start>Qret=torch.zeros(1 1)<if_stmt><not>args.on_policy# Save terminal state for offline training <block_start>memory.append(state <none> <none> <none>)<block_end><block_end><else_stmt># Qret = V(s_i; θ) for non-terminal s <block_start>_,_,Qret,_=model(state (hx cx))<line_sep>Qret=Qret.detach()<block_end># Train the network on-policy _train(args T model shared_model shared_average_model optimiser policies Qs Vs actions rewards Qret average_policies)<line_sep># Finish on-policy episode <if_stmt>done<block_start><break><block_end><block_end># Train the network off-policy when enough experience has been collected <if_stmt><not>args.on_policy<and>len(memory)<ge>args.replay_start# Sample a number of off-policy episodes based on the replay ratio <block_start><for_stmt>_ range(_poisson(args.replay_ratio))# Act and train off-policy for a batch of (truncated) episode <block_start>trajectories=memory.sample_batch(args.batch_size maxlen=args.t_max)<line_sep># Reset hidden state hx,avg_hx=torch.zeros(args.batch_size args.hidden_size) torch.zeros(args.batch_size args.hidden_size)<line_sep>cx,avg_cx=torch.zeros(args.batch_size args.hidden_size) torch.zeros(args.batch_size args.hidden_size)<line_sep># Lists of outputs for training policies,Qs,Vs,actions,rewards,old_policies,average_policies=[] [] [] [] [] [] []<line_sep># Loop over trajectories (bar last timestep) <for_stmt>i range(len(trajectories)-1)# Unpack first half of transition <block_start>state=torch.cat(tuple(trajectory.state<for>trajectory trajectories[i]) 0)<line_sep>action=torch.LongTensor([trajectory.action<for>trajectory trajectories[i]]).unsqueeze(1)<line_sep>reward=torch.Tensor([trajectory.reward<for>trajectory trajectories[i]]).unsqueeze(1)<line_sep>old_policy=torch.cat(tuple(trajectory.policy<for>trajectory trajectories[i]) 0)<line_sep># Calculate policy and values policy,Q,V,(hx cx)=model(state (hx cx))<line_sep>average_policy,_,_,(avg_hx avg_cx)=shared_average_model(state (avg_hx avg_cx))<line_sep># Save outputs for offline 
training [arr.append(el)<for>arr,el zip((policies Qs Vs actions rewards average_policies old_policies) (policy Q V action reward average_policy old_policy))]<line_sep># Unpack second half of transition next_state=torch.cat(tuple(trajectory.state<for>trajectory trajectories[i+1]) 0)<line_sep>done=torch.Tensor([trajectory.action<is><none><for>trajectory trajectories[i+1]]).unsqueeze(1)<block_end># Do forward pass for all transitions _,_,Qret,_=model(next_state (hx cx))<line_sep># Qret = 0 for terminal s, V(s_i; θ) otherwise Qret=((1-done)<times>Qret).detach()<line_sep># Train the network off-policy _train(args T model shared_model shared_average_model optimiser policies Qs Vs actions rewards Qret average_policies old_policies=old_policies)<block_end><block_end>done=<true><block_end>env.close()<block_end>
from opta.commands.init_templates.helpers import dictionary_deep_set
from opta.commands.init_templates.template import TemplateVariable

LOCATIONS = [
    "australiaeast",
    "brazilsouth",
    "canadacentral",
    "centralus",
    "eastus",
    "eastus2",
    "francecentral",
    "germanywestcentral",
    "japaneast",
    "southafricanorth",
    "southcentralus",
    "southeastasia",
    "uksouth",
    "westeurope",
    "westus2",
    "westus3",
]


def validate(location_name: str) -> bool:
    return location_name in LOCATIONS


def apply(d: dict, v: str) -> dict:
    set_path = dictionary_deep_set(["providers", "azurerm", "location"])
    set_path(d, v)
    return d


indented_locations = [f"\t{location}" for location in LOCATIONS]
location_string = "\n".join(indented_locations)

azureLocationVariable = TemplateVariable(
    prompt="Azure location",
    applier=apply,
    validator=validate,
    error_message=f"Must be one of\n{location_string}",
    default_value="centralus",
)
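# A small illustrative check (not part of the original module), assuming
# dictionary_deep_set creates the missing intermediate keys when setting a
# nested path on a plain dict.
if __name__ == "__main__":
    assert validate("centralus")
    assert not validate("atlantis")  # made-up name, not a real Azure location
    cfg: dict = {}
    apply(cfg, "centralus")
    # expected shape: {"providers": {"azurerm": {"location": "centralus"}}}
    print(cfg)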
import sys
sys.path.append("../")
from appJar import gui

lid = 0


def add(btn):
    global lid
    app.openPage("Main Title", app.getSpinBox("spin"))
    app.addLabel(str(lid), str(lid))
    lid += 1
    app.stopPage()


app = gui()
app.setBg("DarkKhaki")
app.setGeometry(280, 400)

app.startPagedWindow("Main Title")

app.startPage()
app.addLabel("l13", "Label 1")
app.addSpinBoxRange("spin", 1, 5)
app.addButton("addLabel", add)
app.stopPage()

app.startPage()
app.stopPage()

app.startPage()
app.addLabel("l3", "Label 3")
app.stopPage()

app.startPage()
app.addLabel("l4", "Label 4")
app.stopPage()

app.stopPagedWindow()

app.go()
# Copyright (c) 2018, Xilinx, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. __author__="<NAME>"<line_sep>__copyright__="Copyright 2018, Xilinx"<line_sep>__email__="<EMAIL>"<import_stmt>contextlib<import_from_stmt>pynq DefaultHierarchy<import_from_stmt>.pipeline ColorConverter PixelPacker<import_from_stmt>.frontend VideoInFrontend VideoOutFrontend<import_from_stmt>.dma AxiVDMA<import_from_stmt>.common *<class_stmt>VideoIn(DefaultHierarchy)<block_start>"""Wrapper for the input video pipeline. 
This wrapper assumes the following pipeline structure and naming color_convert_in -> pixel_pack ->axi_vdma with vtc_in and axi_gpio_hdmiiin helper IP Attributes ---------- frontend : pynq.lib.video.HDMIInFrontend The HDMI frontend for signal detection color_convert : pynq.lib.video.ColorConverter The input color format converter pixel_pack : pynq.lib.video.PixelPacker Converts the input pixel size to that required by the VDMA """<line_sep>@staticmethod<def_stmt>checkhierarchy description<block_start><if_stmt>'frontend'<in>description['hierarchies']<block_start>frontend_dict=description['hierarchies']['frontend']<block_end><elif_stmt>'frontend'<in>description['ip']<block_start>frontend_dict=description['ip']['frontend']<block_end><else_stmt><block_start><return><false><block_end><return>('pixel_pack'<in>description['ip']<and>'color_convert'<in>description['ip']<and>description['ip']['pixel_pack']['driver']<eq>PixelPacker<and>description['ip']['color_convert']['driver']<eq>ColorConverter<and>issubclass(frontend_dict['driver'] VideoInFrontend))<block_end><def_stmt>__init__ self description vdma=<none><block_start>"""Initialise the drivers for the pipeline Parameters ---------- path : str name of the hierarchy containing all of the video blocks """<line_sep>super().__init__(description)<line_sep>ip_dict=self.description<line_sep>self._vdma=vdma<line_sep>self._color=self.color_convert<line_sep>self._pixel=self.pixel_pack<line_sep>self._hdmi=self.frontend<block_end><def_stmt>configure self pixelformat=PIXEL_BGR<block_start>"""Configure the pipeline to use the specified pixel format. If the pipeline is running it is stopped prior to the configuration being changed Parameters ---------- pixelformat : PixelFormat The pixel format to configure the pipeline for """<if_stmt>self._vdma.readchannel.running<block_start>self._vdma.readchannel.stop()<block_end>self._color.colorspace=pixelformat.in_color<line_sep>self._pixel.bits_per_pixel=pixelformat.bits_per_pixel<line_sep>self._hdmi.start()<line_sep>input_mode=self._hdmi.mode<line_sep>self._vdma.readchannel.mode=VideoMode(input_mode.width input_mode.height pixelformat.bits_per_pixel input_mode.fps)<line_sep><return>self._closecontextmanager()<block_end><def_stmt>start self<block_start>"""Start the pipeline """<line_sep>self._vdma.readchannel.start()<line_sep><return>self._stopcontextmanager()<block_end><def_stmt>stop self<block_start>"""Stop the pipeline """<line_sep>self._vdma.readchannel.stop()<block_end>@contextlib.contextmanager<def_stmt>_stopcontextmanager self<block_start>"""Context Manager to stop the VDMA at the end of the block """<line_sep><yield><line_sep>self.stop()<block_end>@contextlib.contextmanager<def_stmt>_closecontextmanager self<block_start>"""Context Manager to close the HDMI port at the end of the block """<line_sep><yield><line_sep>self.close()<block_end><def_stmt>close self<block_start>"""Uninitialise the drivers, stopping the pipeline beforehand """<line_sep>self.stop()<line_sep>self._hdmi.stop()<block_end>@property<def_stmt>colorspace self<block_start>"""The colorspace of the pipeline, can be changed without stopping the pipeline """<line_sep><return>self._color.colorspace<block_end>@colorspace.setter<def_stmt>colorspace self new_colorspace<block_start>self._color.colorspace=new_colorspace<block_end>@property<def_stmt>mode self<block_start>"""Video mode of the input """<line_sep><return>self._vdma.readchannel.mode<block_end>@property<def_stmt>cacheable_frames self<block_start>"""Whether frames should be cacheable or 
non-cacheable Only valid if a VDMA has been specified """<if_stmt>self._vdma<block_start><return>self._vdma.readchannel.cacheable_frames<block_end><else_stmt><block_start><raise>RuntimeError("No VDMA specified")<block_end><block_end>@cacheable_frames.setter<def_stmt>cacheable_frames self value<block_start><if_stmt>self._vdma<block_start>self._vdma.readchannel.cacheable_frames=value<block_end><else_stmt><block_start><raise>RuntimeError("No VDMA specified")<block_end><block_end><def_stmt>readframe self<block_start>"""Read a video frame See AxiVDMA.S2MMChannel.readframe for details """<line_sep><return>self._vdma.readchannel.readframe()<block_end><async_keyword><def_stmt>readframe_async self<block_start>"""Read a video frame See AxiVDMA.S2MMChannel.readframe for details """<line_sep><return><await>self._vdma.readchannel.readframe_async()<block_end><def_stmt>tie self output<block_start>"""Mirror the video input on to an output channel Parameters ---------- output : HDMIOut The output to mirror on to """<line_sep>self._vdma.readchannel.tie(output._vdma.writechannel)<block_end><block_end><class_stmt>VideoOut(DefaultHierarchy)<block_start>"""Wrapper for the output video pipeline. This wrapper assumes the following pipeline structure and naming axi_vdma -> pixel_unpack -> color_convert -> frontend with vtc_out and axi_dynclk helper IP Attributes ---------- frontend : pynq.lib.video.HDMIOutFrontend The HDMI frontend for mode setting color_convert : pynq.lib.video.ColorConverter The output color format converter pixel_unpack : pynq.lib.video.PixelPacker Converts the input pixel size to 24 bits-per-pixel """<line_sep>@staticmethod<def_stmt>checkhierarchy description<block_start><if_stmt>'frontend'<in>description['hierarchies']<block_start>frontend_dict=description['hierarchies']['frontend']<block_end><elif_stmt>'frontend'<in>description['ip']<block_start>frontend_dict=description['ip']['frontend']<block_end><else_stmt><block_start><return><false><block_end><return>('pixel_unpack'<in>description['ip']<and>'color_convert'<in>description['ip']<and>description['ip']['pixel_unpack']['driver']<eq>PixelPacker<and>description['ip']['color_convert']['driver']<eq>ColorConverter<and>issubclass(frontend_dict['driver'] VideoOutFrontend))<block_end><def_stmt>__init__ self description vdma=<none><block_start>"""Initialise the drivers for the pipeline Parameters ---------- path : str name of the hierarchy containing all of the video blocks """<line_sep>super().__init__(description)<line_sep>self._vdma=vdma<line_sep>self._color=self.color_convert<line_sep>self._pixel=self.pixel_unpack<line_sep>self._hdmi=self.frontend<block_end><def_stmt>configure self mode pixelformat=<none><block_start>"""Configure the pipeline to use the specified pixel format and size. 
If the pipeline is running it is stopped prior to the configuration being changed Parameters ---------- mode : VideoMode The video mode to output pixelformat : PixelFormat The pixel format to configure the pipeline for """<if_stmt>self._vdma.writechannel.running<block_start>self._vdma.writechannel.stop()<block_end><if_stmt>pixelformat<is><none><block_start><if_stmt>mode.bits_per_pixel<eq>8<block_start>pixelformat=PIXEL_GRAY<block_end><elif_stmt>mode.bits_per_pixel<eq>24<block_start>pixelformat=PIXEL_BGR<block_end><elif_stmt>mode.bits_per_pixel<eq>32<block_start>pixelformat=PIXEL_RGBA<block_end><else_stmt><block_start><raise>ValueError("No default pixel format for ${mode.bits_per_pixel} bpp")<block_end><block_end><if_stmt>pixelformat.bits_per_pixel<ne>mode.bits_per_pixel<block_start><raise>ValueError("Video mode and pixel format have different sized pixels")<block_end>self._color.colorspace=pixelformat.out_color<line_sep>self._pixel.bits_per_pixel=pixelformat.bits_per_pixel<line_sep>self._hdmi.mode=mode<line_sep>self._vdma.writechannel.mode=mode<line_sep>self._hdmi.start()<line_sep><return>self._closecontextmanager()<block_end><def_stmt>start self<block_start>"""Start the pipeline """<line_sep>self._vdma.writechannel.start()<line_sep><return>self._stopcontextmanager()<block_end><def_stmt>stop self<block_start>"""Stop the pipeline """<line_sep>self._vdma.writechannel.stop()<block_end><def_stmt>close self<block_start>"""Close the pipeline an unintialise the drivers """<line_sep>self.stop()<line_sep>self._hdmi.stop()<block_end>@contextlib.contextmanager<def_stmt>_stopcontextmanager self<block_start>"""Context Manager to stop the VDMA at the end of the block """<line_sep><yield><line_sep>self.stop()<block_end>@contextlib.contextmanager<def_stmt>_closecontextmanager self<block_start>"""Context Manager to close the HDMI port at the end of the block """<line_sep><yield><line_sep>self.close()<block_end>@property<def_stmt>colorspace self<block_start>"""Set the colorspace for the pipeline - can be done without stopping the pipeline """<line_sep><return>self._color.colorspace<block_end>@colorspace.setter<def_stmt>colorspace self new_colorspace<block_start>self._color.colorspace=new_colorspace<block_end>@property<def_stmt>mode self<block_start>"""The currently configured video mode """<line_sep><return>self._vdma.writechannel.mode<block_end>@property<def_stmt>cacheable_frames self<block_start>"""Whether frames should be cacheable or non-cacheable Only valid if a VDMA has been specified """<if_stmt>self._vdma<block_start><return>self._vdma.writechannel.cacheable_frames<block_end><else_stmt><block_start><raise>RuntimeError("No VDMA specified")<block_end><block_end>@cacheable_frames.setter<def_stmt>cacheable_frames self value<block_start><if_stmt>self._vdma<block_start>self._vdma.writechannel.cacheable_frames=value<block_end><else_stmt><block_start><raise>RuntimeError("No VDMA specified")<block_end><block_end><def_stmt>newframe self<block_start>"""Return an unintialised video frame of the correct type for the pipeline """<line_sep><return>self._vdma.writechannel.newframe()<block_end><def_stmt>writeframe self frame<block_start>"""Write the frame to the video output See AxiVDMA.MM2SChannel.writeframe for more details """<line_sep>self._vdma.writechannel.writeframe(frame)<block_end><async_keyword><def_stmt>writeframe_async self frame<block_start>"""Write the frame to the video output See AxiVDMA.MM2SChannel.writeframe for more details 
"""<line_sep><await>self._vdma.writechannel.writeframe_async(frame)<block_end><block_end><class_stmt>HDMIWrapper(DefaultHierarchy)<block_start>"""Hierarchy driver for the entire video subsystem. Exposes the input, output and video DMA as attributes. For most use cases the wrappers for the input and output pipelines are sufficient and the VDMA will not need to be used directly. Attributes ---------- hdmi_in : pynq.lib.video.HDMIIn The HDMI input pipeline hdmi_out : pynq.lib.video.HDMIOut The HDMI output pipeline axi_vdma : pynq.lib.video.AxiVDMA The video DMA. """<line_sep>@staticmethod<def_stmt>checkhierarchy description<block_start>in_pipeline=<none><line_sep>out_pipeline=<none><line_sep>dma=<none><for_stmt>hier,details description['hierarchies'].items()<block_start><if_stmt>details['driver']<eq>VideoIn<block_start>in_pipeline=hier<block_end><elif_stmt>details['driver']<eq>VideoOut<block_start>out_pipeline=hier<block_end><block_end><for_stmt>ip,details description['ip'].items()<block_start><if_stmt>details['driver']<eq>AxiVDMA<block_start>dma=ip<block_end><block_end><return>(in_pipeline<is><not><none><and>out_pipeline<is><not><none><and>dma<is><not><none>)<block_end><def_stmt>__init__ self description<block_start>super().__init__(description)<line_sep>in_pipeline=<none><line_sep>out_pipeline=<none><line_sep>dma=<none><for_stmt>hier,details description['hierarchies'].items()<block_start><if_stmt>details['driver']<eq>VideoIn<block_start>in_pipeline=hier<block_end><elif_stmt>details['driver']<eq>VideoOut<block_start>out_pipeline=hier<block_end><block_end><for_stmt>ip,details description['ip'].items()<block_start><if_stmt>details['driver']<eq>AxiVDMA<block_start>dma=ip<block_end><block_end>getattr(self in_pipeline)._vdma=getattr(self dma)<line_sep>getattr(self out_pipeline)._vdma=getattr(self dma)<block_end><block_end>
<try_stmt><block_start><import_stmt>uctypes<block_end><except_stmt>ImportError<block_start>print("SKIP")<line_sep><raise>SystemExit<block_end>data=bytearray(b'01234567')<line_sep>print(uctypes.bytes_at(uctypes.addressof(data) 4))<line_sep>print(uctypes.bytearray_at(uctypes.addressof(data) 4))<line_sep>
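# Editor's note in code form: bytes_at() above copies the memory into an immutable
# bytes object, whereas bytearray_at() captures it by reference, so writes through
# the returned bytearray show up in the original buffer. MicroPython only; the
# by-reference behaviour is assumed from the uctypes documentation.
import uctypes

buf = bytearray(b"01234567")
view = uctypes.bytearray_at(uctypes.addressof(buf), 4)
view[0] = ord("X")
print(buf)  # expected: bytearray(b'X1234567')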
""" .. currentmodule:: pylayers.antprop.diffRT .. autosummary:: :members: """<import_from_future_stmt> print_function<import_stmt>doctest<import_stmt>os<import_stmt>glob<line_sep>#!/usr/bin/python # -*- coding: latin1 -*- <import_stmt>numpy<as>np<import_stmt>scipy.special<as>sps<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pdb<def_stmt>diff fGHz phi0 phi si sd N mat0 matN beta=np.pi/2 mode='tab' debug=<false><block_start>""" Luebbers Diffration coefficient for Ray tracing Parameters ---------- Nf : number of frequencies Nr : number of rays fGHz : np.array (Nf) phi0 : np.array (Nr) phi : np.array (Nr) si : np.array (Nr) sd : np.array (Nr) N: np.array (Nb) mat0 : Mat matN : Mat beta : np.array (Nb) skew incidence angle (rad) mode : str ( 'tab','exact') if 'tab': the Fresnel function is interpolated ( increase speed) if 'exact': the Fresnel function is computed for each values ( increase accuracy) (see FreF) Returns ------- Ds : numpy array Diffraction soft Dh : numpy array Diffraction hard Examples -------- .. plot:: :include-source: >>> import numpy as np >>> from pylayers.antprop.slab import * >>> Nf=3 >>> Nr=10 >>> Nb=5 >>> fGHz = np.linspace(0,10,Nf) >>> N = np.linspace(1,10,Nb)#320/180. >>> phi0 = np.linspace(0.01,2*np.pi-0.01,Nr)#40*np.pi/180. >>> phi = np.linspace(0.01,2*np.pi-0.01,Nr) >>> dm = MatDB() >>> mat0 = dm['METAL'] >>> matN = dm['METAL'] >>> si = 10000.*np.ones(Nr) >>> sd = 1.*np.ones(Nr) >>> plt.ion() >>> Ds,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN) """<if_stmt><not>isinstance(fGHz np.ndarray)<block_start>fGHz=np.array([fGHz])<block_end><if_stmt><not>isinstance(phi0 np.ndarray)<block_start>phi0=np.array([phi0])<block_end><if_stmt><not>isinstance(phi np.ndarray)<block_start>phi=np.array([phi])<block_end><if_stmt><not>isinstance(si np.ndarray)<block_start>si=np.array([si])<block_end><if_stmt><not>isinstance(sd np.ndarray)<block_start>sd=np.array([sd])<block_end><if_stmt><not>isinstance(N np.ndarray)<block_start>N=np.array([N])<block_end><if_stmt><not>isinstance(beta np.ndarray)<block_start>beta=np.array([beta])<block_end>fGHz=fGHz[: <none>]<line_sep>phi0=phi0[<none> :]<line_sep>phi=phi[<none> :]<line_sep>si=si[<none> :]<line_sep>sd=sd[<none> :]<line_sep>N=N[<none> :]<line_sep>beta=beta[<none> :]<line_sep>L=si<times>sd/(si+sd)<line_sep>k=2<times>np.pi<times>fGHz/0.3<line_sep>#-------------------------------------------------- # R on faces 'o' and 'n' #-------------------------------------------------- tho=np.empty((fGHz.shape[0] phi.shape[1]))<line_sep>thn=np.empty((fGHz.shape[0] phi.shape[1]))<line_sep># PHI0 = phi0 * np.ones(phi.shape) # PHI = np.ones(phi0.shape)*phi # BN = np.ones(phi0.shape)*N c1=phi<g>phi0<line_sep>c2=~c1<line_sep>tho[: c1[0 :]]=phi0[: c1[0 :]]<line_sep>thn[: c1[0 :]]=N[: c1[0 :]]<times>np.pi-phi[: c1[0 :]]<line_sep>tho[: c2[0 :]]=phi[: c2[0 :]]<line_sep>thn[: c2[0 :]]=N[: c2[0 :]]<times>np.pi-phi0[: c2[0 :]]<line_sep>er0=np.real(mat0['epr'])<line_sep>err0=np.imag(mat0['epr'])<line_sep>ur0=np.real(mat0['mur'])<line_sep>urr0=np.imag(mat0['mur'])<line_sep>sigma0=mat0['sigma']<line_sep>deltah0=mat0['roughness']<line_sep>erN=np.real(matN['epr'])<line_sep>errN=np.imag(matN['epr'])<line_sep>urN=np.real(mat0['mur'])<line_sep>urrN=np.imag(mat0['mur'])<line_sep>sigmaN=matN['sigma']<line_sep>deltahN=matN['roughness']<line_sep>Rsofto,Rhardo=R(tho k er0 err0 sigma0 ur0 urr0 deltah0)<line_sep>Rsoftn,Rhardn=R(thn k erN errN sigmaN urN urrN deltahN)<line_sep>#-------------------------------------------------- # grazing angle Go et Gn 
#-------------------------------------------------- Gsofto,Gsoftn=G(N phi0 Rsofto Rsoftn)<line_sep>Ghardo,Ghardn=G(N phi0 Rhardo Rhardn)<line_sep>#-------------------------------------------------- #calcul des 4 termes du coeff diff #-------------------------------------------------- #by construction #0 < KLA < 2*k*L klamax=2<times>np.max(k)<times>np.max(L)<if_stmt>mode<eq>'tab'#xF0 = np.logspace(-6,-2,1000) #xF1 = np.logspace(-2,np.log10(klamax),1000) #xF = np.hstack((xF0,xF1)) #pdb.set_trace() # xF = np.logspace(-6,np.log10(klamax),1000) <block_start>xF=np.linspace(-8 np.log10(klamax) 2000)<line_sep>pxF=10<power>xF<line_sep>F=FreF(pxF)[0]<block_end><else_stmt><block_start>xF=[]<line_sep>F=[]<block_end>sign=1.0<line_sep>D1=Dfunc(sign k N phi-phi0 si sd xF F beta)<line_sep>sign=-1.0<line_sep>D2=Dfunc(sign k N phi-phi0 si sd xF F beta)<line_sep>sign=+1.0<line_sep>D3=Dfunc(sign k N phi+phi0 si sd xF F beta)<line_sep>sign=-1.0<line_sep>D4=Dfunc(sign k N phi+phi0 si sd xF F beta)<line_sep>#-------------------------------------- #n>=1 : exterior wedge #-------------------------------------- Dsoft=np.empty(np.shape(D1) dtype=complex)<line_sep>Dhard=np.empty(np.shape(D1) dtype=complex)<line_sep>#c1 = BN>=1.0 Dsoft=D1+D2+Rsoftn<times>D3+Rsofto<times>D4<line_sep>Dhard=D1+D2+Rhardn<times>D3+Rhardo<times>D4<line_sep># Dsoft = D2-D4 # Dhard = D2+D4 #Dsoft = D1+D2-D3-D4 #Dhard = D1+D2+D3+D4 # Dsoft = Gsoftn*(D1+Rsoftn*D3)+Gsofto*(D2+Rsofto*D4) # Dhard = Ghardn*(D1+Rhardn*D3)+Ghardo*(D2+Rhardo*D4) # c1 = abs(Gsoftn+1.0) < 1e-6 # c2 = abs(Gsofto+1.0) < 1e-6 # c3 = abs(Ghardn+1.0) < 1e-6 # c4 = abs(Ghardo+1.0) < 1e-6 # # Dsoft[c1]= 0.5*(D1[c1]+D3[c1])+Gsofto[c1]*(D2[c1]+Rsofto[c1]*D4[c1]) # Dsoft[c2]= Gsoftn[c2]*(D1[c2]+Rsoftn[c2]*D3[c2])+0.5*(D2[c2]+D4[c2]) # Dhard[c3]= 0.5*(D1[c3]+D3[c3])+Ghardo[c3]*(D2[c3]+Rhardo[c3]*D4[c3]) # Dhard[c4]= Ghardn[c4]*(D1[c4]+Rhardn[c4]*D3[c4])+0.5*(D2[c4]+D4[c4]) #-------------------------------------- #traitement des cas ou Go (ou Gn) = -1 #-------------------------------------- # if (abs(Gsoftn+1.0) < 1e-6): # DTsoft = 0.5*(D1+D3)+Gsofto*(D2+Rsofto*D4) # # if (abs(Gsofto+1.0)<1e-6): # DTsoft = Gsoftn*(D1+Rsoftn*D3)+0.5*(D2+D4) # # if (abs(Ghardn+1.0) < 1.0e-6): # DThard = 0.5*(D1+D3)+Ghardo*(D2+Rhardo*D4) # # if (abs(Ghardo+1.0)<1e-6): # DThard = Ghardn*(D1+Rhardn*D3)+0.5*(D2+D4) # ##-------------------------------------- ##cas ou n<1 : interior wedge ##-------------------------------------- # else: # # thoz = N*np.pi-tho # thnz = N*np.pi-thn # # # [Rsoftnz,Rhardnz] = R(thnz,k,ero,erro,condo,uro,deltaho) # [Rsoftoz,Rhardoz] = R(thoz,k,ern,errn,condn,urn,deltahn) # # DTsoft = Rsoftoz*Rsoftnz*D1+Rsoftn*D3+(Rsofto*Rsoftn*D2+Rsofto*D4) # # DThard = Rhardoz*Rhardnz*D1+Rhardn*D3+(Rhardo*Rhardn*D2+Rhardo*D4) <if_stmt>np.isnan(Dsoft).any()<block_start>u=np.isnan(Dsoft)<line_sep>pdb.set_trace()<block_end><if_stmt>np.isnan(Dhard).any()<block_start>v=np.where(Dhard<eq>np.nan)<line_sep>pdb.set_trace()<block_end><if_stmt>debug<block_start><return>Dsoft Dhard D1 D2 D3 D4<block_end><else_stmt><block_start><return>Dsoft Dhard<block_end><block_end>#,D1,D2,D3,D4 <def_stmt>G N phi0 Ro Rn<block_start>""" grazing angle correction Parameters ---------- N : wedge parameter phi0 : incidence angle (rad) Ro : R coefficient on face o Rn : R coefficient on face n Luebbers 89 "a heuristique UTD slope diffraction coefficient for rough lossy wedges" """<if_stmt><not>isinstance(phi0 np.ndarray)<block_start>phi0=np.array([phi0])<block_end><if_stmt><not>isinstance(N 
np.ndarray)<block_start>N=np.array([N])<block_end>PHI0=phi0<times>np.ones(Ro.shape)<line_sep>BN=N<times>np.ones(Ro.shape)<line_sep># face o Go=np.ones(np.shape(Ro) dtype='complex')<line_sep>c1=(abs(PHI0)<l>1.0e-6)<times>(abs(Ro+1.0)<g>1.0e-6)<line_sep>c2=(abs(PHI0)<l>1.0e-6)<times>(abs(Ro+1.0)<l>1.0e-6)<line_sep>c3=abs(PHI0-BN<times>np.pi)<l>1.0e-6<line_sep>Go[c1]=1.0/(1.0+Ro[c1])<line_sep>Go[c2]=-1.<line_sep>Go[c3]=0.5<line_sep># face n Gn=np.ones(np.shape(Rn) dtype='complex')<line_sep>c1=(abs(PHI0-BN<times>np.pi)<l>1.0e-6)<times>(abs(Rn+1.0)<g>1.0e-6)<line_sep>c2=(abs(PHI0-BN<times>np.pi)<l>1.0e-6)<times>(abs(Rn+1.0)<l>1.0e-6)<line_sep>c3=abs(PHI0)<l>1.0e-6<line_sep>Gn[c1]=1.0/(1.0+Rn[c1])<line_sep>Gn[c2]=-1.<line_sep>Gn[c3]=0.5<line_sep><return>Go Gn<block_end><def_stmt>Dfunc sign k N dphi si sd xF=[] F=[] beta=np.pi/2<block_start>""" Parameters ---------- sign : int +1 | -1 k : wave number N : wedge parameter dphi : phi-phi0 or phi+phi0 si : distance source-D sd : distance D-observation beta : skew incidence angle xF : array support of Fresnel function. F : array Values of Fresnel function in regard of support if F =[], fresnel function is computed otherwise the passed interpolation F is used. Reference --------- [1] KOUYOUMJIAN-PATHAK a uniform geometrical theory of diffraction for an edge in a perfectly conducting surface" IEEE AP nov 74 vol 62 N11 Notes ----- e-jnp.pi/4 1 Di= ------------------ * ----------- * F(kla) ([1] eq 25) 2n*racine(2*np.pi*k) np.tan(dphi/n)sin(beta) """<line_sep>cste=(1.0-1.0<times>1j)<times>(1.0/(4.0<times>N<times>np.sqrt(k<times>np.pi)<times>np.sin(beta)))<line_sep>rnn=(dphi+np.pi<times>sign)/(2.0<times>N<times>np.pi)<line_sep>nn=np.zeros(np.shape(rnn))<line_sep>nn[rnn<g>0.5]=1<line_sep>nn[rnn<g>1.5]=2<line_sep>nn[rnn<l>-0.5]=-1<line_sep>nn[rnn<l>-1.5]=-2<line_sep># KLA ref[1] eq 27 L=((si<times>sd)<times>np.sin(beta)<power>2)/(1.<times>(si+sd))<line_sep>AC=np.cos((2.0<times>N<times>nn<times>np.pi-dphi)/2.0)<line_sep>A=2<times>AC<power>2<line_sep>KLA=k<times>L<times>A<line_sep>epsi=AC<times>2.0<line_sep>angle=(np.pi+sign<times>dphi)/(2.0<times>N)<line_sep>tan=np.tan(angle)<line_sep>Di=np.empty(KLA.shape)<if_stmt>len(F)<eq>0<block_start>Fkla,ys,yL=FreF(KLA)<block_end><else_stmt>#pxF = 10**xF #uF = (np.abs(KLA[:,:]-pxF[:,None,None])).argmin(axis=0) <block_start>val=np.maximum(np.log10(np.abs(KLA))-xF[0 <none> <none>] 0)<line_sep>uF2=(len(F)-1)<times>(val)/(xF[-1 <none> <none>]-xF[0 <none> <none>])<line_sep>uF2_int=np.floor(uF2).astype('int')<line_sep>Fkla=F[uF2_int]<line_sep>#if np.max(Fkla) > 1: # Warning('diffRT : Fkla tab probably wrong') <block_end># 4.56 Mac Namara <try_stmt><block_start>Di=-cste<times>Fkla/tan<block_end><except_stmt><block_start>print('tan=0 : It can happen')<line_sep>pdb.set_trace()<block_end>c5=np.where(np.abs(tan)<l>1e-9)<line_sep>BL=np.ones(Di.shape)<times>L<line_sep>Di[: c5]=0.5<times>np.sqrt(BL[c5])<line_sep># if np.isinf(Di).any(): # pdb.set_trace() <return>(Di)<block_end><def_stmt>FresnelI x<block_start>""" calculates Fresnel integral Parameters ---------- x : array real argument """<line_sep>v=np.empty(x.shape dtype=complex)<line_sep>y=np.abs(x)<line_sep>z=.25<times>y<line_sep>u1=np.where(z<g>1)<line_sep>u2=np.where(z<le>1)<line_sep>y1=y[u1]<line_sep>y2=y[u2]<line_sep>d1=np.cos(y1)<line_sep>d2=np.cos(y2)<line_sep>e1=np.sin(y1)<line_sep>e2=np.sin(y2)<line_sep>z1=z[u1]<line_sep>z2=z[u2]<line_sep>c1=np.sqrt(z1)<line_sep>c2=np.sqrt(z2)<line_sep># ---------------------------------------- # x>4, z>1 # 
---------------------------------------- v1=0.5-0.5<times>1j<line_sep>c1=(1.0)/c1<line_sep>z1=c1<times>c1<line_sep>a1=((((((((((.23393900e-3<times>z1-.12179300e-2)<times>z1+.21029670e-2)<times>z1+.2464200e-3)<times>z1-.67488730e-2)<times>z1+.11948809e-1)<times>z1-.9497136e-2)<times>z1+.68989200e-3)<times>z1+.57709560e-2)<times>z1+.3936000e-5)<times>z1-.24933975e-1)<times>z1<times>c1<line_sep>b1=(((((((((((.838386000e-3<times>z1-.55985150e-2)<times>z1+.16497308e-1)<times>z1-.27928955e-1)<times>z1+.29064067e-1)<times>z1-.17122914e-1)<times>z1+.19032180e-2)<times>z1+.48514660e-2)<times>z1+.23006000e-4)<times>z1-.93513410e-2)<times>z1+.23000000e-7)<times>z1+.19947114000)<times>c1<line_sep># ---------------------------------------- # x<4, z<1 # ---------------------------------------- a2=(((((((((((0.34404779e-1<times>z2-0.15023096)<times>z2-0.25639041e-1)<times>z2+0.850663781)<times>z2-0.75752419e-1)<times>z2-0.305048566e1)<times>z2-0.16898657e-1)<times>z2+0.6920691902e1)<times>z2-0.576361e-3)<times>z2-0.6808568854e1)<times>z2-0.1702e-5)<times>z2+0.159576914e1)<times>c2<line_sep>b2=(((((((((((.19547031e-1<times>z2-.216195929e0)<times>z2+.702222016e0)<times>z2-.4033492760e0)<times>z2-.1363729124e1)<times>z2-.138341947e0)<times>z2+.5075161298e1)<times>z2-.952089500e-2)<times>z2-.778002040e1)<times>z2-.928100000e-4)<times>z2+.4255387524e1)<times>z2-.33000000e-7)<times>c2<line_sep>w1=a1<times>d1+b1<times>e1+1j<times>(b1<times>d1-a1<times>e1)+v1<line_sep>w2=a2<times>d2+b2<times>e2+1j<times>(b2<times>d2-a2<times>e2)<line_sep>v[u1]=w1<line_sep>v[u2]=w2<line_sep>y=v<times>(np.sqrt(np.pi/2.0))<line_sep><return>y<block_end><def_stmt>FreF x<block_start>""" F function from Pathack Parameters ---------- x : array real argument Examples -------- .. plot:: :include-source: >>> import matplotlib.pyplot as plt >>> import numpy as np >>> x = np.logspace(-4,2,400); >>> F = FreF(x) >>> plt.semilogx(x,,np.abs(F)) >>> plt.grid() """<line_sep>ejp4=np.exp(1j<times>np.pi/4)<line_sep>emjp4=np.exp(-1j<times>np.pi/4)<line_sep>y=np.empty(x.shape dtype=complex)<line_sep>u1=np.where(x<g>10)[0]<line_sep>u2=np.where(x<le>10)[0]<line_sep>xu1=x[u1]<line_sep>xu2=x[u2]<line_sep>x2=xu1<times>xu1<line_sep>x3=x2<times>xu1<line_sep>x4=x3<times>xu1<line_sep>w1=1-0.75/x2+4.6875/x4+1j<times>(0.5/xu1-1.875/x3)<line_sep>cst=(1.0-1j)<times>0.5<times>np.sqrt(np.pi/2)<line_sep>carx=abs(xu2)<line_sep>racx=np.sqrt(carx)<line_sep>modx=np.mod(xu2 2<times>np.pi)<line_sep>expjx=np.exp(1j<times>modx)<line_sep>fr=FresnelI(carx)<line_sep>into=cst-fr<line_sep>w2=2.0<times>racx<times>1j<times>expjx<times>into<line_sep>y[u1]=w1<line_sep>y[u2]=w2<line_sep># [1] eq 30 ys=(np.sqrt(np.pi<times>x)-2<times>x<times>ejp4-(2/3.)<times>x<power>2<times>emjp4)<times>np.exp(1j<times>(np.pi/4+x))<line_sep>yl=1-0.75/(x<times>x)+4.6875/(x<times>x<times>x<times>x)+1j<times>(0.5/x-1.875/(x<times>x<times>x))<line_sep><return>y ys yl<block_end><def_stmt>FreF2 x<block_start>""" F function using numpy fresnel function Parameters ---------- Not working for large argument """<line_sep>y=np.empty(x.shape dtype=complex)<line_sep>u1=np.where(x<g>5)[0]<line_sep>u2=np.where(x<le>5)[0]<line_sep>xu1=x[u1]<line_sep>xu2=x[u2]<line_sep>x2=xu1<times>xu1<line_sep>x3=x2<times>xu1<line_sep>x4=x3<times>xu1<line_sep>w1=1-0.75/x2+4.6875/x4+1j<times>(0.5/xu1-1.875/x3)<line_sep>cst=np.sqrt(np.pi/2.)<line_sep>sF,cF=sps.fresnel(np.sqrt(xu2/cst))<line_sep>Fc=(0.5-cF)<times>cst<line_sep>Fs=(0.5-sF)<times>cst<line_sep>modx=np.mod(xu2 
2<times>np.pi)<line_sep>expjx=np.exp(1j<times>modx)<line_sep>w2=2<times>1j<times>np.sqrt(xu2)<times>expjx<times>(Fc-1j<times>Fs)<line_sep>y[u1]=w1<line_sep>y[u2]=w2<line_sep><return>(y)<block_end><def_stmt>R th k er err sigma ur urr deltah<block_start>""" R coeff Parameters ---------- th : np.array incidence angle (axe 0) k : np.array wave number (axe 1) er : real part of permittivity err : imaginary part of permittivity sigma : conductivity ur : real part of permeability urr : imaginary part of permeability deltah : height standard deviation Examples -------- .. plot:: :include-source: >>> import numpy as np >>> th = np.linspace(0,np.pi/2,180)[None,:] >>> fGHz = 0.3 >>> lamda = 0.3/fGHz >>> k = np.array([2*np.pi/2])[:,None] >>> Rs,Rh = R(th,k,9,0,0.01,1,0,0) """<line_sep>cel=299792458<line_sep>#-------------------------------------------- #cas des surfaces dielectriques (sinon er=-1) #-------------------------------------------- <if_stmt>(er<ge>0.0)<block_start><if_stmt>(((ur-1.0)<l>1e-16)&((er-1.0)<l>1e-16))<block_start>Rs=np.zeros(len(th) dtype=complex)<line_sep>Rh=np.zeros(len(th) dtype=complex)<block_end>u1=np.where(th<ge>1.5<times>np.pi)<line_sep>u2=np.where(th<ge>np.pi)<line_sep>u3=np.where(th<ge>0.5<times>np.pi)<line_sep>th[u1]=2.0<times>np.pi-th[u1]<line_sep>th[u2]=th[u2]-np.pi<line_sep>th[u3]=np.pi-th[u3]<line_sep>#if (th >= 1.5*np.pi ): # th = 2.0*np.pi - th #elif (th >= np.pi ): # th = th - np.pi #elif (th >= 0.5*np.pi): # th = np.pi - th uo=4.0<times>np.pi<times>1e-7<line_sep>eo=1.0/(uo<times>cel<times>cel)<line_sep>pulse=k<times>cel<line_sep>permi=(er-1j<times>err)-(1j<times>sigma)/(pulse<times>eo)<line_sep>perme=ur-1j<times>urr<line_sep>yy=(permi/perme)<line_sep>st=np.sin(th)<line_sep>ct=np.cos(th)<line_sep>bb=np.sqrt(yy-ct<power>2)<line_sep>Rs=(st-bb)/(st+bb)<line_sep>Rh=(yy<times>st-bb)/(yy<times>st+bb)<block_end><else_stmt># metalic case <block_start>Rs=-np.ones(th.shape dtype=complex)<line_sep>Rh=np.ones(th.shape dtype=complex)<block_end>roughness=1.0<line_sep>Rs=Rs<times>roughness<line_sep>Rh=Rh<times>roughness<line_sep><return>Rs Rh<block_end>
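# Editor's self-check sketch for the transition function defined above (assumes
# pylayers is importable). FreF also returns the small- and large-argument
# expansions ys/yl of [1] eq. 30; |F| should rise from ~0 toward 1 as the
# argument grows, and the large-argument expansion should match at the top end.
import numpy as np
from pylayers.antprop.diffRT import FreF

x = np.logspace(-4, 2, 7)
F, Fs, Fl = FreF(x)
print(np.abs(F))               # increases toward 1
print(np.abs(F[-1] - Fl[-1]))  # ~0 for large arguments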
"""Unit tests for DatastoreBase."""<import_stmt>datetime<import_stmt>unittest<import_from_stmt>apscheduler.schedulers.blocking BlockingScheduler<import_from_stmt>ndscheduler.corescheduler constants<import_from_stmt>ndscheduler.corescheduler.datastore.providers.sqlite DatastoreSqlite<class_stmt>DatastoreBaseTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>fake_scheduler=BlockingScheduler()<line_sep>self.store=DatastoreSqlite.get_instance()<line_sep>self.store.start(fake_scheduler <none>)<block_end><def_stmt>test_add_execution_get_execution self<block_start>eid='12345'<line_sep>job_id='321'<line_sep>self.store.add_execution(eid job_id state=constants.EXECUTION_STATUS_SCHEDULED)<line_sep>execution=self.store.get_execution(eid)<line_sep>self.assertEqual(execution['execution_id'] eid)<block_end><def_stmt>test_update_execution_get_execution self<block_start>eid='12346'<line_sep>job_id='321'<line_sep>self.store.add_execution(eid job_id state=constants.EXECUTION_STATUS_SCHEDULED)<line_sep>self.store.update_execution(eid state=constants.EXECUTION_STATUS_RUNNING)<line_sep>execution=self.store.get_execution(eid)<line_sep>self.assertEqual(execution['execution_id'] eid)<line_sep>self.assertEqual(execution['state'] constants.EXECUTION_STATUS_DICT[constants.EXECUTION_STATUS_RUNNING])<block_end><def_stmt>test_get_executions_by_time_interval self<block_start>now=datetime.datetime.now()<line_sep>start_time=(now+datetime.timedelta(minutes=20)).isoformat()<line_sep>end_time=(now+datetime.timedelta(minutes=100)).isoformat()<line_sep>self.store.add_execution('12' '34' state=constants.EXECUTION_STATUS_SCHEDULED scheduled_time=now+datetime.timedelta(minutes=5))<line_sep>self.store.add_execution('13' '34' state=constants.EXECUTION_STATUS_SCHEDULED scheduled_time=now+datetime.timedelta(minutes=50))<line_sep>self.store.add_execution('14' '34' state=constants.EXECUTION_STATUS_SCHEDULED scheduled_time=now+datetime.timedelta(minutes=70))<line_sep>self.store.add_execution('15' '34' state=constants.EXECUTION_STATUS_SCHEDULED scheduled_time=now+datetime.timedelta(minutes=120))<line_sep>executions=self.store.get_executions(start_time end_time)<line_sep>self.assertEqual(len(executions['executions']) 2)<block_end><def_stmt>test_add_audit_log_get_audit_logs self<block_start>job_id='234'<line_sep>job_name='asdfs'<line_sep>event=constants.AUDIT_LOG_ADDED<line_sep>user='aa'<line_sep>description='hihi'<line_sep>self.store.add_audit_log(job_id job_name event user=user description=description)<line_sep>now=datetime.datetime.utcnow()<line_sep>five_min_ago=now-datetime.timedelta(minutes=5)<line_sep>logs=self.store.get_audit_logs(five_min_ago.isoformat() now.isoformat())<line_sep>self.assertEqual(len(logs['logs']) 1)<block_end><block_end>
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>contextlib<import_stmt>logging<import_stmt>platform<import_stmt>sys<import_stmt>unittest<import_from_stmt>py_trace_event trace_time<class_stmt>TimerTest(unittest.TestCase)# Helper methods. <block_start>@contextlib.contextmanager<def_stmt>ReplacePlatformProcessorCall self f<block_start><try_stmt><block_start>old_proc=platform.processor<line_sep>platform.processor=f<line_sep><yield><block_end><finally_stmt><block_start>platform.processor=old_proc<block_end><block_end>@contextlib.contextmanager<def_stmt>ReplaceQPCCheck self f<block_start><try_stmt><block_start>old_qpc=trace_time.IsQPCUsable<line_sep>trace_time.IsQPCUsable=f<line_sep><yield><block_end><finally_stmt><block_start>trace_time.IsQPCUsable=old_qpc<block_end><block_end># Platform detection tests. <def_stmt>testInitializeNowFunction_platformNotSupported self<block_start><with_stmt>self.assertRaises(RuntimeError)<block_start>trace_time.InitializeNowFunction('invalid_platform')<block_end><block_end><def_stmt>testInitializeNowFunction_windows self<block_start><if_stmt><not>(sys.platform.startswith(trace_time._PLATFORMS['windows'])<or>sys.platform.startswith(trace_time._PLATFORMS['cygwin']))<block_start><return><true><block_end>trace_time.InitializeNowFunction(sys.platform)<line_sep>self.assertTrue(trace_time.GetClock()<eq>trace_time._WIN_HIRES<or>trace_time.GetClock()<eq>trace_time._WIN_LORES)<block_end><def_stmt>testInitializeNowFunction_linux self<block_start><if_stmt><not>sys.platform.startswith(trace_time._PLATFORMS['linux'])<block_start><return><true><block_end>trace_time.InitializeNowFunction(sys.platform)<line_sep>self.assertEqual(trace_time.GetClock() trace_time._LINUX_CLOCK)<block_end><def_stmt>testInitializeNowFunction_mac self<block_start><if_stmt><not>sys.platform.startswith(trace_time._PLATFORMS['mac'])<block_start><return><true><block_end>trace_time.InitializeNowFunction(sys.platform)<line_sep>self.assertEqual(trace_time.GetClock() trace_time._MAC_CLOCK)<block_end># Windows Tests <def_stmt>testIsQPCUsable_buggyAthlonProcReturnsFalse self<block_start><if_stmt><not>(sys.platform.startswith(trace_time._PLATFORMS['windows'])<or>sys.platform.startswith(trace_time._PLATFORMS['cygwin']))<block_start><return><true><block_end><def_stmt>BuggyAthlonProc <block_start><return>'AMD64 Family 15 Model 23 Stepping 6, AuthenticAMD'<block_end><with_stmt>self.ReplacePlatformProcessorCall(BuggyAthlonProc)<block_start>self.assertFalse(trace_time.IsQPCUsable())<block_end><block_end><def_stmt>testIsQPCUsable_returnsTrueOnWindows self<block_start><if_stmt><not>(sys.platform.startswith(trace_time._PLATFORMS['windows'])<or>sys.platform.startswith(trace_time._PLATFORMS['cygwin']))<block_start><return><true><block_end><def_stmt>Proc <block_start><return>'Intel64 Family 15 Model 23 Stepping 6, GenuineIntel'<block_end><with_stmt>self.ReplacePlatformProcessorCall(Proc)<block_start>self.assertTrue(trace_time.IsQPCUsable())<block_end><block_end><def_stmt>testGetWinNowFunction_QPC self<block_start><if_stmt><not>(sys.platform.startswith(trace_time._PLATFORMS['windows'])<or>sys.platform.startswith(trace_time._PLATFORMS['cygwin']))<block_start><return><true><block_end># Test requires QPC to be available on platform. <if_stmt><not>trace_time.IsQPCUsable()<block_start><return><true><block_end>self.assertGreater(trace_time.monotonic() 0)<block_end># Works even if QPC would work. 
<def_stmt>testGetWinNowFunction_GetTickCount self<block_start><if_stmt><not>(sys.platform.startswith(trace_time._PLATFORMS['windows'])<or>sys.platform.startswith(trace_time._PLATFORMS['cygwin']))<block_start><return><true><block_end><with_stmt>self.ReplaceQPCCheck(<lambda>:<false>)<block_start>self.assertGreater(trace_time.monotonic() 0)<block_end><block_end># Linux tests. <def_stmt>testGetClockGetTimeClockNumber_linux self<block_start>self.assertEqual(trace_time.GetClockGetTimeClockNumber('linux') 1)<block_end><def_stmt>testGetClockGetTimeClockNumber_freebsd self<block_start>self.assertEqual(trace_time.GetClockGetTimeClockNumber('freebsd') 4)<block_end><def_stmt>testGetClockGetTimeClockNumber_bsd self<block_start>self.assertEqual(trace_time.GetClockGetTimeClockNumber('bsd') 3)<block_end><def_stmt>testGetClockGetTimeClockNumber_sunos self<block_start>self.assertEqual(trace_time.GetClockGetTimeClockNumber('sunos5') 4)<block_end># Smoke Test. <def_stmt>testMonotonic self<block_start>time_one=trace_time.Now()<for_stmt>_ range(1000)<block_start>time_two=trace_time.Now()<line_sep>self.assertLessEqual(time_one time_two)<line_sep>time_one=time_two<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.getLogger().setLevel(logging.DEBUG)<line_sep>unittest.main(verbosity=2)<block_end>
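# Editor's quick interactive check of the module under test, using only calls the
# tests above exercise: initialise the clock for the running platform, then make
# sure Now() never goes backwards.
import sys
from py_trace_event import trace_time

trace_time.InitializeNowFunction(sys.platform)
print(trace_time.GetClock())
a = trace_time.Now()
b = trace_time.Now()
assert a <= b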
<import_stmt>dedupe<import_stmt>unittest<import_stmt>random<import_stmt>pytest<line_sep>SAMPLE=[({"name":"Bob" "age":"50"} {"name":"Charlie" "age":"75"}) ({"name":"Meredith" "age":"40"} {"name":"Sue" "age":"10"}) ({"name":"Willy" "age":"35"} {"name":"William" "age":"35"}) ({"name":"Jimmy" "age":"20"} {"name":"Jimbo" "age":"21"})]<class_stmt>ActiveLearningTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.data_model=dedupe.datamodel.DataModel([{'field':'name' 'type':'String'} {'field':'age' 'type':'String'}])<block_end><def_stmt>test_AL self<block_start>random.seed(1111111111110)<line_sep>original_N=len(SAMPLE)<line_sep>active_learner=dedupe.labeler.RLRLearner(self.data_model)<line_sep>active_learner.candidates=SAMPLE<assert_stmt>len(active_learner)<eq>original_N<line_sep>pair=active_learner.pop()<line_sep>print(pair)<assert_stmt>pair<eq>({"name":"Willy" "age":"35"} {"name":"William" "age":"35"})<assert_stmt>len(active_learner)<eq>original_N-1<line_sep>pair=active_learner.pop()<line_sep>print(pair)<assert_stmt>pair<eq>({"name":"Jimmy" "age":"20"} {"name":"Jimbo" "age":"21"})<assert_stmt>len(active_learner)<eq>original_N-2<line_sep>pair=active_learner.pop()<assert_stmt>pair<eq>({"name":"Meredith" "age":"40"} {"name":"Sue" "age":"10"})<assert_stmt>len(active_learner)<eq>original_N-3<line_sep>active_learner.pop()<with_stmt>pytest.raises(IndexError)<block_start>active_learner.pop()<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
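# Editor's sketch of the labelling loop the test above implies: the learner keeps
# returning the candidate pair it is currently least certain about, so an
# interactive loop just pops until IndexError. Uses only the (internal) dedupe
# calls seen above; SAMPLE refers to the pairs defined at the top of this file.
import dedupe

learner = dedupe.labeler.RLRLearner(
    dedupe.datamodel.DataModel([{'field': 'name', 'type': 'String'},
                                {'field': 'age', 'type': 'String'}]))
learner.candidates = list(SAMPLE)

while True:
    try:
        pair = learner.pop()
    except IndexError:
        break
    print(pair)  # a human labeller would mark this pair as match / distinct here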
# Copyright 2019-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ Management of all events data. This module exists to all loaders. It can read events data through the DataLoader. This module also acts as a thread pool manager. """<import_stmt>abc<import_stmt>datetime<import_stmt>threading<import_stmt>time<import_stmt>os<import_from_stmt>typing Iterable Optional<import_from_stmt>mindinsight.datavisual.data_transform.summary_watcher SummaryWatcher<import_from_stmt>mindinsight.conf settings<import_from_stmt>mindinsight.datavisual.common exceptions<import_from_stmt>mindinsight.datavisual.common.enums CacheStatus<import_from_stmt>mindinsight.datavisual.common.log logger<import_from_stmt>mindinsight.datavisual.common.enums DataManagerStatus<import_from_stmt>mindinsight.datavisual.common.enums PluginNameEnum<import_from_stmt>mindinsight.datavisual.common.exceptions TrainJobNotExistError<import_from_stmt>mindinsight.datavisual.data_transform.loader_generators.loader_generator MAX_DATA_LOADER_SIZE<import_from_stmt>mindinsight.datavisual.data_transform.loader_generators.data_loader_generator DataLoaderGenerator<import_from_stmt>mindinsight.utils.computing_resource_mgr ComputingResourceManager<import_from_stmt>mindinsight.utils.exceptions MindInsightException<import_from_stmt>mindinsight.utils.exceptions ParamValueError<import_from_stmt>mindinsight.utils.exceptions UnknownError<import_from_stmt>mindinsight.datavisual.utils.tools exception_wrapper<class_stmt>_BasicTrainJob<block_start>""" Basic info about train job. Args: abs_summary_base_dir (str): The canonical path of summary base directory. It should be the return value of realpath(). entry (dict): The summary dir entry listed by SummaryWatcher. 
"""<def_stmt>__init__ self abs_summary_base_dir entry<block_start>self._abs_summary_base_dir=abs_summary_base_dir<line_sep>self._entry=entry<block_end>@property<def_stmt>abs_summary_dir self<block_start>"""Get summary directory path."""<line_sep><return>os.path.realpath(os.path.join(self._abs_summary_base_dir self._entry['relative_path']))<block_end>@property<def_stmt>summary_base_dir self<block_start>"""Get summary base directory path."""<line_sep><return>self._abs_summary_base_dir<block_end>@property<def_stmt>train_id self<block_start>"""Get train id."""<line_sep><return>self._entry['relative_path']<block_end>@property<def_stmt>profiler_dir self<block_start>"""Get profiler directory path."""<if_stmt>self._entry['profiler']<is><not><none><block_start><return>self._entry['profiler']['directory']<block_end><return><none><block_end>@property<def_stmt>create_time self<block_start>"""Get create time."""<line_sep><return>self._entry['create_time']<block_end>@property<def_stmt>update_time self<block_start>"""Get update time."""<line_sep><return>self._entry['update_time']<block_end>@property<def_stmt>profiler_type self<block_start>"""Get profiler type"""<if_stmt>self._entry['profiler']<is><not><none><block_start><return>self._entry['profiler']['profiler_type']<block_end><return>''<block_end>@property<def_stmt>summary_files self<block_start>"""Get the summary files count in the summary dir."""<line_sep><return>self._entry['summary_files']<block_end>@property<def_stmt>graph_files self<block_start>"""Get the graph pb files count in the summary dir."""<line_sep><return>self._entry['graph_files']<block_end>@property<def_stmt>lineage_files self<block_start>"""Get the lineage files count in the summary dir."""<line_sep><return>self._entry['lineage_files']<block_end>@property<def_stmt>dump_dir self<block_start>"""Get the dump file path in the summary dir."""<line_sep><return>self._entry.get('dump_dir' <none>)<block_end><block_end><class_stmt>CachedTrainJob<block_start>""" Cache item for BriefCacheManager. DetailCacheManager will also wrap it's return value with this class. Args: basic_info (_BasicTrainJob): Basic info about the train job. """<def_stmt>__init__ self basic_info:_BasicTrainJob<block_start>self._basic_info=basic_info<line_sep>self._last_access_time=datetime.datetime.utcnow()<line_sep># Other cached content is stored here. 
self._content={}<line_sep>self._cache_status=CacheStatus.NOT_IN_CACHE<line_sep>self._key_locks={}<block_end>@property<def_stmt>cache_status self<block_start>"""Get cache status."""<line_sep><return>self._cache_status<block_end>@cache_status.setter<def_stmt>cache_status self value<block_start>"""Set cache status."""<line_sep>self._cache_status=value<block_end><def_stmt>update_access_time self<block_start>"""Update last access time of this cache item."""<line_sep>self._last_access_time=datetime.datetime.utcnow()<block_end>@property<def_stmt>last_access_time self<block_start>"""Get last access time for purposes such as LRU."""<line_sep><return>self._last_access_time<block_end>@property<def_stmt>abs_summary_dir self<block_start>"""Get summary directory path."""<line_sep><return>self._basic_info.abs_summary_dir<block_end>@property<def_stmt>summary_base_dir self<block_start>"""Get summary base directory path."""<line_sep><return>self._basic_info.summary_base_dir<block_end><def_stmt>set self key value<block_start>"""Set value to cache."""<line_sep>self._content[key]=value<block_end><def_stmt>delete self key raise_exception=<true><block_start>"""Delete key in cache."""<try_stmt><block_start>self._content.pop(key)<block_end><except_stmt>KeyError<block_start><if_stmt>raise_exception<block_start><raise>ParamValueError("Delete failed. Invalid cache key({}).".format(key))<block_end><block_end><block_end><def_stmt>get self key raise_exception=<true><block_start>""" Get value from cache. Args: key (str): Key of content. raise_exception (bool): If the key does not exist and raise_exception is True, it will raise an Exception. Returns: Union[Object, None], Return value if key in content, return False else if raise_exception is False. Raises: ParamValueError, if the key does not exist and raise_exception is True. """<try_stmt><block_start><return>self._content[key]<block_end><except_stmt>KeyError<block_start><if_stmt>raise_exception<block_start><raise>ParamValueError("Invalid cache key({}).".format(key))<block_end><return><none><block_end><block_end>@property<def_stmt>basic_info self<block_start>"""Get basic train job info."""<line_sep><return>self._basic_info<block_end>@basic_info.setter<def_stmt>basic_info self value<block_start>"""Set basic train job info."""<line_sep>self._basic_info=value<block_end><def_stmt>lock_key self key<block_start>"""Threading lock with given key."""<line_sep><return>self._key_locks.setdefault(key threading.Lock())<block_end>@property<def_stmt>train_id self<block_start>"""Get train id."""<line_sep><return>self._basic_info.train_id<block_end><block_end><class_stmt>TrainJob<block_start>""" Train job object. You must not create TrainJob objects manually. You should always get TrainJob objects from DataManager. Args: brief_train_job (CachedTrainJob): Brief info about train job. detail_train_job (Optional[CachedTrainJob]): Detailed info about train job. Default: None. """<def_stmt>__init__ self brief_train_job:CachedTrainJob detail_train_job:Optional[CachedTrainJob]=<none><block_start>self._brief=brief_train_job<line_sep>self._detail=detail_train_job<if_stmt>self._detail<is><none><block_start>self._cache_status=CacheStatus.NOT_IN_CACHE<block_end><else_stmt><block_start>self._cache_status=self._detail.cache_status<block_end><block_end><def_stmt>has_detail self<block_start>"""Whether this train job has detailed info in cache."""<line_sep><return>bool(self._detail<is><not><none>)<block_end><def_stmt>get_detail self key<block_start>""" Get detail content. Args: key (Any): Cache key. 
Returns: Any, cache content. Raises: TrainJobDetailNotInCacheError: when this train job has no detail cache. """<if_stmt><not>self.has_detail()<block_start><raise>exceptions.TrainJobDetailNotInCacheError()<block_end><return>self._detail.get(key)<block_end><def_stmt>get_brief self key<block_start>""" Get brief content. Args: key (Any): Cache key. Returns: Any, cache content. """<line_sep><return>self._brief.get(key)<block_end><def_stmt>get_basic_info self<block_start>""" Get basic info. Returns: basic_info (_BasicTrainJob): Basic info about the train job. """<line_sep><return>self._brief.basic_info<block_end>@property<def_stmt>cache_status self<block_start>"""Get cache status."""<line_sep><return>self._cache_status<block_end>@cache_status.setter<def_stmt>cache_status self cache_status<block_start>"""Set cache status."""<line_sep>self._cache_status=cache_status<block_end><block_end><class_stmt>BaseCacheItemUpdater(abc.ABC)<block_start>"""Abstract base class for other modules to update cache content."""<def_stmt>update_item self cache_item:CachedTrainJob<block_start>""" Update cache item in place. Args: cache_item (CachedTrainJob): The cache item to be processed. """<line_sep><raise>NotImplementedError()<block_end><block_end><class_stmt>_BaseCacheManager<block_start>"""Base class for cache manager."""<def_stmt>__init__ self summary_base_dir<block_start>self._summary_base_dir=summary_base_dir<line_sep># Use dict to remove duplicate updaters. self._updaters={}<line_sep># key is train_id self._lock=threading.Lock()<line_sep>self._cache_items={}<block_end><def_stmt>size self<block_start>"""Gets used cache slots."""<line_sep><return>len(self._cache_items)<block_end><def_stmt>register_cache_item_updater self updater:BaseCacheItemUpdater<block_start>"""Register cache item updater."""<line_sep>self._updaters[updater.__class__.__qualname__]=updater<block_end><def_stmt>get_train_jobs self<block_start>"""Get cached train jobs."""<line_sep>copied_train_jobs=dict(self._cache_items)<line_sep><return>copied_train_jobs<block_end><def_stmt>get_train_job self train_id<block_start>"""Get cached train job."""<try_stmt><block_start><return>self._cache_items[train_id]<block_end><except_stmt>KeyError<block_start><raise>TrainJobNotExistError(train_id)<block_end><block_end><def_stmt>cache_train_job self train_id<arrow>bool<block_start>""" Cache given train job and update train job's last access time. This method should return true if reload actions should be taken to cache the train job. Args: train_id (str): Train Id. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>delete_train_job self train_id<block_start>"""Delete train job from cache."""<if_stmt>train_id<in>self._cache_items<block_start><del_stmt>self._cache_items[train_id]<block_end><block_end><def_stmt>has_content self<block_start>"""Whether this cache manager has train jobs."""<line_sep><return>bool(self._cache_items)<block_end><def_stmt>update_cache self executor<block_start>""" Update cache according to given train jobs on disk. Different cache manager should implement different cache update policies in this method. Args: executor (Executor): The Executor instance. 
"""<line_sep><raise>NotImplementedError()<block_end><block_end><class_stmt>_BriefCacheManager(_BaseCacheManager)<block_start>"""A cache manager that holds all disk train jobs on disk."""<def_stmt>__init__ self summary_base_dir<block_start>super(_BriefCacheManager self).__init__(summary_base_dir)<line_sep>self._summary_watcher=SummaryWatcher()<block_end><def_stmt>cache_train_job self train_id<block_start>""" Cache given train job. All disk train jobs are cached on every reload, so this method always return false. Args: train_id (str): Train Id. """<if_stmt>train_id<in>self._cache_items<block_start>self._cache_items[train_id].update_access_time()<block_end><return><false><block_end><def_stmt>update_cache self executor<block_start>"""Update cache."""<line_sep>logger.info('Start to update BriefCacheManager.')<line_sep>summaries_info=self._summary_watcher.list_summary_directories(self._summary_base_dir)<line_sep>basic_train_jobs=[]<for_stmt>info summaries_info<block_start>basic_train_jobs.append(_BasicTrainJob(abs_summary_base_dir=self._summary_base_dir entry=info))<block_end><with_stmt>self._lock<block_start>new_cache_items=self._merge_with_disk(basic_train_jobs)<line_sep>self._cache_items=new_cache_items<block_end><for_stmt>updater self._updaters.values()<block_start><for_stmt>cache_item self._cache_items.values()<block_start>updater.update_item(cache_item)<block_end><block_end><block_end><def_stmt>_merge_with_disk self disk_train_jobs:Iterable[_BasicTrainJob]<block_start>""" Merge train jobs in cache with train jobs from disk This method will remove train jobs not on disk. Call this function with lock for thread safety. Args: disk_train_jobs (Iterable[_BasicTrainJob]): Basic train jobs info from disk. Returns: dict, a dict containing train jobs to be cached. """<line_sep>new_cache_items={}<for_stmt>train_job disk_train_jobs<block_start><if_stmt>train_job.train_id<not><in>self._cache_items<block_start>new_cache_items[train_job.train_id]=CachedTrainJob(train_job)<block_end><else_stmt><block_start>reused_train_job=self._cache_items[train_job.train_id]<line_sep>reused_train_job.basic_info=train_job<line_sep>new_cache_items[train_job.train_id]=reused_train_job<block_end><block_end><return>new_cache_items<block_end><def_stmt>register_folder_analyzer self analyzer<block_start>"""Register folder analyzer."""<line_sep>self._summary_watcher.register_folder_analyzer(analyzer)<block_end>@property<def_stmt>cache_items self<block_start>"""Get cache items."""<line_sep><return>self._cache_items<block_end><block_end># Key for plugin tags. DATAVISUAL_PLUGIN_KEY="tag_mapping"<line_sep># Detail train job cache key for datavisual content. 
DATAVISUAL_CACHE_KEY="datavisual"<class_stmt>_DetailCacheManager(_BaseCacheManager)<block_start>"""A cache manager that holds detailed info for most recently used train jobs."""<def_stmt>__init__ self summary_base_dir<block_start>super().__init__(summary_base_dir)<line_sep>self._loader_pool={}<line_sep>self._deleted_id_list=[]<line_sep>self._loader_pool_mutex=threading.Lock()<line_sep>self._loader_generators=[DataLoaderGenerator(summary_base_dir)]<line_sep>self._loading_mutex=threading.Lock()<block_end><def_stmt>has_content self<block_start>"""Whether this cache manager has train jobs."""<line_sep><return>bool(self._loader_pool)<block_end><def_stmt>register_folder_analyzer self analyzer<block_start>"""Register folder analyzer."""<for_stmt>generator self._loader_generators<block_start>generator.register_folder_analyzer(analyzer)<block_end><block_end><def_stmt>size self<block_start>""" Get the number of items in this cache manager. To be implemented. Returns: int, the number of items in this cache manager. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>loader_pool_size self<block_start>"""Get loader pool size."""<line_sep><return>len(self._loader_pool)<block_end><def_stmt>update_cache self executor<block_start>""" Update cache. Will switch to using disk_train_jobs in the future. Args: executor (Executor): The Executor instance. """<with_stmt>self._loading_mutex<block_start>load_in_cache=exception_wrapper(self._execute_load_data)<try_stmt><block_start><while_stmt><not>load_in_cache(executor)<block_start><yield><block_end><block_end><except_stmt>UnknownError<as>ex<block_start>logger.warning("Load event data failed. Detail: %s." str(ex))<block_end><block_end><block_end><def_stmt>cache_train_job self train_id<block_start>"""Cache given train job."""<line_sep>loader=<none><line_sep>need_reload=<false><with_stmt>self._loader_pool_mutex<block_start><if_stmt>self._is_loader_in_loader_pool(train_id self._loader_pool)<block_start>loader=self._loader_pool.get(train_id)<block_end><if_stmt>loader<is><none><block_start><for_stmt>generator self._loader_generators<block_start>tmp_loader=generator.generate_loader_by_train_id(train_id)<if_stmt>loader<and>loader.latest_update_time<g>tmp_loader.latest_update_time<block_start><continue><block_end>loader=tmp_loader<block_end><if_stmt>loader<is><none><block_start><raise>TrainJobNotExistError(train_id)<block_end>self._add_loader(loader)<line_sep>need_reload=<true><block_end><block_end>self._update_loader_latest_update_time(loader.loader_id)<line_sep><return>need_reload<block_end><def_stmt>get_train_jobs self<block_start>""" Get train jobs To be implemented. """<block_end><def_stmt>_add_loader self loader<block_start>""" Add a loader to load data. Args: loader (LoaderStruct): A object of `Loader`. """<if_stmt>len(self._loader_pool)<ge>MAX_DATA_LOADER_SIZE<block_start>delete_number=len(self._loader_pool)-MAX_DATA_LOADER_SIZE+1<line_sep>sorted_loaders=sorted(self._loader_pool.items() key=<lambda>loader:loader[1].latest_update_time)<for_stmt>index range(delete_number)<block_start>delete_loader_id=sorted_loaders[index][0]<line_sep>self._delete_loader(delete_loader_id)<block_end><block_end>self._loader_pool.update({loader.loader_id:loader})<block_end><def_stmt>_delete_loader self loader_id<block_start>""" Delete loader from loader pool by loader id. Args: loader_id (str): ID of loader. 
"""<if_stmt>self._loader_pool.get(loader_id)<is><not><none><block_start>logger.debug("delete loader %s" loader_id)<line_sep>self._loader_pool.pop(loader_id)<block_end><block_end><def_stmt>_execute_loader self loader_id executor<block_start>""" Load data form data_loader. If there is something wrong by loading, add logs and delete the loader. Args: loader_id (str): An ID for `Loader`. executor (Executor): The Executor instance. Returns: bool, True if the loader is finished loading. """<try_stmt><block_start><with_stmt>self._loader_pool_mutex<block_start>loader=self._loader_pool.get(loader_id <none>)<if_stmt>loader<is><none><block_start>logger.debug("Loader %r has been deleted, will not load data." loader_id)<line_sep><return><true><block_end><block_end>loader.cache_status=CacheStatus.CACHING<if_stmt>loader.data_loader.load(executor)# Update loader cache status to CACHED. # Loader with cache status CACHED should remain the same cache status. <block_start>loader.cache_status=CacheStatus.CACHED<line_sep><return><true><block_end><return><false><block_end><except_stmt>MindInsightException<as>ex<block_start>logger.warning("Data loader %r load data failed. "<concat>"Delete data_loader. Detail: %s" loader_id ex)<with_stmt>self._loader_pool_mutex<block_start>self._delete_loader(loader_id)<block_end><return><true><block_end><block_end><def_stmt>_generate_loaders self<block_start>"""This function generates the loader from given path."""<line_sep>loader_dict={}<for_stmt>generator self._loader_generators<block_start>loader_dict.update(generator.generate_loaders(self._loader_pool))<block_end>sorted_loaders=sorted(loader_dict.items() key=<lambda>loader:loader[1].latest_update_time)<line_sep>latest_loaders=sorted_loaders[-MAX_DATA_LOADER_SIZE:]<line_sep>self._deal_loaders(latest_loaders)<block_end><def_stmt>_deal_loaders self latest_loaders<block_start>""" This function determines which loaders to keep or remove or added. It is based on the given dict of loaders. Args: latest_loaders (list[dict]): A list of <loader_id: LoaderStruct>. """<with_stmt>self._loader_pool_mutex<block_start><for_stmt>loader_id,loader latest_loaders<block_start><if_stmt>self._loader_pool.get(loader_id <none>)<is><none><block_start>self._add_loader(loader)<line_sep><continue><block_end># If this loader was updated manually before, # its latest_update_time may bigger than update_time in summary. <if_stmt>self._loader_pool[loader_id].latest_update_time<l>loader.latest_update_time<block_start>self._update_loader_latest_update_time(loader_id loader.latest_update_time)<block_end><block_end><block_end><block_end><def_stmt>_execute_load_data self executor<block_start>"""Load data through multiple threads."""<line_sep>self._generate_loaders()<line_sep>loader_pool=self._get_snapshot_loader_pool()<line_sep>loaded=<true><for_stmt>loader_id loader_pool<block_start>loaded=self._execute_loader(loader_id executor)<and>loaded<block_end><return>loaded<block_end><def_stmt>delete_train_job self train_id<block_start>""" Delete train job with a train id. Args: train_id (str): ID for train job. """<with_stmt>self._loader_pool_mutex<block_start>self._delete_loader(train_id)<block_end><block_end><def_stmt>list_tensors self train_id tag<block_start>""" List tensors of the given train job and tag. If the tensor can not find by the given tag, will raise exception. Args: train_id (str): ID for train job. tag (str): The tag name. Returns: list, the NameTuple format is `collections.namedtuple('_Tensor', ['wall_time', 'event_step', 'value'])`. 
the value will contain the given tag data. """<line_sep>loader_pool=self._get_snapshot_loader_pool()<if_stmt><not>self._is_loader_in_loader_pool(train_id loader_pool)<block_start><raise>TrainJobNotExistError("Can not find the given train job in cache.")<block_end>data_loader=loader_pool[train_id].data_loader<line_sep>tensors=[]<try_stmt><block_start>events_data=data_loader.get_events_data()<line_sep>tensors=events_data.tensors(tag)<block_end><except_stmt>KeyError<block_start>error_msg="Can not find any data in this train job by given tag."<line_sep><raise>ParamValueError(error_msg)<block_end><except_stmt>AttributeError<block_start>logger.debug("Train job %r has been deleted or it has not loaded data, "<concat>"and set tags to empty list." train_id)<block_end><return>tensors<block_end><def_stmt>_check_train_job_exist self train_id loader_pool<block_start>""" Check train job exist, if not exist, will raise exception. Args: train_id (str): The given train job id. loader_pool (dict[str, LoaderStruct]): Refer to self._loader_pool. Raises: TrainJobNotExistError: Can not find train job in data manager. """<line_sep>is_exist=<false><if_stmt>train_id<in>loader_pool<block_start><return><block_end><for_stmt>generator self._loader_generators<block_start><if_stmt>generator.check_train_job_exist(train_id)<block_start>is_exist=<true><line_sep><break><block_end><block_end><if_stmt><not>is_exist<block_start><raise>TrainJobNotExistError("Can not find the train job in data manager.")<block_end><block_end><def_stmt>_is_loader_in_loader_pool self train_id loader_pool<block_start>""" Check train job exist, if not exist, return False. Else, return True. Args: train_id (str): The given train job id. loader_pool (dict): See self._loader_pool. Returns: bool, if loader in loader pool, return True. """<if_stmt>train_id<in>loader_pool<block_start><return><true><block_end><return><false><block_end><def_stmt>_get_snapshot_loader_pool self<block_start>""" Create a snapshot of data loader pool to avoid concurrent mutation and iteration issues. Returns: dict, a copy of `self._loader_pool`. """<with_stmt>self._loader_pool_mutex<block_start><return>dict(self._loader_pool)<block_end><block_end><def_stmt>get_train_job self train_id<block_start>""" Get train job by train ID. This method overrides parent method. Args: train_id (str): Train ID for train job. Returns: dict, single train job, if can not find any data, will return None. """<line_sep>self._check_train_job_exist(train_id self._loader_pool)<line_sep>loader=self._get_loader(train_id)<if_stmt>loader<is><none><block_start>logger.info("No valid summary log in train job %s, or it is not in the cache." train_id)<line_sep><return><none><block_end>train_job=loader.to_dict()<line_sep>train_job.pop('data_loader')<line_sep>plugin_data={}<for_stmt>plugin_name PluginNameEnum.list_members()<block_start>job=self.get_train_job_by_plugin(train_id plugin_name=plugin_name)<if_stmt>job<is><none><block_start>plugin_data[plugin_name]=[]<block_end><else_stmt><block_start>plugin_data[plugin_name]=job['tags']<block_end><block_end>train_job.update({DATAVISUAL_PLUGIN_KEY:plugin_data})<line_sep># Will fill basic_info value in future. train_job_obj=CachedTrainJob(basic_info=<none>)<line_sep>train_job_obj.set(DATAVISUAL_CACHE_KEY train_job)<line_sep>train_job_obj.cache_status=loader.cache_status<line_sep><return>train_job_obj<block_end><def_stmt>_get_loader self train_id<block_start>""" Get loader by train id. Args: train_id (str): Train Id. Returns: LoaderStruct, the loader. 
"""<line_sep>loader=<none><with_stmt>self._loader_pool_mutex<block_start><if_stmt>self._is_loader_in_loader_pool(train_id self._loader_pool)<block_start>loader=self._loader_pool.get(train_id)<block_end><block_end><return>loader<block_end><def_stmt>_update_loader_latest_update_time self loader_id latest_update_time=<none><block_start>""" Update loader with latest_update_time. Args: loader_id (str): ID of loader. latest_update_time (float): Timestamp. """<if_stmt>latest_update_time<is><none><block_start>latest_update_time=time.time()<block_end>self._loader_pool[loader_id].latest_update_time=latest_update_time<block_end><def_stmt>get_train_job_by_plugin self train_id plugin_name<block_start>""" Get a train job by train job id. If the given train job does not has the given plugin data, the tag list will be empty. Args: train_id (str): Get train job info by the given id. plugin_name (str): Get tags by given plugin. Returns: TypedDict('TrainJobEntity', {'id': str, 'name': str, 'tags': List[str]}), a train job object. """<line_sep>self._check_train_job_exist(train_id self._loader_pool)<line_sep>loader=self._get_loader(train_id)<if_stmt>loader<is><none><block_start>logger.warning("No valid summary log in train job %s, "<concat>"or it is not in the cache." train_id)<line_sep><return><none><block_end>name=loader.name<line_sep>data_loader=loader.data_loader<line_sep>tags=[]<try_stmt><block_start>events_data=data_loader.get_events_data()<line_sep>tags=events_data.list_tags_by_plugin(plugin_name)<block_end><except_stmt>KeyError<block_start>logger.debug("Plugin name %r does not exist "<concat>"in train job %r, and set tags to empty list." plugin_name name)<block_end><except_stmt>AttributeError<block_start>logger.debug("Train job %r has been deleted or it has not loaded data, "<concat>"and set tags to empty list." name)<block_end>result=dict(id=train_id name=name tags=tags)<line_sep><return>result<block_end><block_end><class_stmt>DataManager<block_start>""" DataManager manages a pool of loader which help access events data. Each loader helps deal the data of the events. A loader corresponds to an events_data. The DataManager build a pool including all the data_loader. The data_loader provides extracting method to get the information of events. """<def_stmt>__init__ self summary_base_dir<block_start>""" Initialize the pool of loader and the dict of name-to-path. Args: summary_base_dir (str): Base summary directory. self._status: Refer `datavisual.common.enums.DataManagerStatus`. """<line_sep>self._summary_base_dir=os.path.realpath(summary_base_dir)<line_sep>self._status=DataManagerStatus.INIT.value<line_sep>self._status_mutex=threading.Lock()<line_sep>self._detail_cache=_DetailCacheManager(self._summary_base_dir)<line_sep>self._brief_cache=_BriefCacheManager(self._summary_base_dir)<line_sep># This lock is used to make sure that only one self._load_data_in_thread() is running. # Because self._load_data_in_thread() will create process pool when loading files, we can not # afford to run multiple self._load_data_in_thread() simultaneously (will create too many processes). self._load_data_lock=threading.Lock()<block_end>@property<def_stmt>summary_base_dir self<block_start>"""Get summary base dir."""<line_sep><return>self._summary_base_dir<block_end><def_stmt>start_load_data self reload_interval=0<block_start>""" Start threads for loading data. Args: reload_interval (int): Time to reload data again. Returns: Thread, the background Thread instance. 
"""<line_sep>logger.info("Start to load data")<line_sep>DataManager.check_reload_interval(reload_interval)<line_sep>thread=threading.Thread(target=self._load_data_in_thread name='start_load_data_thread' args=(reload_interval ) daemon=<true>)<line_sep>thread.start()<line_sep><return>thread<block_end>@staticmethod<def_stmt>check_reload_interval reload_interval<block_start>""" Check reload interval is valid. Args: reload_interval (int): Reload interval >= 0. """<if_stmt><not>isinstance(reload_interval int)<block_start><raise>ParamValueError("The value of reload interval should be integer.")<block_end><if_stmt>reload_interval<l>0<block_start><raise>ParamValueError("The value of reload interval should be >= 0.")<block_end><block_end><def_stmt>_load_data_in_thread self reload_interval<block_start>"""Wrapper for load data in thread."""<if_stmt>self._load_data_lock.locked()<block_start><return><block_end><with_stmt>self._load_data_lock<block_start><while_stmt><true><block_start><try_stmt><block_start>exception_wrapper(self._load_data)()<block_end><except_stmt>UnknownError<as>exc# Not raising the exception here to ensure that data reloading does not crash. <block_start>logger.warning(exc.message)<block_end><finally_stmt><block_start>self._status=DataManagerStatus.DONE.value<block_end><if_stmt><not>reload_interval<block_start><break><block_end>time.sleep(reload_interval)<block_end><block_end><block_end><def_stmt>_load_data self<block_start>"""This function will load data once and ignore it if the status is loading."""<with_stmt>self._status_mutex<block_start><if_stmt>self.status<eq>DataManagerStatus.LOADING.value<block_start>logger.debug("Current status is %s , will ignore to load data." self.status)<line_sep><return><block_end>self.status=DataManagerStatus.LOADING.value<block_end><with_stmt>ComputingResourceManager.get_instance().get_executor(max_processes_cnt=settings.MAX_PROCESSES_COUNT)<as>executor<block_start>self._brief_cache.update_cache(executor)<line_sep>brief_cache_update=time.time()<for_stmt>_ self._detail_cache.update_cache(executor)<block_start>update_interval=time.time()-brief_cache_update<line_sep>logger.debug('Loading one round of detail cache taking %ss.' update_interval)<if_stmt>update_interval<g>3# Use 3 seconds as threshold to avoid updating too often <block_start>self._brief_cache.update_cache(executor)<line_sep>brief_cache_update<augadd>update_interval<block_end><block_end><with_stmt>self._status_mutex<block_start><if_stmt><not>self._brief_cache.has_content()<and><not>self._detail_cache.has_content()<block_start>self.status=DataManagerStatus.INVALID.value<block_end><else_stmt><block_start>self.status=DataManagerStatus.DONE.value<block_end>logger.info("Load brief data end, and loader pool size is %r." self._detail_cache.loader_pool_size())<block_end><block_end><block_end><def_stmt>get_train_job_by_plugin self train_id plugin_name<block_start>""" Get a train job by train job id. If the given train job does not has the given plugin data, the tag list will be empty. Args: train_id (str): Get train job info by the given id. plugin_name (str): Get tags by given plugin. Returns: TypedDict('TrainJobEntity', {'id': str, 'name': str, 'tags': List[str]}), a train job object. """<line_sep>self._check_status_valid()<line_sep><return>self._detail_cache.get_train_job_by_plugin(train_id plugin_name)<block_end><def_stmt>delete_train_job self train_id only_delete_from_cache=<true><block_start>""" Delete train job with a train id. Args: train_id (str): ID for train job. 
"""<if_stmt><not>only_delete_from_cache<block_start><raise>NotImplementedError("Delete from both cache and disk is not supported.")<block_end>self._brief_cache.delete_train_job(train_id)<line_sep>self._detail_cache.delete_train_job(train_id)<block_end><def_stmt>list_tensors self train_id tag<block_start>""" List tensors of the given train job and tag. If the tensor can not find by the given tag, will raise exception. Args: train_id (str): ID for train job. tag (str): The tag name. Returns: NamedTuple, the tuple format is `collections.namedtuple('_Tensor', ['wall_time', 'event_step', 'value'])`. the value will contain the given tag data. """<line_sep>self._check_status_valid()<line_sep><return>self._detail_cache.list_tensors(train_id tag)<block_end><def_stmt>_check_status_valid self<block_start>"""Check if the status is valid to load data."""<if_stmt>self.status<eq>DataManagerStatus.INIT.value<block_start><raise>exceptions.SummaryLogIsLoading("Data is being loaded, current status: %s."%self._status)<block_end><block_end><def_stmt>get_train_job self train_id<block_start>""" Get train job by train ID. Args: train_id (str): Train ID for train job. Returns: dict, single train job, if can not find any data, will return None. """<line_sep>self._check_status_valid()<line_sep>detail_train_job=self._detail_cache.get_train_job(train_id)<line_sep>brief_train_job=self._brief_cache.get_train_job(train_id)<line_sep><return>TrainJob(brief_train_job detail_train_job)<block_end>@property<def_stmt>status self<block_start>""" Get the status of data manager. Returns: DataManagerStatus, the status of data manager. """<line_sep><return>self._status<block_end>@status.setter<def_stmt>status self status<block_start>"""Set data manager status."""<line_sep>self._status=status<block_end><def_stmt>cache_train_job self train_id<block_start>"""Cache given train job (async)."""<line_sep>brief_need_reload=self._brief_cache.cache_train_job(train_id)<line_sep>detail_need_reload=self._detail_cache.cache_train_job(train_id)<if_stmt>brief_need_reload<or>detail_need_reload<block_start>self.start_load_data()<block_end><block_end><def_stmt>register_brief_cache_item_updater self updater:BaseCacheItemUpdater<block_start>"""Register brief cache item updater for brief cache manager."""<line_sep>self._brief_cache.register_cache_item_updater(updater)<block_end><def_stmt>register_folder_analyzer self analyzer<block_start>"""Register folder analyzer."""<line_sep>self._brief_cache.register_folder_analyzer(analyzer)<line_sep>self._detail_cache.register_folder_analyzer(analyzer)<block_end><def_stmt>get_brief_cache self<block_start>"""Get brief cache."""<line_sep><return>self._brief_cache<block_end><def_stmt>get_brief_train_job self train_id<block_start>"""Get brief train job."""<line_sep><return>self._brief_cache.get_train_job(train_id)<block_end><block_end>DATA_MANAGER=DataManager(settings.SUMMARY_BASE_DIR)<line_sep>
""" This module provides an ANA file Reader. This is a modified version of `pyana <https://github.com/tvwerkhoven/pyana>`__. .. warning:: The reading and writing of ana files is not supported under Windows. """<import_stmt>os<import_stmt>collections<import_from_stmt>sunpy.io.header FileHeader<try_stmt><block_start><import_from_stmt>sunpy.io _pyana<block_end><except_stmt>ImportError<block_start>_pyana=<none><block_end>__all__=['read' 'get_header' 'write']<line_sep>HDPair=collections.namedtuple('HDPair' ['data' 'header'])<def_stmt>read filename debug=<false> **kwargs<block_start>""" Loads an ANA file and returns the data and a header in a list of (data, header) tuples. Parameters ---------- filename : `str` Name of file to be read. debug : `bool`, optional Prints verbose debug information. Returns ------- out : `list` A list of (data, header) tuples Examples -------- >>> data = sunpy.io.ana.read(filename) # doctest: +SKIP """<if_stmt><not>os.path.isfile(filename)<block_start><raise>OSError("File does not exist!")<block_end><if_stmt>_pyana<is><none><block_start><raise>ImportError("C extension for ANA is missing, please rebuild.")<block_end>data=_pyana.fzread(filename debug)<line_sep><return>[HDPair(data['data'] FileHeader(data['header']))]<block_end><def_stmt>get_header filename debug=<false><block_start>""" Loads an ANA file and only return the header consisting of the dimensions, size (defined as the product of all dimensions times the size of the datatype, this not relying on actual filesize) and comments. Parameters ---------- filename : `str` Name of file to be read. debug : `bool`, optional Prints verbose debug information. Returns ------- out : `list` A list of `~sunpy.io.header.FileHeader` headers. Examples -------- >>> header = sunpy.io.ana.get_header(filename) # doctest: +SKIP """<if_stmt>_pyana<is><none><block_start><raise>ImportError("C extension for ANA is missing, please rebuild")<block_end>data=_pyana.fzread(filename debug)<line_sep><return>[FileHeader(data['header'])]<block_end><def_stmt>write filename data comments=<false> compress=<true> debug=<false><block_start>""" Saves a 2D `numpy.array` as an ANA file and returns the bytes written or ``NULL``. Parameters ---------- filename : `str` Name of file to be created. data : `numpy.ndarray` The data to be stored. comments : `~sunpy.io.header.FileHeader`, optional The comments to be stored as a header. compress : `bool`, optional Compress the data with `True` (the default). debug : `bool`, optional Prints verbose debug information, defaults to `False`. Returns ------- out: ANA compressed archive A new ANA compressed archive containing the data and header. Examples -------- >>> written = sunpy.io.ana.write(filename, data, comments=False, compress=True) # doctest: +SKIP """<if_stmt>_pyana<is><none><block_start><raise>ImportError("C extension for ANA is missing, please rebuild")<block_end><if_stmt>comments<block_start><return>_pyana.fzwrite(filename data int(compress) comments debug)<block_end><else_stmt><block_start><return>_pyana.fzwrite(filename data int(compress) '' debug)<block_end><block_end>
# automatically generated by the FlatBuffers compiler, do not modify # namespace: FBOutput <import_stmt>tdw.flatbuffers<class_stmt>EnvironmentCollision(object)<block_start>__slots__=['_tab']<line_sep>@classmethod<def_stmt>GetRootAsEnvironmentCollision cls buf offset<block_start>n=tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset buf offset)<line_sep>x=EnvironmentCollision()<line_sep>x.Init(buf n+offset)<line_sep><return>x<block_end># EnvironmentCollision <def_stmt>Init self buf pos<block_start>self._tab=tdw.flatbuffers.table.Table(buf pos)<block_end># EnvironmentCollision <def_stmt>ObjectId self<block_start>o=tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))<if_stmt>o<ne>0<block_start><return>self._tab.Get(tdw.flatbuffers.number_types.Int32Flags o+self._tab.Pos)<block_end><return>0<block_end># EnvironmentCollision <def_stmt>State self<block_start>o=tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))<if_stmt>o<ne>0<block_start><return>self._tab.Get(tdw.flatbuffers.number_types.Uint8Flags o+self._tab.Pos)<block_end><return>1<block_end># EnvironmentCollision <def_stmt>Contacts self j<block_start>o=tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))<if_stmt>o<ne>0<block_start>x=self._tab.Vector(o)<line_sep>x<augadd>tdw.flatbuffers.number_types.UOffsetTFlags.py_type(j)<times>24<import_from_stmt>.ContactPoint ContactPoint<line_sep>obj=ContactPoint()<line_sep>obj.Init(self._tab.Bytes x)<line_sep><return>obj<block_end><return><none><block_end># EnvironmentCollision <def_stmt>ContactsLength self<block_start>o=tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))<if_stmt>o<ne>0<block_start><return>self._tab.VectorLen(o)<block_end><return>0<block_end># EnvironmentCollision <def_stmt>Floor self<block_start>o=tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))<if_stmt>o<ne>0<block_start><return>bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags o+self._tab.Pos))<block_end><return><false><block_end><block_end><def_stmt>EnvironmentCollisionStart builder<block_start>builder.StartObject(4)<block_end><def_stmt>EnvironmentCollisionAddObjectId builder objectId<block_start>builder.PrependInt32Slot(0 objectId 0)<block_end><def_stmt>EnvironmentCollisionAddState builder state<block_start>builder.PrependUint8Slot(1 state 1)<block_end><def_stmt>EnvironmentCollisionAddContacts builder contacts<block_start>builder.PrependUOffsetTRelativeSlot(2 tdw.flatbuffers.number_types.UOffsetTFlags.py_type(contacts) 0)<block_end><def_stmt>EnvironmentCollisionStartContactsVector builder numElems<block_start><return>builder.StartVector(24 numElems 4)<block_end><def_stmt>EnvironmentCollisionAddFloor builder floor<block_start>builder.PrependBoolSlot(3 floor 0)<block_end><def_stmt>EnvironmentCollisionEnd builder<block_start><return>builder.EndObject()<block_end>
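A read-side sketch for the generated FlatBuffers accessor above; the byte payload is a hypothetical buffer received from the TDW build, and only the accessor calls are taken from the class definition.

def parse_environment_collision(raw_bytes: bytes) -> dict:
    # raw_bytes: hypothetical FlatBuffers payload containing an EnvironmentCollision table.
    collision = EnvironmentCollision.GetRootAsEnvironmentCollision(raw_bytes, 0)
    return {
        "object_id": collision.ObjectId(),
        "state": collision.State(),
        "floor": collision.Floor(),
        "contacts": [collision.Contacts(i) for i in range(collision.ContactsLength())],
    }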
# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ExplainLoader."""<import_stmt>math<import_stmt>os<import_stmt>re<import_stmt>threading<import_from_stmt>collections defaultdict<import_from_stmt>datetime datetime<import_from_stmt>enum Enum<import_from_stmt>typing Dict Iterable List Optional Union<import_from_stmt>mindinsight.datavisual.common.exceptions TrainJobNotExistError<import_from_stmt>mindinsight.datavisual.data_access.file_handler FileHandler<import_from_stmt>mindinsight.explainer.common.enums ExplainFieldsEnum<import_from_stmt>mindinsight.explainer.common.log logger<import_from_stmt>mindinsight.explainer.manager.explain_parser ExplainParser<import_from_stmt>mindinsight.utils.exceptions ParamValueError UnknownError<line_sep>_NAN_CONSTANT='NaN'<line_sep>_NUM_DIGITS=6<line_sep>_EXPLAIN_FIELD_NAMES=[ExplainFieldsEnum.SAMPLE_ID ExplainFieldsEnum.BENCHMARK ExplainFieldsEnum.METADATA ]<line_sep>_SAMPLE_FIELD_NAMES=[ExplainFieldsEnum.GROUND_TRUTH_LABEL ExplainFieldsEnum.INFERENCE ExplainFieldsEnum.EXPLANATION ExplainFieldsEnum.HIERARCHICAL_OCCLUSION]<class_stmt>_LoaderStatus(Enum)<block_start>STOP='STOP'<line_sep>LOADING='LOADING'<line_sep>PENDING='PENDING'<line_sep>LOADED='LOADED'<block_end><def_stmt>_round score<block_start>"""Take round of a number to given precision."""<try_stmt><block_start><return>round(score _NUM_DIGITS)<block_end><except_stmt>TypeError<block_start><return>score<block_end><block_end><class_stmt>ExplainLoader<block_start>"""ExplainLoader which manage the record in the summary file."""<def_stmt>__init__ self loader_id:str summary_dir:str<block_start>self._parser=ExplainParser(summary_dir)<line_sep>self._loader_info={'loader_id':loader_id 'summary_dir':summary_dir 'create_time':os.stat(summary_dir).st_ctime 'update_time':os.stat(summary_dir).st_mtime 'query_time':os.stat(summary_dir).st_ctime 'uncertainty_enabled':<false> }<line_sep>self._samples=defaultdict(dict)<line_sep>self._metadata={'explainers':[] 'metrics':[] 'labels':[] 'min_confidence':0.5}<line_sep>self._benchmark={'explainer_score':defaultdict(dict) 'label_score':defaultdict(dict)}<line_sep>self._status=_LoaderStatus.PENDING.value<line_sep>self._status_mutex=threading.Lock()<block_end>@property<def_stmt>all_classes self<arrow>List[Dict]<block_start>""" Return a list of detailed label information, including label id, label name and sample count of each label. Returns: list[dict], a list of dict, each dict contains: - id (int): Label id. - label (str): Label name. - sample_count (int): Number of samples for each label. 
"""<line_sep>sample_count_per_label=defaultdict(int)<line_sep>saliency_count_per_label=defaultdict(int)<line_sep>hoc_count_per_label=defaultdict(int)<for_stmt>sample self._samples.values()<block_start><if_stmt>sample.get('image')<and>(sample.get('ground_truth_label')<or>sample.get('predicted_label'))<block_start><for_stmt>label set(sample['ground_truth_label']+sample['predicted_label'])<block_start>sample_count_per_label[label]<augadd>1<if_stmt>sample['inferences'][label]['saliency_maps']<block_start>saliency_count_per_label[label]<augadd>1<block_end><if_stmt>sample['inferences'][label]['hoc_layers']<block_start>hoc_count_per_label[label]<augadd>1<block_end><block_end><block_end><block_end>all_classes_return=[{'id':label_id 'label':label_name 'sample_count':sample_count_per_label[label_id] 'saliency_sample_count':saliency_count_per_label[label_id] 'hoc_sample_count':hoc_count_per_label[label_id]}<for>label_id,label_name enumerate(self._metadata['labels'])]<line_sep><return>all_classes_return<block_end>@property<def_stmt>query_time self<arrow>float<block_start>"""Return query timestamp of explain loader."""<line_sep><return>self._loader_info['query_time']<block_end>@query_time.setter<def_stmt>query_time self new_time:Union[datetime float]<block_start>""" Update the query_time timestamp manually. Args: new_time (datetime.datetime or float): Updated query_time for the explain loader. """<if_stmt>isinstance(new_time datetime)<block_start>self._loader_info['query_time']=new_time.timestamp()<block_end><elif_stmt>isinstance(new_time float)<block_start>self._loader_info['query_time']=new_time<block_end><else_stmt><block_start><raise>TypeError('new_time should have type of datetime.datetime or float, but receive {}'.format(type(new_time)))<block_end><block_end>@property<def_stmt>create_time self<arrow>float<block_start>"""Return the create timestamp of summary file."""<line_sep><return>self._loader_info['create_time']<block_end>@create_time.setter<def_stmt>create_time self new_time:Union[datetime float]<block_start>""" Update the create_time manually Args: new_time (datetime.datetime or float): Updated create_time of summary_file. """<if_stmt>isinstance(new_time datetime)<block_start>self._loader_info['create_time']=new_time.timestamp()<block_end><elif_stmt>isinstance(new_time float)<block_start>self._loader_info['create_time']=new_time<block_end><else_stmt><block_start><raise>TypeError('new_time should have type of datetime.datetime or float, but receive {}'.format(type(new_time)))<block_end><block_end>@property<def_stmt>explainers self<arrow>List[str]<block_start>"""Return a list of explainer names recorded in the summary file."""<line_sep><return>self._metadata['explainers']<block_end>@property<def_stmt>explainer_scores self<arrow>List[Dict]<block_start>""" Return evaluation results for every explainer. Returns: list[dict], A list of evaluation results of each explainer. Each item contains: - explainer (str): Name of evaluated explainer. - evaluations (list[dict]): A list of evaluation results by different metrics. - class_scores (list[dict]): A list of evaluation results on different labels. Each item in the evaluations contains: - metric (str): name of metric method - score (float): evaluation result Each item in the class_scores contains: - label (str): Name of label - evaluations (list[dict]): A list of evaluation results on different labels by different metrics. 
Each item in evaluations contains: - metric (str): Name of metric method - score (float): Evaluation scores of explainer on specific label by the metric. """<line_sep>explainer_scores=[]<for_stmt>explainer,explainer_score_on_metric self._benchmark['explainer_score'].copy().items()<block_start>metric_scores=[{'metric':metric 'score':_round(score)}<for>metric,score explainer_score_on_metric.items()]<line_sep>label_scores=[]<for_stmt>label,label_score_on_metric self._benchmark['label_score'][explainer].copy().items()<block_start>score_of_single_label={'label':self._metadata['labels'][label] 'evaluations':[{'metric':metric 'score':_round(score)}<for>metric,score label_score_on_metric.items()] }<line_sep>label_scores.append(score_of_single_label)<block_end>explainer_scores.append({'explainer':explainer 'evaluations':metric_scores 'class_scores':label_scores })<block_end><return>explainer_scores<block_end>@property<def_stmt>labels self<arrow>List[str]<block_start>"""Return the label recorded in the summary."""<line_sep><return>self._metadata['labels']<block_end>@property<def_stmt>metrics self<arrow>List[str]<block_start>"""Return a list of metric names recorded in the summary file."""<line_sep><return>self._metadata['metrics']<block_end>@property<def_stmt>min_confidence self<arrow>Optional[float]<block_start>"""Return minimum confidence used to filter the predicted labels."""<line_sep><return>self._metadata['min_confidence']<block_end>@property<def_stmt>sample_count self<arrow>int<block_start>""" Return total number of samples in the loader. Since the loader only return available samples (i.e. with original image data and ground_truth_label loaded in cache), the returned count only takes the available samples into account. Return: int, total number of available samples in the loading job. """<line_sep>sample_count=0<for_stmt>sample self._samples.values()<block_start><if_stmt>sample.get('image' <false>)<block_start>sample_count<augadd>1<block_end><block_end><return>sample_count<block_end>@property<def_stmt>samples self<arrow>List[Dict]<block_start>"""Return the information of all samples in the job."""<line_sep><return>self._samples<block_end>@property<def_stmt>train_id self<arrow>str<block_start>"""Return ID of explain loader."""<line_sep><return>self._loader_info['loader_id']<block_end>@property<def_stmt>uncertainty_enabled self<block_start>"""Whether uncertainty is enabled."""<line_sep><return>self._loader_info['uncertainty_enabled']<block_end>@property<def_stmt>update_time self<arrow>float<block_start>"""Return latest modification timestamp of summary file."""<line_sep><return>self._loader_info['update_time']<block_end>@update_time.setter<def_stmt>update_time self new_time:Union[datetime float]<block_start>""" Update the update_time manually. Args: new_time (datetime.datetime or float): Updated time for the summary file. 
"""<if_stmt>isinstance(new_time datetime)<block_start>self._loader_info['update_time']=new_time.timestamp()<block_end><elif_stmt>isinstance(new_time float)<block_start>self._loader_info['update_time']=new_time<block_end><else_stmt><block_start><raise>TypeError('new_time should have type of datetime.datetime or float, but receive {}'.format(type(new_time)))<block_end><block_end><def_stmt>load self<block_start>"""Start loading data from the latest summary file to the loader."""<if_stmt>self.status<ne>_LoaderStatus.LOADED.value<block_start>self.status=_LoaderStatus.LOADING.value<block_end>filenames=[]<for_stmt>filename FileHandler.list_dir(self._loader_info['summary_dir'])<block_start><if_stmt>FileHandler.is_file(FileHandler.join(self._loader_info['summary_dir'] filename))<block_start>filenames.append(filename)<block_end><block_end>filenames=ExplainLoader._filter_files(filenames)<if_stmt><not>filenames<block_start><raise>TrainJobNotExistError('No summary file found in %s, explain job will be delete.'%self._loader_info['summary_dir'])<block_end>is_end=<false><while_stmt><not>is_end<and>self.status<ne>_LoaderStatus.STOP.value<block_start><try_stmt><block_start>file_changed,is_end,event_dict=self._parser.list_events(filenames)<block_end><except_stmt>UnknownError<block_start>is_end=<true><line_sep><break><block_end><if_stmt>file_changed<block_start>logger.info('Summary file in %s update, reload the data in the summary.' self._loader_info['summary_dir'])<line_sep>self._clear_job()<if_stmt>self.status<ne>_LoaderStatus.STOP.value<block_start>self.status=_LoaderStatus.LOADING.value<block_end><block_end><if_stmt>event_dict<block_start>self._import_data_from_event(event_dict)<block_end><block_end>self._reform_sample_info()<if_stmt>is_end<block_start>self.status=_LoaderStatus.LOADED.value<block_end><block_end>@property<def_stmt>status self<block_start>"""Get the status of this class with lock."""<with_stmt>self._status_mutex<block_start><return>self._status<block_end><block_end>@status.setter<def_stmt>status self status<block_start>"""Set the status of this class with lock."""<with_stmt>self._status_mutex<block_start>self._status=status<block_end><block_end><def_stmt>stop self<block_start>"""Stop load data."""<line_sep>self.status=_LoaderStatus.STOP.value<block_end><def_stmt>get_all_samples self<arrow>List[Dict]<block_start>""" Return a list of sample information cached in the explain job. Returns: sample_list (list[SampleObj]): a list of sample objects, each object consists of: - id (int): Sample id. - name (str): Basename of image. - inferences (list[dict]): List of inferences for all labels. 
"""<line_sep>returned_samples=[{'id':sample_id 'name':info['name'] 'image':info['image'] 'inferences':list(info['inferences'].values())}<for>sample_id,info self._samples.items()<if>info.get('image' <false>)]<line_sep><return>returned_samples<block_end><def_stmt>_import_data_from_event self event_dict:Dict<block_start>"""Parse and import data from the event data."""<if_stmt>'metadata'<not><in>event_dict<and>self._is_metadata_empty()<block_start><raise>ParamValueError('metadata is incomplete, should write metadata first in the summary.')<block_end><for_stmt>tag,event event_dict.items()<block_start><if_stmt>tag<eq>ExplainFieldsEnum.METADATA.value<block_start>self._import_metadata_from_event(event.metadata)<block_end><elif_stmt>tag<eq>ExplainFieldsEnum.BENCHMARK.value<block_start>self._import_benchmark_from_event(event.benchmark)<block_end><elif_stmt>tag<eq>ExplainFieldsEnum.SAMPLE_ID.value<block_start>self._import_sample_from_event(event)<block_end><else_stmt><block_start>logger.info('Unknown ExplainField: %s.' tag)<block_end><block_end><block_end><def_stmt>_is_metadata_empty self<block_start>"""Check whether metadata is completely loaded first."""<if_stmt><not>self._metadata['labels']<block_start><return><true><block_end><return><false><block_end><def_stmt>_import_metadata_from_event self metadata_event<block_start>"""Import the metadata from event into loader."""<def_stmt>take_union existed_list imported_data<block_start>"""Take union of existed_list and imported_data."""<if_stmt>isinstance(imported_data Iterable)<block_start><for_stmt>sample imported_data<block_start><if_stmt>sample<not><in>existed_list<block_start>existed_list.append(sample)<block_end><block_end><block_end><block_end>take_union(self._metadata['explainers'] metadata_event.explain_method)<line_sep>take_union(self._metadata['metrics'] metadata_event.benchmark_method)<line_sep>take_union(self._metadata['labels'] metadata_event.label)<block_end><def_stmt>_import_benchmark_from_event self benchmarks<block_start>""" Parse the benchmark event. Benchmark data are separated into 'explainer_score' and 'label_score'. 'explainer_score' contains overall evaluation results of each explainer by different metrics, while 'label_score' additionally divides the results w.r.t different labels. The structure of self._benchmark['explainer_score'] demonstrates below: { explainer_1: {metric_name_1: score_1, ...}, explainer_2: {metric_name_1: score_1, ...}, ... } The structure of self._benchmark['label_score'] is: { explainer_1: {label_id: {metric_1: score_1, metric_2: score_2, ...}, ...}, explainer_2: {label_id: {metric_1: score_1, metric_2: score_2, ...}, ...}, ... } Args: benchmarks (BenchmarkContainer): Parsed benchmarks data from summary file. 
"""<line_sep>explainer_score=self._benchmark['explainer_score']<line_sep>label_score=self._benchmark['label_score']<for_stmt>benchmark benchmarks<block_start>explainer=benchmark.explain_method<line_sep>metric=benchmark.benchmark_method<line_sep>metric_score=benchmark.total_score<line_sep>label_score_event=benchmark.label_score<line_sep>explainer_score[explainer][metric]=_NAN_CONSTANT<if>math.isnan(metric_score)<else>metric_score<line_sep>new_label_score_dict=ExplainLoader._score_event_to_dict(label_score_event metric)<for_stmt>label,scores_of_metric new_label_score_dict.items()<block_start><if_stmt>label<not><in>label_score[explainer]<block_start>label_score[explainer][label]={}<block_end>label_score[explainer][label].update(scores_of_metric)<block_end><block_end><block_end><def_stmt>_import_sample_from_event self sample<block_start>""" Parse the sample event. Detailed data of each sample are store in self._samples, identified by sample_id. Each sample data are stored in the following structure: - ground_truth_labels (list[int]): A list of ground truth labels of the sample. - ground_truth_probs (list[float]): A list of confidences of ground-truth label from black-box model. - predicted_labels (list[int]): A list of predicted labels from the black-box model. - predicted_probs (list[int]): A list of confidences w.r.t the predicted labels. - explanations (dict): Explanations is a dictionary where the each explainer name mapping to a dictionary of saliency maps. The structure of explanations demonstrates below: { explainer_name_1: {label_1: saliency_id_1, label_2: saliency_id_2, ...}, explainer_name_2: {label_1: saliency_id_1, label_2: saliency_id_2, ...}, ... } - hierarchical_occlusion (dict): A dictionary where each label is matched to a dictionary: {label_1: [{prob: layer1_prob, bbox: []}, {prob: layer2_prob, bbox: []}], label_2: } """<if_stmt>getattr(sample 'sample_id' <none>)<is><none><block_start><raise>ParamValueError('sample_event has no sample_id')<block_end>sample_id=sample.sample_id<if_stmt>sample_id<not><in>self._samples<block_start>self._samples[sample_id]={'id':sample_id 'name':str(sample_id) 'image':sample.image_path 'ground_truth_label':[] 'predicted_label':[] 'inferences':defaultdict(dict) 'explanation':defaultdict(dict) 'hierarchical_occlusion':defaultdict(dict)}<block_end><if_stmt>sample.image_path<block_start>self._samples[sample_id]['image']=sample.image_path<block_end><for_stmt>tag _SAMPLE_FIELD_NAMES<block_start><if_stmt>tag<eq>ExplainFieldsEnum.GROUND_TRUTH_LABEL<block_start><if_stmt><not>self._samples[sample_id]['ground_truth_label']<block_start>self._samples[sample_id]['ground_truth_label'].extend(list(sample.ground_truth_label))<block_end><block_end><elif_stmt>tag<eq>ExplainFieldsEnum.INFERENCE<block_start>self._import_inference_from_event(sample sample_id)<block_end><elif_stmt>tag<eq>ExplainFieldsEnum.EXPLANATION<block_start>self._import_explanation_from_event(sample sample_id)<block_end><elif_stmt>tag<eq>ExplainFieldsEnum.HIERARCHICAL_OCCLUSION<block_start>self._import_hoc_from_event(sample sample_id)<block_end><block_end><block_end><def_stmt>_reform_sample_info self<block_start>"""Reform the sample info."""<for_stmt>_,sample_info self._samples.items()<block_start>inferences=sample_info['inferences']<line_sep>res_dict=defaultdict(list)<for_stmt>explainer,label_heatmap_path_dict sample_info['explanation'].items()<block_start><for_stmt>label,heatmap_path label_heatmap_path_dict.items()<block_start>res_dict[label].append({'explainer':explainer 
'overlay':heatmap_path})<block_end><block_end><for_stmt>label,item inferences.items()<block_start>item['saliency_maps']=res_dict[label]<block_end><for_stmt>label,item sample_info['hierarchical_occlusion'].items()<block_start>inferences[label]['hoc_layers']=item['hoc_layers']<block_end><block_end><block_end><def_stmt>_import_inference_from_event self event sample_id<block_start>"""Parse the inference event."""<line_sep>inference=event.inference<if_stmt>inference.ground_truth_prob_sd<or>inference.predicted_prob_sd<block_start>self._loader_info['uncertainty_enabled']=<true><block_end><if_stmt><not>self._samples[sample_id]['predicted_label']<block_start>self._samples[sample_id]['predicted_label'].extend(list(inference.predicted_label))<block_end><if_stmt><not>self._samples[sample_id]['inferences']<block_start>inferences={}<for_stmt>label,prob zip(list(event.ground_truth_label)+list(inference.predicted_label) list(inference.ground_truth_prob)+list(inference.predicted_prob))<block_start>inferences[label]={'label':self._metadata['labels'][label] 'confidence':_round(prob) 'saliency_maps':[] 'hoc_layers':{} }<if_stmt><not>event.ground_truth_label<block_start>inferences[label]['prediction_type']=<none><block_end><else_stmt><block_start><if_stmt>prob<l>self.min_confidence<block_start>inferences[label]['prediction_type']='FN'<block_end><elif_stmt>label<in>event.ground_truth_label<block_start>inferences[label]['prediction_type']='TP'<block_end><else_stmt><block_start>inferences[label]['prediction_type']='FP'<block_end><block_end><block_end><if_stmt>self._loader_info['uncertainty_enabled']<block_start><for_stmt>label,std,low,high zip(list(event.ground_truth_label)+list(inference.predicted_label) list(inference.ground_truth_prob_sd)+list(inference.predicted_prob_sd) list(inference.ground_truth_prob_itl95_low)+list(inference.predicted_prob_itl95_low) list(inference.ground_truth_prob_itl95_hi)+list(inference.predicted_prob_itl95_hi))<block_start>inferences[label]['confidence_sd']=_round(std)<line_sep>inferences[label]['confidence_itl95']=[_round(low) _round(high)]<block_end><block_end>self._samples[sample_id]['inferences']=inferences<block_end><block_end><def_stmt>_import_explanation_from_event self event sample_id<block_start>"""Parse the explanation event."""<if_stmt>self._samples[sample_id]['explanation']<is><none><block_start>self._samples[sample_id]['explanation']=defaultdict(dict)<block_end>sample_explanation=self._samples[sample_id]['explanation']<for_stmt>explanation_item event.explanation<block_start>explainer=explanation_item.explain_method<line_sep>label=explanation_item.label<line_sep>sample_explanation[explainer][label]=explanation_item.heatmap_path<block_end><block_end><def_stmt>_import_hoc_from_event self event sample_id<block_start>"""Parse the mango event."""<line_sep>sample_hoc=self._samples[sample_id]['hierarchical_occlusion']<if_stmt>event.hierarchical_occlusion<block_start><for_stmt>hoc_item event.hierarchical_occlusion<block_start>label=hoc_item.label<line_sep>sample_hoc[label]={}<line_sep>sample_hoc[label]['label']=label<line_sep>sample_hoc[label]['mask']=hoc_item.mask<line_sep>sample_hoc[label]['confidence']=self._samples[sample_id]['inferences'][label]['confidence']<line_sep>sample_hoc[label]['hoc_layers']=[]<for_stmt>hoc_layer hoc_item.layer<block_start>sample_hoc_dict={'confidence':hoc_layer.prob}<line_sep>box_lst=list(hoc_layer.box)<line_sep>box=[box_lst[i:i+4]<for>i range(0 len(hoc_layer.box) 
4)]<line_sep>sample_hoc_dict['boxes']=box<line_sep>sample_hoc[label]['hoc_layers'].append(sample_hoc_dict)<block_end><block_end><block_end><block_end><def_stmt>_clear_job self<block_start>"""Clear the cached data and update the time info of the loader."""<line_sep>self._samples.clear()<line_sep>self._loader_info['create_time']=os.stat(self._loader_info['summary_dir']).st_ctime<line_sep>self._loader_info['update_time']=os.stat(self._loader_info['summary_dir']).st_mtime<line_sep>self._loader_info['query_time']=max(self._loader_info['update_time'] self._loader_info['query_time'])<def_stmt>clear_inner_dict outer_dict<block_start>"""Clear the inner structured data of the given dict."""<for_stmt>item outer_dict.values()<block_start>item.clear()<block_end><block_end>map(clear_inner_dict [self._metadata self._benchmark])<block_end>@staticmethod<def_stmt>_filter_files filenames<block_start>""" Gets a list of summary files. Args: filenames (list[str]): File name list, like [filename1, filename2]. Returns: list[str], filename list. """<line_sep><return>list(filter(<lambda>filename:(re.search(r'summary\.\d+' filename)<and>filename.endswith("_explain")) filenames))<block_end>@staticmethod<def_stmt>_is_inference_valid sample<block_start>""" Check whether the inference data is empty or have the same length. If probs have different length with the labels, it can be confusing when assigning each prob to label. '_is_inference_valid' returns True only when the data size of match to each other. Note that prob data could be empty, so empty prob will pass the check. """<line_sep>ground_truth_len=len(sample['ground_truth_label'])<for_stmt>name ['ground_truth_prob' 'ground_truth_prob_sd' 'ground_truth_prob_itl95_low' 'ground_truth_prob_itl95_hi']<block_start><if_stmt>sample[name]<and>len(sample[name])<ne>ground_truth_len<block_start>logger.info('Length of %s not match the ground_truth_label. Length of ground_truth_label: %d,'<concat>'length of %s: %d' name ground_truth_len name len(sample[name]))<line_sep><return><false><block_end><block_end>predicted_len=len(sample['predicted_label'])<for_stmt>name ['predicted_prob' 'predicted_prob_sd' 'predicted_prob_itl95_low' 'predicted_prob_itl95_hi']<block_start><if_stmt>sample[name]<and>len(sample[name])<ne>predicted_len<block_start>logger.info('Length of %s not match the predicted_labels. Length of predicted_label: %d,'<concat>'length of %s: %d' name predicted_len name len(sample[name]))<line_sep><return><false><block_end><block_end><return><true><block_end>@staticmethod<def_stmt>_score_event_to_dict label_score_event metric<arrow>Dict<block_start>"""Transfer metric scores per label to pre-defined structure."""<line_sep>new_label_score_dict=defaultdict(dict)<for_stmt>label_id,label_score enumerate(label_score_event)<block_start>new_label_score_dict[label_id][metric]=_NAN_CONSTANT<if>math.isnan(label_score)<else>label_score<block_end><return>new_label_score_dict<block_end><block_end>
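A hedged driving sketch for the ExplainLoader above; the loader id and directory are hypothetical, and the directory must already contain a "*_explain" summary file for load() to succeed.

# Load one explain job and query its cached results.
loader = ExplainLoader(loader_id="./explain_run", summary_dir="/tmp/explain_run")
loader.load()                        # parse events from the latest explain summary file
samples = loader.get_all_samples()   # only samples with image data are returned
scores = loader.explainer_scores     # per-explainer and per-label benchmark results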
<import_from_future_stmt> print_function<import_from_stmt>builtins zip<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<import_from_stmt>h2o H2OFrame<import_from_stmt>h2o.exceptions H2OTypeError H2OValueError<def_stmt>compare_frames expected actual<block_start><assert_stmt>actual.shape<eq>expected.shape<assert_stmt>actual.columns<eq>expected.columns "Columns differ: %r vs %r"%(actual.columns colnames)<for_stmt>i range(len(actual.columns))<block_start>colname=actual.columns[i]<line_sep>t1=expected.types[colname]<line_sep>t2=actual.types[colname]<assert_stmt>t1<eq>t2 ("Bad types %s: expected %s, got %s"%(colname t1 t2))<line_sep>col1=expected[colname]<line_sep>s1=str(h2o.as_list(col1))<line_sep>col2=actual[colname]<line_sep>s2=str(h2o.as_list(col2))<assert_stmt>s1<eq>s2 ("bad values: expected[%d] = %r, actual[%d] = %r"%(i s1 i s2))<block_end><block_end><def_stmt>test1 <block_start>badFrame=H2OFrame({"one":[4 6 1] "two":["a" "b" "cde"] "three":[0 5.2 14]})<line_sep>badClone=H2OFrame({"one":[4 6 1] "two":["a" "b" "cde"] "three":[0 5.2 14]})<line_sep>compare_frames(badFrame badClone)<try_stmt><block_start>badFrame.asfactor()<assert_stmt><false> "The frame contaied a real number, an error should be thrown"<block_end><except_stmt>H2OValueError# as designed <block_start><pass><block_end>compare_frames(badFrame badClone)<line_sep>originalAfterOp=H2OFrame.get_frame(badFrame.frame_id)<line_sep>compare_frames(badFrame originalAfterOp)<line_sep>goodFrame=H2OFrame({"one":[4 6 1] "two":["a" "b" "cde"]})<line_sep>goodClone=H2OFrame({"one":[4 6 1] "two":["a" "b" "cde"]})<line_sep>compare_frames(goodFrame goodClone)<line_sep>factoredFrame=goodFrame.asfactor()<line_sep>originalAfterOp=H2OFrame.get_frame(goodFrame.frame_id)<line_sep>compare_frames(goodFrame originalAfterOp)<line_sep>expectedFactoredFrame=H2OFrame({"one":[4 6 1] "two":["a" "b" "cde"]} column_types={"one":"categorical" "two":"enum"})<line_sep>compare_frames(expectedFactoredFrame factoredFrame)<line_sep>refactoredFrame=expectedFactoredFrame.asfactor()<line_sep>factoredAfterOp=H2OFrame.get_frame(refactoredFrame.frame_id)<line_sep>compare_frames(expectedFactoredFrame factoredAfterOp)<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(test1)<block_end><else_stmt><block_start>test1()<block_end>
# -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """The trainer program for Auto Lane."""<import_stmt>logging<import_stmt>os<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>pycocotools.coco COCO<import_from_stmt>vega.common ClassFactory ClassType<import_from_stmt>vega.trainer.trainer_ms TrainerMs<import_from_stmt>mindspore.train.callback ModelCheckpoint CheckpointConfig LossMonitor<import_stmt>mindspore.common.dtype<as>mstype<import_from_stmt>mindspore.train Model<as>MsModel<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore.nn SGD<import_from_stmt>.src.model_utils.config config<import_from_stmt>.src.dataset data_to_mindrecord_byte_image create_fasterrcnn_dataset<import_from_stmt>.src.lr_schedule dynamic_lr<import_from_stmt>.src.network_define WithLossCell TrainOneStepCell LossNet<import_from_stmt>.src.util coco_eval bbox2result_1image results2json<import_from_stmt>vega.datasets.conf.dataset DatasetConfig<line_sep>logger=logging.getLogger(__name__)<def_stmt>valid <block_start>"""Construct the trainer of SpNas."""<line_sep>config=DatasetConfig().to_dict()<line_sep>config=config['_class_data'].val<line_sep>prefix="FasterRcnn_eval.mindrecord"<line_sep>mindrecord_dir=config.mindrecord_dir<line_sep>mindrecord_file=os.path.join(mindrecord_dir prefix)<if_stmt><not>os.path.exists(mindrecord_file)<block_start><if_stmt><not>os.path.isdir(mindrecord_dir)<block_start>os.makedirs(mindrecord_dir)<block_end><if_stmt>config.dataset<eq>"coco"<block_start><if_stmt>os.path.isdir(config.coco_root)<block_start>data_to_mindrecord_byte_image(config "coco" <false> prefix file_num=1)<block_end><else_stmt><block_start>logging.info("coco_root not exits.")<block_end><block_end><else_stmt><block_start><if_stmt>os.path.isdir(config.IMAGE_DIR)<and>os.path.exists(config.ANNO_PATH)<block_start>data_to_mindrecord_byte_image(config "other" <false> prefix file_num=1)<block_end><else_stmt><block_start>logging.info("IMAGE_DIR or ANNO_PATH not exits.")<block_end><block_end><block_end>dataset=create_fasterrcnn_dataset(config mindrecord_file batch_size=config.test_batch_size is_training=<false>)<line_sep><return>dataset<block_end><def_stmt>train <block_start>"""Train fasterrcnn dataset."""<line_sep>config=DatasetConfig().to_dict()<line_sep>config=config['_class_data'].train<line_sep>prefix="FasterRcnn.mindrecord"<line_sep>mindrecord_dir=config.mindrecord_dir<line_sep>mindrecord_file=os.path.join(mindrecord_dir prefix+"0")<line_sep>print("CHECKING MINDRECORD FILES ...")<line_sep>rank=int(os.getenv('RANK_ID' '0'))<line_sep>device_num=int(os.getenv('RANK_SIZE' '1'))<if_stmt>rank<eq>0<and><not>os.path.exists(mindrecord_file)<block_start><if_stmt><not>os.path.isdir(mindrecord_dir)<block_start>os.makedirs(mindrecord_dir)<block_end><if_stmt>config.dataset<eq>"coco"<block_start><if_stmt>os.path.isdir(config.coco_root)<block_start><if_stmt><not>os.path.exists(config.coco_root)<block_start>logging.info("Please make sure config:coco_root is valid.")<line_sep><raise>ValueError(config.coco_root)<block_end>data_to_mindrecord_byte_image(config "coco" <true> prefix)<block_end><else_stmt><block_start>logging.info("coco_root not 
exits.")<block_end><block_end><else_stmt><block_start><if_stmt>os.path.isdir(config.image_dir)<and>os.path.exists(config.anno_path)<block_start><if_stmt><not>os.path.exists(config.image_dir)<block_start>logging.info("Please make sure config:image_dir is valid.")<line_sep><raise>ValueError(config.image_dir)<block_end>data_to_mindrecord_byte_image(config "other" <true> prefix)<block_end><else_stmt><block_start>logging.info("image_dir or anno_path not exits.")<block_end><block_end><block_end><while_stmt><not>os.path.exists(mindrecord_file+".db")<block_start>time.sleep(5)<block_end>dataset=create_fasterrcnn_dataset(config mindrecord_file batch_size=config.batch_size device_num=device_num rank_id=rank num_parallel_workers=config.num_parallel_workers python_multiprocessing=config.python_multiprocessing)<line_sep><return>dataset<block_end>@ClassFactory.register(ClassType.TRAINER)<class_stmt>SpNasTrainerCallback(TrainerMs)<block_start>"""Construct the trainer of SpNas."""<line_sep>disable_callbacks=['ProgressLogger']<def_stmt>build self<block_start>"""Construct the trainer of SpNas."""<line_sep>logging.debug("Trainer Config: {}".format(self.config))<line_sep>self._init_hps()<line_sep>self.use_syncbn=self.config.syncbn<if_stmt><not>self.train_loader<block_start>self.train_loader=train()<block_end><if_stmt><not>self.valid_loader<block_start>self.valid_loader=valid()<block_end>self.batch_num_train=self.train_loader.get_dataset_size()<line_sep>self.batch_num_valid=self.valid_loader.get_dataset_size()<line_sep>self.valid_metrics=self._init_metrics()<block_end><def_stmt>_train_epoch self<block_start>"""Construct the trainer of SpNas."""<line_sep>dataset=self.train_loader<line_sep>dataset_size=dataset.get_dataset_size()<line_sep>self.model=self.model.set_train()<line_sep>self.model.to_float(mstype.float16)<line_sep>self.loss=LossNet()<line_sep>lr=Tensor(dynamic_lr(config dataset_size) mstype.float32)<line_sep>self.optimizer=SGD(params=self.model.trainable_params() learning_rate=lr momentum=config.momentum weight_decay=config.weight_decay loss_scale=config.loss_scale)<line_sep>net_with_loss=WithLossCell(self.model self.loss)<line_sep>net=TrainOneStepCell(net_with_loss self.optimizer sens=config.loss_scale)<line_sep>config_ck=CheckpointConfig(save_checkpoint_steps=self.config.save_steps keep_checkpoint_max=1)<line_sep>save_path=self.get_local_worker_path(self.step_name self.worker_id)<line_sep>ckpoint_cb=ModelCheckpoint(config=config_ck directory=save_path)<line_sep>loss_cb=LossMonitor(per_print_times=1)<line_sep>callback_list=[ckpoint_cb loss_cb]<line_sep>self.ms_model=MsModel(net)<try_stmt><block_start>self.ms_model.train(epoch=self.config.epochs train_dataset=dataset callbacks=callback_list dataset_sink_mode=<false>)<block_end><except_stmt>RuntimeError<as>e<block_start>logging.warning(f"failed to train the model, skip it, message: {str(e)}")<block_end><block_end><def_stmt>_valid_epoch self<block_start>"""Construct the trainer of SpNas."""<line_sep>dataset=self.valid_loader<line_sep>self.model.set_train(<false>)<line_sep>self.model.to_float(mstype.float16)<line_sep>outputs=[]<line_sep>dataset_coco=COCO(self.config.metric.params.anno_path)<line_sep>max_num=128<for_stmt>data dataset.create_dict_iterator(num_epochs=1)<block_start>img_data=data['image']<line_sep>img_metas=data['image_shape']<line_sep>gt_bboxes=data['box']<line_sep>gt_labels=data['label']<line_sep>gt_num=data['valid_num']<line_sep>output=self.model(img_data img_metas gt_bboxes gt_labels 
gt_num)<line_sep>all_bbox=output[0]<line_sep>all_label=output[1]<line_sep>all_mask=output[2]<for_stmt>j range(config.test_batch_size)<block_start>all_bbox_squee=np.squeeze(all_bbox.asnumpy()[j : :])<line_sep>all_label_squee=np.squeeze(all_label.asnumpy()[j : :])<line_sep>all_mask_squee=np.squeeze(all_mask.asnumpy()[j : :])<line_sep>all_bboxes_tmp_mask=all_bbox_squee[all_mask_squee :]<line_sep>all_labels_tmp_mask=all_label_squee[all_mask_squee]<if_stmt>all_bboxes_tmp_mask.shape[0]<g>max_num<block_start>inds=np.argsort(-all_bboxes_tmp_mask[: -1])<line_sep>inds=inds[:max_num]<line_sep>all_bboxes_tmp_mask=all_bboxes_tmp_mask[inds]<line_sep>all_labels_tmp_mask=all_labels_tmp_mask[inds]<block_end>outputs_tmp=bbox2result_1image(all_bboxes_tmp_mask all_labels_tmp_mask config.num_classes)<line_sep>outputs.append(outputs_tmp)<block_end><block_end>eval_types=["bbox"]<line_sep>result_files=results2json(dataset_coco outputs "./results.pkl")<line_sep>metrics=coco_eval(result_files eval_types dataset_coco single_result=<true>)<line_sep>self.valid_metrics.update(metrics)<line_sep>valid_logs=dict()<line_sep>valid_logs['cur_valid_perfs']=self.valid_metrics.results<line_sep>self.callbacks.after_valid(valid_logs)<block_end><block_end>
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=import-error,print-statement,relative-import <import_stmt>copy<import_stmt>re<line_sep>SPECIAL_TOKENS=[# This list should be sorted by length. 'WebCodecs' 'WebSocket' 'String16' 'Float32' 'Float64' 'Base64' 'IFrame' 'Latin1' 'MathML' 'PlugIn' 'SQLite' 'Uint16' 'Uint32' 'WebGL2' 'webgl2' 'WebGPU' 'ASCII' 'CSSOM' 'CType' 'DList' 'Int16' 'Int32' 'MPath' 'OList' 'TSpan' 'UList' 'UTF16' 'Uint8' 'WebGL' 'XPath' 'ETC1' 'etc1' 'HTML' 'Int8' 'S3TC' 's3tc' 'SPv2' 'UTF8' 'sRGB' 'URLs' 'API' 'CSS' 'DNS' 'DOM' 'EXT' 'RTC' 'SVG' 'XSS' '2D' 'AX' 'FE' 'JS' 'V0' 'V8' 'v8' 'XR' ]<line_sep>_SPECIAL_TOKENS_WITH_NUMBERS=[token<for>token SPECIAL_TOKENS<if>re.search(r'[0-9]' token)]<line_sep># Applying _TOKEN_PATTERNS repeatedly should capture any sequence of a-z, A-Z, # 0-9. _TOKEN_PATTERNS=[# 'Foo' 'foo' '[A-Z]?[a-z]+' # The following pattern captures only 'FOO' in 'FOOElement'. '[A-Z]+(?![a-z])' # '2D' '3D', but not '2Dimension' '[0-9][Dd](?![a-z])' '[0-9]+' ]<line_sep>_TOKEN_RE=re.compile(r'('+'|'.join(SPECIAL_TOKENS+_TOKEN_PATTERNS)+r')')<def_stmt>tokenize_name name<block_start>"""Tokenize the specified name. A token consists of A-Z, a-z, and 0-9 characters. Other characters work as token delimiters, and the resultant list won't contain such characters. Capital letters also work as delimiters. E.g. 'FooBar-baz' is tokenized to ['Foo', 'Bar', 'baz']. See _TOKEN_PATTERNS for more details. This function detects special cases that are not easily discernible without additional knowledge, such as recognizing that in SVGSVGElement, the first two SVGs are separate tokens, but WebGL is one token. Returns: A list of token strings. """<line_sep># In case |name| is written in lowerCamelCase, we try to match special # tokens that contains numbers ignoring cases only at the first step. tokens=[]<line_sep>match=re.search(r'^('+'|'.join(_SPECIAL_TOKENS_WITH_NUMBERS)+r')' name re.IGNORECASE)<if_stmt>match<block_start>tokens.append(match.group(0))<line_sep>name=name[match.end(0):]<block_end><return>tokens+_TOKEN_RE.findall(name)<block_end><class_stmt>NameStyleConverter(object)<block_start>"""Converts names from camelCase to various other styles. """<def_stmt>__init__ self name<block_start>self.tokens=tokenize_name(name)<line_sep>self._original=name<block_end>@property<def_stmt>original self<block_start><return>self._original<block_end><def_stmt>__str__ self<block_start><return>self._original<block_end># Make this class workable with sort(). <def_stmt>__lt__ self other<block_start><return>self.original<l>other.original<block_end># Make this class workable with groupby(). <def_stmt>__eq__ self other<block_start><return>self.original<eq>other.original<block_end># If __eq__() is defined then a custom __hash__() needs to be defined. <def_stmt>__hash__ self<block_start><return>hash(self.original)<block_end><def_stmt>to_snake_case self<block_start>"""Snake case is the file and variable name style per Google C++ Style Guide: https://google.github.io/styleguide/cppguide.html#Variable_Names Also known as the hacker case. 
https://en.wikipedia.org/wiki/Snake_case """<line_sep><return>'_'.join([token.lower()<for>token self.tokens])<block_end><def_stmt>to_upper_camel_case self<block_start>"""Upper-camel case is the class and function name style per Google C++ Style Guide: https://google.github.io/styleguide/cppguide.html#Function_Names Also known as the PascalCase. https://en.wikipedia.org/wiki/Camel_case. """<line_sep>tokens=self.tokens<line_sep># If the first token is one of SPECIAL_TOKENS, we should replace the # token with the matched special token. # e.g. ['css', 'External', 'Scanner', 'Preload'] => 'CSSExternalScannerPreload' <if_stmt>tokens<and>tokens[0].lower()<eq>tokens[0]<block_start><for_stmt>special SPECIAL_TOKENS<block_start><if_stmt>special.lower()<eq>tokens[0]<block_start>tokens=copy.deepcopy(tokens)<line_sep>tokens[0]=special<line_sep><break><block_end><block_end><block_end><return>''.join([token[0].upper()+token[1:]<for>token tokens])<block_end><def_stmt>to_lower_camel_case self<block_start>"""Lower camel case is the name style for attribute names and operation names in web platform APIs. e.g. 'addEventListener', 'documentURI', 'fftSize' https://en.wikipedia.org/wiki/Camel_case. """<if_stmt><not>self.tokens<block_start><return>''<block_end><return>self.tokens[0].lower()+''.join([token[0].upper()+token[1:]<for>token self.tokens[1:]])<block_end><def_stmt>to_macro_case self<block_start>"""Macro case is the macro name style per Google C++ Style Guide: https://google.github.io/styleguide/cppguide.html#Macro_Names """<line_sep><return>'_'.join([token.upper()<for>token self.tokens])<block_end><def_stmt>to_all_cases self<block_start><return>{'snake_case':self.to_snake_case() 'upper_camel_case':self.to_upper_camel_case() 'macro_case':self.to_macro_case() }<block_end># Use the following high level naming functions which describe the semantics # of the name, rather than a particular style. <def_stmt>to_class_name self prefix=<none> suffix=<none><block_start>"""Represents this name as a class name in Chromium C++ style. i.e. UpperCamelCase. """<line_sep>camel_prefix=prefix[0].upper()+prefix[1:].lower()<if>prefix<else>''<line_sep>camel_suffix=suffix[0].upper()+suffix[1:].lower()<if>suffix<else>''<line_sep><return>camel_prefix+self.to_upper_camel_case()+camel_suffix<block_end><def_stmt>to_class_data_member self prefix=<none> suffix=<none><block_start>"""Represents this name as a data member name in Chromium C++ style. i.e. snake_case_with_trailing_underscore_. """<line_sep>lower_prefix=prefix.lower()+'_'<if>prefix<else>''<line_sep>lower_suffix=suffix.lower()+'_'<if>suffix<else>''<line_sep><return>lower_prefix+self.to_snake_case()+'_'+lower_suffix<block_end><def_stmt>to_function_name self prefix=<none> suffix=<none><block_start>"""Represents this name as a function name in Blink C++ style. i.e. UpperCamelCase Note that this function should not be used for IDL operation names and C++ functions implementing IDL operations and attributes. """<line_sep>camel_prefix=prefix[0].upper()+prefix[1:].lower()<if>prefix<else>''<line_sep>camel_suffix=''<if_stmt>type(suffix)<is>list<block_start><for_stmt>item suffix<block_start>camel_suffix<augadd>item[0].upper()+item[1:].lower()<block_end><block_end><elif_stmt>suffix<block_start>camel_suffix=suffix[0].upper()+suffix[1:].lower()<block_end><return>camel_prefix+self.to_upper_camel_case()+camel_suffix<block_end><def_stmt>to_enum_value self<block_start>"""Represents this name as an enum value in Blink C++ style. i.e. 
kUpperCamelCase """<line_sep><return>'k'+self.to_upper_camel_case()<block_end><def_stmt>to_header_guard self<block_start>"""Represents this name as a header guard style in Chromium C++ style. i.e. THIRD_PARTY_BLINK_RENDERER_MODULES_MODULES_EXPORT_H_ """<line_sep><return>re.sub(r'[-/.]' '_' self.to_macro_case())+'_'<block_end><block_end>
<import_stmt>time<import_stmt>azure.batch.models<as>batch_models<def_stmt>wait_for_task_to_complete core_cluster_operations job_id:str task_id:str<block_start><while_stmt><true><block_start>task=core_cluster_operations.batch_client.task.get(job_id=job_id task_id=task_id)<if_stmt>task.state<ne>batch_models.TaskState.completed<block_start>time.sleep(2)<block_end><else_stmt><block_start><return><block_end><block_end><block_end>
<import_from_stmt>unittest mock<import_stmt>pytest<import_from_stmt>fastapi FastAPI<import_from_stmt>fastapi.testclient TestClient<import_from_stmt>starlette.middleware.sessions SessionMiddleware<import_from_stmt>authx get_social_router<import_from_stmt>tests.utils ACCESS_COOKIE_NAME REFRESH_COOKIE_NAME MockAuthBackend<line_sep>app=FastAPI()<line_sep>app.add_middleware(SessionMiddleware secret_key="SECRET" max_age=10)<line_sep>router=get_social_router(<none> MockAuthBackend(<none> <none> <none> <none> <none>) <false> "http://127.0.0.1" ACCESS_COOKIE_NAME REFRESH_COOKIE_NAME <none> <none> ["google" "facebook"] {"google":{"id":"id" "secret":"secret" } "facebook":{"id":"id" "secret":"secret" } } )<line_sep>app.include_router(router prefix="/auth")<line_sep>test_client=TestClient(app)<line_sep>ACCESS_TOKEN="<PASSWORD>"<line_sep>REFRESH_TOKEN="<PASSWORD>"<line_sep>@pytest.mark.parametrize("provider" ["google" "facebook"])<def_stmt>test_login provider:str<block_start>""" Test login with social provider Args: provider (str): social provider """<line_sep>url=app.url_path_for("social:login" provider=provider)<with_stmt>mock.patch(f"authx.routers.social.SocialService.login_{provider}" mock.Mock(return_value="/") )<as>mock_method<block_start>response=test_client.get(url allow_redirects=<false>)<line_sep>mock_method.assert_called_once()<block_end><assert_stmt>response.status_code<eq>307<block_end>@pytest.mark.parametrize("provider" ["google" "facebook"])@mock.patch("authx.routers.social.check_state" mock.Mock(return_value=<true>) )<def_stmt>test_callback provider:str<block_start>""" Test callback with social provider Args: provider (str): social provider """<line_sep>patcher_callback=mock.patch(f"authx.routers.social.SocialService.callback_{provider}" mock.AsyncMock(return_value=(<none> <none> )) )<line_sep>mock_callback=patcher_callback.start()<line_sep>patcher_resolve_user=mock.patch("authx.routers.social.SocialService.resolve_user" mock.AsyncMock(return_value={"access":ACCESS_TOKEN "refresh":REFRESH_TOKEN}) )<line_sep>mock_resolve_user=patcher_resolve_user.start()<line_sep>url=app.url_path_for("social:callback" provider=provider)<line_sep>response=test_client.get(url allow_redirects=<false>)<assert_stmt>response.status_code<eq>307<assert_stmt>response.cookies.get(ACCESS_COOKIE_NAME)<eq>ACCESS_TOKEN<assert_stmt>response.cookies.get(REFRESH_COOKIE_NAME)<eq>REFRESH_TOKEN<line_sep>mock_callback.assert_awaited_once()<line_sep>mock_resolve_user.assert_awaited_once_with(provider <none> <none>)<line_sep>patcher_callback.stop()<line_sep>patcher_resolve_user.stop()<block_end>
# Check the basic discovery process, including a sub-suite. # # RUN: %{lit} %{inputs}/discovery \ # RUN: -j 1 --debug --show-tests --show-suites \ # RUN: -v > %t.out 2> %t.err # RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s # RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s # # CHECK-BASIC-ERR: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}' # CHECK-BASIC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}' # CHECK-BASIC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}' # # CHECK-BASIC-OUT: -- Test Suites -- # CHECK-BASIC-OUT: sub-suite - 2 tests # CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-BASIC-OUT: top-level-suite - 3 tests # CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery$}} # CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery$}} # # CHECK-BASIC-OUT: -- Available Tests -- # CHECK-BASIC-OUT: sub-suite :: test-one # CHECK-BASIC-OUT: sub-suite :: test-two # CHECK-BASIC-OUT: top-level-suite :: subdir/test-three # CHECK-BASIC-OUT: top-level-suite :: test-one # CHECK-BASIC-OUT: top-level-suite :: test-two # Check discovery when providing the special builtin 'config_map' # RUN: %{python} %{inputs}/config-map-discovery/driver.py \ # RUN: %{inputs}/config-map-discovery/main-config/lit.cfg \ # RUN: %{inputs}/config-map-discovery/lit.alt.cfg \ # RUN: --single-process --debug --show-tests --show-suites > %t.out 2> %t.err # RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-OUT < %t.out %s # RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-ERR < %t.err %s # CHECK-CONFIG-MAP-OUT-NOT: ERROR: lit.cfg invoked # CHECK-CONFIG-MAP-OUT: -- Test Suites -- # CHECK-CONFIG-MAP-OUT: config-map - 2 tests # CHECK-CONFIG-MAP-OUT: Source Root: {{.*[/\\]config-map-discovery[/\\]tests}} # CHECK-CONFIG-MAP-OUT: Exec Root : {{.*[/\\]tests[/\\]Inputs[/\\]config-map-discovery}} # CHECK-CONFIG-MAP-OUT: -- Available Tests -- # CHECK-CONFIG-MAP-OUT-NOT: invalid-test.txt # CHECK-CONFIG-MAP-OUT: config-map :: test1.txt # CHECK-CONFIG-MAP-OUT: config-map :: test2.txt # CHECK-CONFIG-MAP-ERR: loading suite config '{{.*}}lit.alt.cfg' # CHECK-CONFIG-MAP-ERR: loaded config '{{.*}}lit.alt.cfg' # CHECK-CONFIG-MAP-ERR: resolved input '{{.*(/|\\\\)config-map-discovery(/|\\\\)main-config}}' to 'config-map'::() # Check discovery when exact test names are given. 
# # RUN: %{lit} \ # RUN: %{inputs}/discovery/subdir/test-three.py \ # RUN: %{inputs}/discovery/subsuite/test-one.txt \ # RUN: -j 1 --show-tests --show-suites -v > %t.out # RUN: FileCheck --check-prefix=CHECK-EXACT-TEST < %t.out %s # # CHECK-EXACT-TEST: -- Available Tests -- # CHECK-EXACT-TEST: sub-suite :: test-one # CHECK-EXACT-TEST: top-level-suite :: subdir/test-three # Check discovery when config files end in .py # RUN: %{lit} %{inputs}/py-config-discovery \ # RUN: -j 1 --debug --show-tests --show-suites \ # RUN: -v > %t.out 2> %t.err # RUN: FileCheck --check-prefix=CHECK-PYCONFIG-OUT < %t.out %s # RUN: FileCheck --check-prefix=CHECK-PYCONFIG-ERR < %t.err %s # # CHECK-PYCONFIG-ERR: loading suite config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}' # CHECK-PYCONFIG-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}' # CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}' # CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}' # CHECK-PYCONFIG-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}' # CHECK-PYCONFIG-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}' # # CHECK-PYCONFIG-OUT: -- Test Suites -- # CHECK-PYCONFIG-OUT: sub-suite - 2 tests # CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-PYCONFIG-OUT: top-level-suite - 3 tests # CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery$}} # CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]py-config-discovery$}} # # CHECK-PYCONFIG-OUT: -- Available Tests -- # CHECK-PYCONFIG-OUT: sub-suite :: test-one # CHECK-PYCONFIG-OUT: sub-suite :: test-two # CHECK-PYCONFIG-OUT: top-level-suite :: subdir/test-three # CHECK-PYCONFIG-OUT: top-level-suite :: test-one # CHECK-PYCONFIG-OUT: top-level-suite :: test-two # Check discovery when using an exec path. # # RUN: %{lit} %{inputs}/exec-discovery \ # RUN: -j 1 --debug --show-tests --show-suites \ # RUN: -v > %t.out 2> %t.err # RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s # RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s # # CHECK-ASEXEC-ERR: loading suite config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}' # CHECK-ASEXEC-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}' # CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}' # CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}' # CHECK-ASEXEC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}' # CHECK-ASEXEC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}' # # CHECK-ASEXEC-OUT: -- Test Suites -- # CHECK-ASEXEC-OUT: sub-suite - 2 tests # CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}} # CHECK-ASEXEC-OUT: top-level-suite - 3 tests # CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery$}} # CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]exec-discovery$}} # # CHECK-ASEXEC-OUT: -- Available Tests -- # CHECK-ASEXEC-OUT: sub-suite :: test-one # CHECK-ASEXEC-OUT: sub-suite :: test-two # CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three # CHECK-ASEXEC-OUT: top-level-suite :: test-one # CHECK-ASEXEC-OUT: top-level-suite :: test-two # Check discovery when exact test names are given. # # FIXME: Note that using a path into a subsuite doesn't work correctly here. 
# # RUN: %{lit} \ # RUN: %{inputs}/exec-discovery/subdir/test-three.py \ # RUN: -j 1 --show-tests --show-suites -v > %t.out # RUN: FileCheck --check-prefix=CHECK-ASEXEC-EXACT-TEST < %t.out %s # # CHECK-ASEXEC-EXACT-TEST: -- Available Tests -- # CHECK-ASEXEC-EXACT-TEST: top-level-suite :: subdir/test-three # Check that we don't recurse infinitely when loading a site specific test # suite located inside the test source root. # # RUN: %{lit} \ # RUN: %{inputs}/exec-discovery-in-tree/obj/ \ # RUN: -j 1 --show-tests --show-suites -v > %t.out # RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s # # Try it again after cd'ing into the test suite using a short relative path. # # RUN: cd %{inputs}/exec-discovery-in-tree/obj/ # RUN: %{lit} . \ # RUN: -j 1 --show-tests --show-suites -v > %t.out # RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s # # CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests # CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*[/\\]exec-discovery-in-tree$}} # CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*[/\\]exec-discovery-in-tree[/\\]obj$}} # CHECK-ASEXEC-INTREE-NEXT: -- Available Tests -- # CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
# 1) fw dir # 2) result file # 3) log file <import_stmt>os<import_stmt>sys<import_stmt>json<class_stmt>Runner<block_start><def_stmt>__init__ self cmd<block_start>self.cmd=cmd<block_end><def_stmt>run_it self<block_start>os.system(self.cmd)<block_end><block_end><def_stmt>run config log_dir<block_start>jconfig=json.load(open(config 'r'))<line_sep>core_script='/'.join(__file__.split('/')[:-1])+'/run_core.py'<line_sep>cmd='python ./'+core_script+' -d '+jconfig['fw_path']+' -l '+log_dir<line_sep>obj=Runner(cmd)<line_sep>obj.run_it()<block_end><if_stmt>__name__<eq>'__main__'<block_start>run(sys.argv[1] sys.argv[2])<block_end>
<import_from_stmt>icevision.models.mmdet.fastai.callbacks *<import_from_stmt>icevision.models.mmdet.fastai.learner *<line_sep>
A=[1 2 3]<for_stmt>i,x enumerate(A)<block_start>A[i]<augadd>x<block_end>B=A[0]<line_sep>C=A[0]<line_sep>D:int=3<while_stmt>C<l>A[2]<block_start>C<augadd>1<block_end><if_stmt>C<eq>A[2]<block_start>print('True')<block_end><def_stmt>main <block_start>print("Main started")<line_sep>print(A)<line_sep>print(B)<line_sep>print(C)<line_sep>print(D)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
""" Module that provides a class that filters profanities f = ProfanitiesFilter(['bad', 'un\w+'], replacements="-") example = "I am doing bad ungood badlike things." print f.clean(example) # Returns "I am doing --- ------ badlike things." f.inside_words = True print f.clean(example) # Returns "I am doing --- ------ ---like things." f.complete = False print f.clean(example) # Returns "I am doing b-d u----d b-dlike things." """<import_stmt>random<import_stmt>re<class_stmt>ProfanitiesFilter(object)<block_start><def_stmt>__init__ self filterlist ignore_case=<true> replacements="$@%-?!" complete=<true> inside_words=<false><block_start>""" Inits the profanity filter. filterlist -- a list of regular expressions that matches words that are forbidden ignore_case -- ignore capitalization replacements -- string with characters to replace the forbidden word complete -- completely remove the word or keep the first and last char? inside_words -- search inside other words? """<line_sep>self.badwords=filterlist<line_sep>self.ignore_case=ignore_case<line_sep>self.replacements=replacements<line_sep>self.complete=complete<line_sep>self.inside_words=inside_words<block_end><def_stmt>_make_clean_word self length<block_start>""" Generates a random replacement string of a given length using the chars in self.replacements. """<line_sep><return>''.join([random.choice(self.replacements)<for>i range(length)])<block_end><def_stmt>__replacer self match<block_start>value=match.group()<if_stmt>self.complete<block_start><return>self._make_clean_word(len(value))<block_end><else_stmt><block_start><return>value[0]+self._make_clean_word(len(value)-2)+value[-1]<block_end><block_end><def_stmt>clean self text<block_start>"""Cleans a string from profanity."""<line_sep>regexp_insidewords={<true>:r'(%s)' <false>:r'\b(%s)\b' }<line_sep>regexp=(regexp_insidewords[self.inside_words]%'|'.join(self.badwords))<line_sep>r=re.compile(regexp re.IGNORECASE<if>self.ignore_case<else>0)<line_sep><return>r.sub(self.__replacer text)<block_end><block_end>
<import_from_stmt>typing List<class_stmt>Solution<block_start><def_stmt>minPathSum self grid:List[List[int]]<arrow>int<block_start>rows=len(grid)<line_sep>columns=len(grid[0])<for_stmt>i range(1 columns)<block_start>grid[0][i]<augadd>grid[0][i-1]<block_end><for_stmt>j range(1 rows)<block_start>grid[j][0]<augadd>grid[j-1][0]<block_end><for_stmt>k range(1 rows)<block_start><for_stmt>l range(1 columns)<block_start>grid[k][l]<augadd>min(grid[k][l-1] grid[k-1][l])<block_end><block_end><return>grid[-1][-1]<block_end><block_end>
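A quick, hypothetical check of the in-place prefix-sum DP above (the 3x3 grid is made up for illustration, and the call assumes the decoded Solution class is available as ordinary Python):

grid = [[1, 3, 1],
        [1, 5, 1],
        [4, 2, 1]]
# Cheapest path is 1 -> 3 -> 1 -> 1 -> 1 with total cost 7, which the DP
# accumulates into grid[-1][-1] after folding row/column prefix sums.
print(Solution().minPathSum(grid))  # 7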
# Generated by Django 2.2.5 on 2019-09-13 14:07 <import_stmt>InvenTree.fields<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('build' '0005_auto_20190604_2217') ]<line_sep>operations=[migrations.AlterField(model_name='build' name='URL' field=InvenTree.fields.InvenTreeURLField(blank=<true> help_text='Link to external URL') ) ]<block_end>
"""Core components of the policy daemon."""<import_stmt>asyncio<import_stmt>concurrent.futures<import_from_stmt>email.message EmailMessage<import_stmt>logging<import_stmt>aiosmtplib<import_from_stmt>dateutil.relativedelta relativedelta<import_stmt>aioredis<import_from_stmt>django.conf settings<import_from_stmt>django.db connections<import_from_stmt>django.template.loader render_to_string<import_from_stmt>django.utils timezone<import_from_stmt>django.utils translation<import_from_stmt>django.utils.translation ugettext<as>_ ugettext_lazy<import_from_stmt>modoboa.admin constants<as>admin_constants<import_from_stmt>modoboa.admin models<as>admin_models<import_from_stmt>modoboa.core models<as>core_models<import_from_stmt>modoboa.lib.email_utils split_mailbox<import_from_stmt>. constants<line_sep>logger=logging.getLogger("modoboa.policyd")<line_sep>SUCCESS_ACTION=b"dunno"<line_sep>FAILURE_ACTION=b"defer_if_permit Daily limit reached, retry later"<def_stmt>close_db_connections func *args **kwargs<block_start>""" Make sure to close all connections to DB. To use in threads. """<def_stmt>_close_db_connections *args **kwargs<block_start>ret=<none><try_stmt><block_start>ret=func(*args **kwargs)<block_end><finally_stmt><block_start><for_stmt>conn connections.all()<block_start>conn.close()<block_end><block_end><return>ret<block_end><return>_close_db_connections<block_end><async_keyword><def_stmt>wait_for dt<block_start>"""sleep until the specified datetime."""<line_sep>one_day=86400<while_stmt><true><block_start>now=timezone.now()<line_sep>remaining=(dt-now).total_seconds()<if_stmt>remaining<l>one_day<block_start><break><block_end># asyncio.sleep doesn't like long sleeps, so don't sleep more # than a day at a time <await>asyncio.sleep(one_day)<block_end><await>asyncio.sleep(remaining)<block_end><async_keyword><def_stmt>run_at dt coro *args<block_start>"""Run coroutine at given datetime."""<line_sep><await>wait_for(dt)<line_sep><return><await>coro(*args)<block_end>@close_db_connections<def_stmt>get_local_config <block_start>"""Return local configuration."""<line_sep><return>core_models.LocalConfig.objects.first()<block_end>@close_db_connections<def_stmt>get_notification_recipients <block_start>"""Return superadmins with a mailbox."""<line_sep><return>(core_models.User.objects.filter(is_superuser=<true> mailbox__isnull=<false>))<block_end>@close_db_connections<def_stmt>create_alarm ltype name<block_start>"""Create a new alarm."""<line_sep>title=_("Daily sending limit reached")<line_sep>internal_name="sending_limit"<if_stmt>ltype<eq>"domain"<block_start>domain=admin_models.Domain.objects.get(name=name)<line_sep>domain.alarms.create(title=title internal_name=internal_name)<block_end><else_stmt><block_start>localpart,domain=split_mailbox(name)<line_sep>mailbox=admin_models.Mailbox.objects.get(address=localpart domain__name=domain)<line_sep>mailbox.alarms.create(domain=mailbox.domain title=title internal_name=internal_name)<block_end><block_end><async_keyword><def_stmt>notify_limit_reached ltype name<block_start>"""Send a notification to super admins about item."""<line_sep>ltype_translations={"account":ugettext_lazy("account") "domain":ugettext_lazy("domain")}<line_sep># We're going to execute sync code so we need an executor executor=concurrent.futures.ThreadPoolExecutor(max_workers=3)<line_sep>loop=asyncio.get_event_loop()<line_sep>futures=[loop.run_in_executor(executor get_local_config) loop.run_in_executor(executor get_notification_recipients) loop.run_in_executor(executor create_alarm ltype name) 
]<line_sep>lc,recipients,junk=<await>asyncio.gather(*futures)<line_sep>sender=lc.parameters.get_value("sender_address" app="core")<for_stmt>recipient recipients<block_start><with_stmt>translation.override(recipient.language)<block_start>content=render_to_string("policyd/notifications/limit_reached.html" {"ltype":ltype_translations[ltype] "name":name})<line_sep>subject=_("[modoboa] Sending limit reached")<block_end>msg=EmailMessage()<line_sep>msg["From"]=sender<line_sep>msg["To"]=recipient.email<line_sep>msg["Subject"]=subject<line_sep>msg.set_content(content)<line_sep><await>aiosmtplib.send(msg)<block_end><block_end><async_keyword><def_stmt>decrement_limit rclient ltype name<block_start>"""Decrement the given limit by one."""<line_sep>new_counter=<await>rclient.hincrby(constants.REDIS_HASHNAME name -1)<if_stmt>new_counter<le>0<block_start>logger.info("Limit reached for {} {}".format(ltype name))<line_sep>asyncio.ensure_future(notify_limit_reached(ltype name))<block_end><block_end><async_keyword><def_stmt>apply_policies attributes<block_start>"""Apply defined policies to received request."""<line_sep>sasl_username=attributes.get("sasl_username")<if_stmt><not>sasl_username<block_start><return>SUCCESS_ACTION<block_end>rclient=<await>aioredis.create_redis_pool(settings.REDIS_URL)<line_sep>decr_domain=<false><line_sep>decr_user=<false><line_sep>localpart,domain=split_mailbox(sasl_username)<if_stmt><await>rclient.hexists(constants.REDIS_HASHNAME domain)<block_start>counter=<await>rclient.hget(constants.REDIS_HASHNAME domain)<line_sep>logger.info("Domain {} current counter: {}".format(domain counter))<if_stmt>int(counter)<le>0<block_start><return>FAILURE_ACTION<block_end>decr_domain=<true><block_end><if_stmt><await>rclient.hexists(constants.REDIS_HASHNAME sasl_username)<block_start>counter=<await>rclient.hget(constants.REDIS_HASHNAME sasl_username)<line_sep>logger.info("Account {} current counter: {}".format(sasl_username counter))<if_stmt>int(counter)<le>0<block_start><return>FAILURE_ACTION<block_end>decr_user=<true><block_end><if_stmt>decr_domain<block_start><await>decrement_limit(rclient "domain" domain)<block_end><if_stmt>decr_user<block_start><await>decrement_limit(rclient "account" sasl_username)<block_end>rclient.close()<line_sep><await>rclient.wait_closed()<line_sep>logger.debug("Let it pass")<line_sep><return>SUCCESS_ACTION<block_end><async_keyword><def_stmt>handle_connection reader writer<block_start>"""Coroutine to handle a new connection to the server."""<line_sep>action=SUCCESS_ACTION<try_stmt><block_start>logger.debug("Reading data")<line_sep>data=<await>reader.readuntil(b"\n\n")<block_end><except_stmt>asyncio.IncompleteReadError<block_start><pass><block_end><else_stmt><block_start>attributes={}<for_stmt>line data.decode().split("\n")<block_start><if_stmt><not>line<block_start><continue><block_end><try_stmt><block_start>name,value=line.split("=")<block_end><except_stmt>ValueError<block_start><continue><block_end>attributes[name]=value<block_end>state=attributes.get("protocol_state")<if_stmt>state<eq>"RCPT"<block_start>logger.debug("Applying policies")<line_sep>action=<await>apply_policies(attributes)<line_sep>logger.debug("Done")<block_end><block_end>logger.debug("Sending action %s" action)<line_sep>writer.write(b"action="+action+b"\n\n")<line_sep><await>writer.drain()<block_end><async_keyword><def_stmt>new_connection reader writer<block_start><try_stmt><block_start><await>asyncio.wait_for(handle_connection(reader writer) 
timeout=5)<block_end><except_stmt>asyncio.TimeoutError<as>err<block_start>logger.warning("Timeout received while handling connection: %s" err)<block_end><finally_stmt><block_start>writer.close()<if_stmt>hasattr(writer "wait_closed")# Python 3.7+ only <block_start><await>writer.wait_closed()<block_end>logger.info("exit")<block_end><block_end><def_stmt>get_next_execution_dt <block_start>"""Return next execution date and time."""<line_sep><return>(timezone.now()+relativedelta(days=1)).replace(hour=0 minute=0 second=0)<block_end>@close_db_connections<def_stmt>get_domains_to_reset <block_start>""" Return a list of domain to reset. We also close all associated alarms. """<line_sep>qset=admin_models.Domain.objects.filter(message_limit__isnull=<false>)<line_sep>admin_models.Alarm.objects.filter(internal_name="limit_reached" domain__in=qset status=admin_constants.ALARM_OPENED).update(status=admin_constants.ALARM_CLOSED closed=timezone.now())<line_sep><return>qset<block_end>@close_db_connections<def_stmt>get_mailboxes_to_reset <block_start>""" Return a list of mailboxes to reset. We also close all associated alarms. """<line_sep>qset=(admin_models.Mailbox.objects.filter(message_limit__isnull=<false>).select_related("domain"))<line_sep>admin_models.Alarm.objects.filter(internal_name="limit_reached" mailbox__in=qset status=admin_constants.ALARM_OPENED).update(status=admin_constants.ALARM_CLOSED closed=timezone.now())<line_sep><return>qset<block_end><async_keyword><def_stmt>reset_counters <block_start>"""Reset all counters."""<line_sep>rclient=<await>aioredis.create_redis_pool(settings.REDIS_URL)<line_sep>logger.info("Resetting all counters")<line_sep># We're going to execute sync code so we need an executor executor=concurrent.futures.ThreadPoolExecutor(max_workers=3)<line_sep>loop=asyncio.get_event_loop()<line_sep>futures=[loop.run_in_executor(executor get_domains_to_reset) loop.run_in_executor(executor get_mailboxes_to_reset)]<line_sep>domains,mboxes=<await>asyncio.gather(*futures)<for_stmt>domain domains<block_start>rclient.hset(constants.REDIS_HASHNAME domain.name domain.message_limit)<block_end><for_stmt>mb mboxes<block_start>rclient.hset(constants.REDIS_HASHNAME mb.full_address mb.message_limit)<block_end>rclient.close()<line_sep><await>rclient.wait_closed()<line_sep># reschedule asyncio.ensure_future(run_at(get_next_execution_dt() reset_counters))<block_end><def_stmt>start_reset_counters_coro <block_start>"""Start coroutine."""<line_sep>first_time=(timezone.now()+relativedelta(days=1)).replace(hour=0 minute=0 second=0)<line_sep>asyncio.ensure_future(run_at(first_time reset_counters))<block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.optim lr_scheduler<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>cnstd.lr_scheduler WarmupCosineAnnealingRestarts<class_stmt>NullModule(nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.fc=nn.Linear(1 1)<block_end><block_end>ori_lr=5e-4<line_sep>model=NullModule()<line_sep>optimizer=torch.optim.Adam(model.parameters())<def_stmt>plot_lr scheduler step=900<block_start>lrs=[]<for_stmt>i range(step)<block_start>lr=optimizer.param_groups[0]['lr']<line_sep>scheduler.step()<line_sep>lrs.append(lr)<block_end>plt.plot(lrs)<line_sep>plt.show()<block_end><def_stmt>test_CosineAnnealingWarmRestarts <block_start>CAW=lr_scheduler.CosineAnnealingWarmRestarts(optimizer T_0=200 T_mult=1 eta_min=ori_lr/10.0)<line_sep>plot_lr(CAW step=1000)<block_end><def_stmt>test_WarmupCosineAnnealingRestarts <block_start>CAW=WarmupCosineAnnealingRestarts(optimizer first_cycle_steps=95600 cycle_mult=1.0 max_lr=0.001 min_lr=0.0001 warmup_steps=100 gamma=1.0 )<line_sep>plot_lr(CAW step=95600)<block_end><def_stmt>test_CyclicLR <block_start>Cyc=lr_scheduler.CyclicLR(optimizer base_lr=ori_lr/10.0 max_lr=ori_lr step_size_up=200 cycle_momentum=<false> )<line_sep>plot_lr(Cyc 1000)<block_end><def_stmt>test_OneCycleLR <block_start>Cyc=lr_scheduler.OneCycleLR(optimizer max_lr=0.1 epochs=20 steps_per_epoch=50 )<line_sep>plot_lr(Cyc 1000)<block_end>
<import_stmt>tkinter<as>tk<import_from_stmt>tkinter font colorchooser filedialog messagebox<import_from_stmt>tkinter ttk<import_stmt>os<line_sep>main_application=tk.Tk()<line_sep>main_application.geometry("1450x700+50+40")<line_sep># main_application.geometry("1530x800+0+0") main_application.title("Text Editor:- By <NAME>")<line_sep># MAIN MENU main_menu=tk.Menu()<line_sep># FILE ICONS new_icon=tk.PhotoImage(file='icons/new.png')<line_sep>open_icon=tk.PhotoImage(file='icons/open.png')<line_sep>save_icon=tk.PhotoImage(file='icons/save.png')<line_sep>save_as_icon=tk.PhotoImage(file='icons/save_as.png')<line_sep>exit_icon=tk.PhotoImage(file='icons/exit.png')<line_sep>file=tk.Menu(main_menu tearoff=<false>)<line_sep># EDIT copy_icon=tk.PhotoImage(file='icons/copy.png')<line_sep>paste_icon=tk.PhotoImage(file='icons/paste.png')<line_sep>cut_icon=tk.PhotoImage(file='icons/cut.png')<line_sep>clear_all_icon=tk.PhotoImage(file='icons/clear_all.png')<line_sep>find_icon=tk.PhotoImage(file='icons/find.png')<line_sep>edit=tk.Menu(main_menu tearoff=<false>)<line_sep># VIEW ICONS tool_bar_icon=tk.PhotoImage(file='icons/tool_bar.png')<line_sep>status_bar_icon=tk.PhotoImage(file='icons/status_bar.png')<line_sep>view=tk.Menu(main_menu tearoff=<false>)<line_sep># COLOR THEME light_default_icon=tk.PhotoImage(file='icons/light_default.png')<line_sep>light_plus_icon=tk.PhotoImage(file='icons/light_plus.png')<line_sep>dark_icon=tk.PhotoImage(file='icons/dark.png')<line_sep>red_icon=tk.PhotoImage(file='icons/red.png')<line_sep>monokai_icon=tk.PhotoImage(file='icons/monokai.png')<line_sep>night_blue_icon=tk.PhotoImage(file='icons/night_blue.png')<line_sep>color_theme=tk.Menu(main_menu tearoff=<false>)<line_sep>theme_choice=tk.StringVar()<line_sep>color_icons=(light_default_icon light_plus_icon dark_icon red_icon monokai_icon night_blue_icon)<line_sep>color_dict={'light_default_icon':('#000000' '#ffffff') 'light_plus_icon':('#474747' '#e0e0e0') 'dark_icon':('#c4c4c4' '#2d2d2d') 'red_icon':('#2d2d2d' '#ffe8e8') 'monokai_icon':('#d3b774' '#474747') 'night_blue_icon':('#ededed' '#6b9dc2')}<line_sep># CASCADE main_menu.add_cascade(label='File' menu=file)<line_sep>main_menu.add_cascade(label='Edit' menu=edit)<line_sep>main_menu.add_cascade(label='View' menu=view)<line_sep>main_menu.add_cascade(label='Color Theme' menu=color_theme)<line_sep># TOOLBAR tool_bar=ttk.Label(main_application)<line_sep>tool_bar.pack(side=tk.TOP fill=tk.X)<line_sep>#FONT BOX font_tuple=tk.font.families()<line_sep>font_family=tk.StringVar()<line_sep>font_box=ttk.Combobox(tool_bar width=30 textvariable=font_family state='readonly')<line_sep>font_box['values']=font_tuple<line_sep>font_box.current(font_tuple.index('Arial'))<line_sep>font_box.grid(row=0 column=0 padx=5)<line_sep># SIZE BOX size_var=tk.IntVar()<line_sep>font_size=ttk.Combobox(tool_bar width=14 textvariable=size_var state='readonly')<line_sep>font_size['values']=tuple(range(8 81))<line_sep>font_size.current(4)<line_sep>font_size.grid(row=0 column=1 padx=5)<line_sep># BOLD BUTTON bold_icon=tk.PhotoImage(file='icons/bold.png')<line_sep>bold_btn=ttk.Button(tool_bar image=bold_icon)<line_sep>bold_btn.grid(row=0 column=2 padx=5)<line_sep># ITALIC BUTTON italic_icon=tk.PhotoImage(file='icons/italic.png')<line_sep>italic_btn=ttk.Button(tool_bar image=italic_icon)<line_sep>italic_btn.grid(row=0 column=3 padx=5)<line_sep># UNDERLINE BUTTON underline_icon=tk.PhotoImage(file='icons/underline.png')<line_sep>underline_btn=ttk.Button(tool_bar image=underline_icon)<line_sep>underline_btn.grid(row=0 
column=4 padx=5)<line_sep># FONT COLOR BUTTON font_color_icon=tk.PhotoImage(file='icons/font_color.png')<line_sep>font_color_btn=ttk.Button(tool_bar image=font_color_icon)<line_sep>font_color_btn.grid(row=0 column=5 padx=5)<line_sep># ALIGN LEFT BUTTON align_left_icon=tk.PhotoImage(file='icons/align_left.png')<line_sep>align_left_btn=ttk.Button(tool_bar image=align_left_icon)<line_sep>align_left_btn.grid(row=0 column=6 padx=5)<line_sep># ALIGN CENTER BUTTON align_center_icon=tk.PhotoImage(file='icons/align_center.png')<line_sep>align_center_btn=ttk.Button(tool_bar image=align_center_icon)<line_sep>align_center_btn.grid(row=0 column=7 padx=5)<line_sep># ALIGN RIGFT BUTTON align_right_icon=tk.PhotoImage(file='icons/align_right.png')<line_sep>align_right_btn=ttk.Button(tool_bar image=align_right_icon)<line_sep>align_right_btn.grid(row=0 column=8 padx=5)<line_sep># TEXT EDITOR text_editor=tk.Text(main_application)<line_sep>text_editor.config(wrap='word' relief=tk.FLAT)<line_sep>scroll_bar=tk.Scrollbar(main_application)<line_sep>text_editor.focus_set()<line_sep>scroll_bar.pack(side=tk.RIGHT fill=tk.Y)<line_sep>text_editor.pack(fill=tk.BOTH expand=<true>)<line_sep>scroll_bar.config(command=text_editor.yview)<line_sep>text_editor.config(yscrollcommand=scroll_bar.set)<line_sep>current_font_family='Arial'<line_sep>current_font_size=12<def_stmt>change_font event=<none><block_start><global>current_font_family<line_sep>current_font_family=font_family.get()<line_sep>text_editor.configure(font=(current_font_family current_font_size))<block_end><def_stmt>change_fontsize event=<none><block_start><global>current_font_size<line_sep>current_font_size=size_var.get()<line_sep>text_editor.configure(font=(current_font_family current_font_size))<block_end>font_box.bind("<<ComboboxSelected>>" change_font)<line_sep>font_size.bind("<<ComboboxSelected>>" change_fontsize)<line_sep># BUTTONS FUNCTIONALITY # BOLD <def_stmt>change_bold <block_start>text_property=tk.font.Font(font=text_editor['font'])<if_stmt>text_property.actual()['weight']<eq>'normal'<block_start>text_editor.configure(font=(current_font_family current_font_size 'bold'))<block_end><if_stmt>text_property.actual()['weight']<eq>'bold'<block_start>text_editor.configure(font=(current_font_family current_font_size 'normal'))<block_end><block_end>bold_btn.configure(command=change_bold)<line_sep># ITALIC <def_stmt>change_italic <block_start>text_property=tk.font.Font(font=text_editor['font'])<if_stmt>text_property.actual()['slant']<eq>'roman'<block_start>text_editor.configure(font=(current_font_family current_font_size 'italic'))<block_end><if_stmt>text_property.actual()['slant']<eq>'italic'<block_start>text_editor.configure(font=(current_font_family current_font_size 'normal'))<block_end><block_end>italic_btn.configure(command=change_italic)<line_sep># UNDERSCORE <def_stmt>change_underline <block_start>text_property=tk.font.Font(font=text_editor['font'])<if_stmt>text_property.actual()['underline']<eq>0<block_start>text_editor.configure(font=(current_font_family current_font_size 'underline'))<block_end><if_stmt>text_property.actual()['underline']<eq>1<block_start>text_editor.configure(font=(current_font_family current_font_size 'normal'))<block_end><block_end>underline_btn.configure(command=change_underline)<line_sep># FONT COLOR <def_stmt>change_font_color <block_start>color_var=tk.colorchooser.askcolor()<line_sep>text_editor.configure(fg=color_var[1])<block_end>font_color_btn.configure(command=change_font_color)<line_sep># ALIGN LEFT <def_stmt>align_left 
<block_start>text_content=text_editor.get(1.0 'end')<line_sep>text_editor.tag_config('left' justify=tk.LEFT)<line_sep>text_editor.delete(1.0 tk.END)<line_sep>text_editor.insert(tk.INSERT text_content 'left')<block_end>align_left_btn.configure(command=align_left)<line_sep># ALIGN CENTER <def_stmt>align_center <block_start>text_content=text_editor.get(1.0 'end')<line_sep>text_editor.tag_config('center' justify=tk.CENTER)<line_sep>text_editor.delete(1.0 tk.END)<line_sep>text_editor.insert(tk.INSERT text_content 'center')<block_end>align_center_btn.configure(command=align_center)<line_sep># ALIGN RIGHT <def_stmt>align_right <block_start>text_content=text_editor.get(1.0 'end')<line_sep>text_editor.tag_config('right' justify=tk.RIGHT)<line_sep>text_editor.delete(1.0 tk.END)<line_sep>text_editor.insert(tk.INSERT text_content 'right')<block_end>align_right_btn.configure(command=align_right)<line_sep>text_editor.configure(font=('Arial' 12))<line_sep># STATUS BAR status_bar=ttk.Label(main_application text='Status Bar')<line_sep>status_bar.pack(side=tk.BOTTOM)<line_sep>text_changed=<false><def_stmt>changed event=<none><block_start><global>text_changed<if_stmt>text_editor.edit_modified()<block_start>text_changed=<true><line_sep>words=len(text_editor.get(1.0 'end-1c').split())<line_sep>characters=len(text_editor.get(1.0 'end-1c'))<line_sep>status_bar.config(text=f'Characters : {characters} Word : {words}')<block_end>text_editor.edit_modified(<false>)<block_end>text_editor.bind('<<Modified>>' changed)<line_sep># MAIN MENU FUNCTIONALITY url=''<def_stmt>new_file event=<none><block_start><global>url<line_sep>url=''<line_sep>text_editor.delete(1.0 tk.END)<block_end>file.add_command(label='New' image=new_icon compound=tk.LEFT accelerator='Ctrl+N' command=new_file)<def_stmt>open_file event=<none><block_start><global>url<line_sep>url=filedialog.askopenfilename(initialdir=os.getcwd() title='Select File' filetypes=(('Text File' '*.txt') ('All Files' '*.*')))<try_stmt><block_start><with_stmt>open(url 'r')<as>fr<block_start>text_editor.delete(1.0 tk.END)<line_sep>text_editor.insert(1.0 fr.read())<block_end><block_end><except_stmt>FileNotFoundError<block_start><return><block_end><except_stmt><block_start><return><block_end>main_application.title(os.path.basename(url))<block_end>file.add_command(label='Open' image=open_icon compound=tk.LEFT accelerator='Ctrl+O' command=open_file)<def_stmt>save_file event=<none><block_start><global>url<try_stmt><block_start><if_stmt>url<block_start>content=str(text_editor.get(1.0 tk.END))<with_stmt>open(url 'w' encoding='utf-8')<as>fw<block_start>fw.write(content)<block_end><block_end><else_stmt><block_start>url=filedialog.asksaveasfile(mode='w' defaultextension='.txt' filetypes=(('Text File' '*.txt') ('All Files' '*.*')))<line_sep>content2=text_editor.get(1.0 tk.END)<line_sep>url.write(content2)<line_sep>url.close()<block_end><block_end><except_stmt><block_start><return><block_end><block_end>file.add_command(label='Save' image=save_icon compound=tk.LEFT accelerator='Ctrl+S' command=save_file)<def_stmt>save_as event=<none><block_start><global>url<try_stmt><block_start>content=text_editor.get(1.0 tk.END)<line_sep>url=filedialog.asksaveasfile(mode='w' defaultextension='.txt' filetypes=(('Text File' '*.txt') ('All Files' '*.*')))<line_sep>url.write(content)<line_sep>url.close()<block_end><except_stmt><block_start><return><block_end><block_end>file.add_command(label='Save As' image=save_as_icon compound=tk.LEFT accelerator='Ctrl+Alt+S' command=save_as)<def_stmt>exit_func 
event=<none><block_start><global>url text_changed<try_stmt><block_start><if_stmt>text_changed<block_start>mbox=messagebox.askyesnocancel('Warning' 'Do you want to Save the file ?')<if_stmt>mbox<is><true><block_start><if_stmt>url<block_start>content=text_editor.get(1.0 tk.END)<with_stmt>open(url 'w' encoding='utf-8')<as>fw<block_start>fw.write(content)<line_sep>main_application.destroy()<block_end><block_end><else_stmt><block_start>content2=str(text_editor.get(1.0 tk.END))<line_sep>url=filedialog.asksaveasfile(mode='w' defaultextension='.txt' filetypes=(('Text File' '*.txt') ('All Files' '*.*')))<line_sep>url.write(content2)<line_sep>url.close()<line_sep>main_application.destroy()<block_end><block_end><elif_stmt>mbox<is><false><block_start>main_application.destroy()<block_end><block_end><else_stmt><block_start>main_application.destroy()<block_end><block_end><except_stmt><block_start><return><block_end><block_end>file.add_command(label='Exit' image=exit_icon compound=tk.LEFT accelerator='Ctrl+Q' command=exit_func)<def_stmt>find_func event=<none><block_start><def_stmt>find <block_start>word=find_input.get()<line_sep>text_editor.tag_remove('match' '1.0' tk.END)<line_sep>matches=0<if_stmt>word<block_start>start_pos='1.0'<while_stmt><true><block_start>start_pos=text_editor.search(word start_pos stopindex=tk.END)<if_stmt><not>start_pos<block_start><break><block_end>end_pos=f'{start_pos}+{len(word)}c'<line_sep>text_editor.tag_add('match' start_pos end_pos)<line_sep>matches<augadd>1<line_sep>start_pos=end_pos<line_sep>text_editor.tag_config('match' foreground='red' background='yellow')<block_end><block_end><block_end><def_stmt>replace <block_start>word=find_input.get()<line_sep>replace_text=replace_input.get()<line_sep>content=text_editor.get(1.0 tk.END)<line_sep>new_content=content.replace(word replace_text)<line_sep>text_editor.delete(1.0 tk.END)<line_sep>text_editor.insert(1.0 new_content)<block_end>find_dialogue=tk.Toplevel()<line_sep>find_dialogue.geometry('450x250+500+200')<line_sep>find_dialogue.title('Find')<line_sep>find_dialogue.resizable(0 0)<line_sep># FRAME find_frame=ttk.LabelFrame(find_dialogue text='Find/Replace')<line_sep>find_frame.pack(pady=20)<line_sep># LABELS text_find_label=ttk.Label(find_frame text='Find: ')<line_sep>text_replace_label=ttk.Label(find_frame text='Replace: ')<line_sep># ENTRY find_input=ttk.Entry(find_frame width=30)<line_sep>replace_input=ttk.Entry(find_frame width=30)<line_sep># BUTTON find_button=ttk.Button(find_frame text='Find' command=find)<line_sep>replace_button=ttk.Button(find_frame text='Replace' command=replace)<line_sep># LABEL GRID text_find_label.grid(row=0 column=0 padx=4 pady=4)<line_sep>text_replace_label.grid(row=1 column=0 padx=4 pady=4)<line_sep># ENTRY GRID find_input.grid(row=0 column=1 padx=4 pady=4)<line_sep>replace_input.grid(row=1 column=1 padx=4 pady=4)<line_sep># BUTTON GRID find_button.grid(row=2 column=0 padx=4 pady=4)<line_sep>replace_button.grid(row=2 column=1 padx=4 pady=4)<line_sep>find_dialogue.mainloop()<block_end># EDIT COMMAND edit.add_command(label='Copy' image=copy_icon compound=tk.LEFT accelerator='Ctrl+C' command=<lambda>:text_editor.event_generate("<Control-c>"))<line_sep>edit.add_command(label='Paste' image=paste_icon compound=tk.LEFT accelerator='Ctrl+V' command=<lambda>:text_editor.event_generate("<Control-v>"))<line_sep>edit.add_command(label='Cut' image=cut_icon compound=tk.LEFT accelerator='Ctrl+X' command=<lambda>:text_editor.event_generate("<Control-x>"))<line_sep>edit.add_command(label='Clear All' 
image=clear_all_icon compound=tk.LEFT accelerator='Ctrl+Alt+X' command=<lambda>:text_editor.delete(1.0 tk.END))<line_sep>edit.add_command(label='Find' image=find_icon compound=tk.LEFT accelerator='Ctrl+F' command=find_func)<line_sep># VIEW CHECK BUTTON show_statusbar=tk.BooleanVar()<line_sep>show_statusbar.set(<true>)<line_sep>show_toolbar=tk.BooleanVar()<line_sep>show_toolbar.set(<true>)<def_stmt>hide_toolbar <block_start><global>show_toolbar<if_stmt>show_toolbar<block_start>tool_bar.pack_forget()<line_sep>show_toolbar=<false><block_end><else_stmt><block_start>text_editor.pack_forget()<line_sep>status_bar.pack_forget()<line_sep>tool_bar.pack(side=tk.TOP fill=tk.X)<line_sep>text_editor.pack(fill=tk.BOTH expand=<true>)<line_sep>status_bar.pack(side=tk.BOTTOM)<line_sep>show_toolbar=<true><block_end><block_end><def_stmt>hide_statusbar <block_start><global>show_statusbar<if_stmt>show_statusbar<block_start>status_bar.pack_forget()<line_sep>show_statusbar=<false><block_end><else_stmt><block_start>status_bar.pack(side=tk.BOTTOM)<line_sep>show_statusbar=<true><block_end><block_end>view.add_checkbutton(label='Tool Bar' onvalue=<true> offvalue=0 variable=show_toolbar image=tool_bar_icon compound=tk.LEFT command=hide_toolbar)<line_sep>view.add_checkbutton(label='Status Bar' onvalue=1 offvalue=<false> variable=show_statusbar image=status_bar_icon compound=tk.LEFT command=hide_statusbar)<line_sep># COLOR THEME <def_stmt>change_theme <block_start>chosen_theme=theme_choice.get()<line_sep>color_tuple=color_dict.get(chosen_theme)<line_sep>fg_color,bg_color=color_tuple[0] color_tuple[1]<line_sep>text_editor.config(background=bg_color fg=fg_color)<block_end>count=0<for_stmt>i color_dict<block_start>color_theme.add_radiobutton(label=i image=color_icons[count] variable=theme_choice compound=tk.LEFT command=change_theme)<line_sep>count<augadd>1<block_end>main_application.config(menu=main_menu)<line_sep># BIND SHORTCUT KEYS main_application.bind("<Control-n>" new_file)<line_sep>main_application.bind("<Control-o>" open_file)<line_sep>main_application.bind("<Control-s>" save_file)<line_sep>main_application.bind("<Control-Alt-s>" save_as)<line_sep>main_application.bind("<Control-q>" exit_func)<line_sep>main_application.bind("<Control-f>" find_func)<line_sep>main_application.mainloop()<line_sep>
""" Script to calculate the average IoU of the same obejct on consecutive frames, and the relative switch frequency (Figure3(b) and Figure3(c)). The original data in paper is calculated on all sets: train+val+test. On the train-set: * Average IoU on consecutive frames = 0.894 * Relative Position Switch frequency = 0.031 On the val-set: * Average IoU on consecutive frames = 0.909 * Relative Position Switch frequency = 0.030 The splitting of subsets is """<import_stmt>numpy<as>np<import_stmt>os<line_sep>source_dir="train"<line_sep># source_dir = "val" <def_stmt>box_area arr# arr: np.array([[x1, y1, x2, y2]]) <block_start>width=arr[: 2]-arr[: 0]<line_sep>height=arr[: 3]-arr[: 1]<line_sep><return>width<times>height<block_end><def_stmt>_box_inter_union arr1 arr2# arr1 of [N, 4] # arr2 of [N, 4] <block_start>area1=box_area(arr1)<line_sep>area2=box_area(arr2)<line_sep># Intersection top_left=np.maximum(arr1[: :2] arr2[: :2])# [[x, y]] bottom_right=np.minimum(arr1[: 2:] arr2[: 2:])# [[x, y]] wh=bottom_right-top_left<line_sep># clip: if boxes not overlap then make it zero intersection=wh[: 0].clip(0)<times>wh[: 1].clip(0)<line_sep>#union union=area1+area2-intersection<line_sep><return>intersection union<block_end><def_stmt>box_iou arr1 arr2# arr1[N, 4] # arr2[N, 4] # N = number of bounding boxes <block_start><assert_stmt>(arr1[: 2:]<g>arr1[: :2]).all()<assert_stmt>(arr2[: 2:]<g>arr2[: :2]).all()<line_sep>inter,union=_box_inter_union(arr1 arr2)<line_sep>iou=inter/union<line_sep><return>iou<block_end><def_stmt>consecutive_iou annos<block_start>""" calculate the IoU over bboxes on the consecutive frames """<line_sep>max_frame=int(annos[: 0].max())<line_sep>min_frame=int(annos[: 0].min())<line_sep>total_iou=0<line_sep>total_frequency=0<for_stmt>find range(min_frame max_frame)<block_start>anno_cur=annos[np.where(annos[: 0]<eq>find)]<line_sep>anno_next=annos[np.where(annos[: 0]<eq>find+1)]<line_sep>ids_cur=np.unique(anno_cur[: 1])<line_sep>ids_next=np.unique(anno_next[: 1])<line_sep>common_ids=np.intersect1d(ids_cur ids_next)<for_stmt>tid common_ids<block_start>cur_box=anno_cur[np.where(anno_cur[: 1]<eq>tid)][: 2:6]<line_sep>next_box=anno_next[np.where(anno_next[: 1]<eq>tid)][: 2:6]<line_sep>cur_box[: 2:]<augadd>cur_box[: :2]<line_sep>next_box[: 2:]<augadd>next_box[: :2]<line_sep>iou=box_iou(cur_box next_box).item()<line_sep>total_iou<augadd>iou<line_sep>total_frequency<augadd>1<block_end><block_end><return>total_iou total_frequency<block_end><def_stmt>center box<block_start><return>(box[0]+0.5<times>box[2] box[1]+0.5<times>box[3])<block_end><def_stmt>relative_switch annos<block_start>""" calculate the frequency of relative position switch regarding center location """<line_sep>max_frame=int(annos[: 0].max())<line_sep>min_frame=int(annos[: 0].min())<line_sep>switch=0<line_sep>sw_freq=0<for_stmt>find range(min_frame max_frame)<block_start>anno_cur=annos[np.where(annos[: 0]<eq>find)]<line_sep>anno_next=annos[np.where(annos[: 0]<eq>find+1)]<line_sep>ids_cur=np.unique(anno_cur[: 1])<line_sep>ids_next=np.unique(anno_next[: 1])<line_sep>common_ids=np.intersect1d(ids_cur ids_next)<for_stmt>id1 common_ids<block_start><for_stmt>id2 common_ids<block_start>sw_freq<augadd>1<if_stmt>id1<eq>id2<block_start><continue><block_end>box_cur_1=anno_cur[np.where(anno_cur[: 1]<eq>id1)][0][2:6]<line_sep>box_cur_2=anno_cur[np.where(anno_cur[: 1]<eq>id2)][0][2:6]<line_sep>box_next_1=anno_next[np.where(anno_next[: 1]<eq>id1)][0][2:6]<line_sep>box_next_2=anno_next[np.where(anno_next[: 
1]<eq>id2)][0][2:6]<line_sep>left_right_cur=center(box_cur_1)[0]<ge>center(box_cur_2)[0]<line_sep>left_right_next=center(box_next_1)[0]<ge>center(box_next_2)[0]<line_sep>top_down_cur=center(box_cur_1)[1]<ge>center(box_cur_2)[1]<line_sep>top_down_next=center(box_next_1)[1]<ge>center(box_next_2)[1]<if_stmt>(left_right_cur<ne>left_right_next)<or>(top_down_cur<ne>top_down_next)<block_start>switch<augadd>1<block_end><block_end><block_end><block_end><return>switch sw_freq<block_end><if_stmt>__name__<eq>"__main__"<block_start>seqs=os.listdir(source_dir)<line_sep>all_iou,all_freq=0 0<line_sep>all_switch,all_sw_freq=0 0<for_stmt>seq seqs<block_start><if_stmt>seq<eq>".DS_Store"<block_start><continue><block_end>anno_file=os.path.join(source_dir seq "gt/gt.txt")<line_sep>annos=np.loadtxt(anno_file delimiter=",")<line_sep>seq_iou,seq_freq=consecutive_iou(annos)<line_sep>seq_switch,seq_sw_freq=relative_switch(annos)<line_sep>all_iou<augadd>seq_iou<line_sep>all_freq<augadd>seq_freq<line_sep>all_switch<augadd>seq_switch<line_sep>all_sw_freq<augadd>seq_sw_freq<block_end>print("Average IoU on consecutive frames = {}".format(all_iou/all_freq))<line_sep>print("Relative Position Switch frequency = {}".format(all_switch/all_sw_freq))<block_end>
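A small worked example of what box_iou above returns for one pair of (x1, y1, x2, y2) boxes; the numbers are made up, and the call assumes the decoded script is run or imported so that box_iou is in scope:

import numpy as np
a = np.array([[0.0, 0.0, 10.0, 10.0]])   # area 100
b = np.array([[5.0, 5.0, 15.0, 15.0]])   # area 100, overlaps a in a 5 x 5 patch
# intersection = 25, union = 100 + 100 - 25 = 175, so IoU = 25 / 175 ≈ 0.1429
print(box_iou(a, b))   # [0.14285714]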
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test search policy"""<import_stmt>numpy<as>np<import_stmt>tempfile<import_stmt>tvm<import_stmt>tvm.testing<import_from_stmt>tvm auto_scheduler<import_from_stmt>tvm.auto_scheduler.utils get_const_tuple<import_from_stmt>tvm.testing.auto_scheduler matmul_auto_scheduler_test zero_rank_compute_auto_scheduler_test zero_rank_reduce_auto_scheduler_test <def_stmt>test_search_task_add_task_input <block_start>auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()<line_sep>N=64<line_sep>target="llvm"<line_sep>test_input_0=tvm.runtime.ndarray.empty((64 64))<line_sep>test_input_1=tvm.runtime.ndarray.empty((10 20))<line_sep>test_input_2=tvm.runtime.ndarray.empty((30 40 50))<line_sep>task=auto_scheduler.SearchTask(func="matmul_auto_scheduler_test" args=(N N N) target=target task_inputs={"test_input_0":test_input_0 "test_input_1":test_input_1 "test_input_2":test_input_2 } task_inputs_overwrite=<true> )<assert_stmt>len(task.task_input_names)<eq>3<assert_stmt>task.task_input_names[0]<eq>"test_input_0"<assert_stmt>task.task_input_names[1]<eq>"test_input_1"<assert_stmt>task.task_input_names[2]<eq>"test_input_2"<block_end><def_stmt>test_search_task_record <block_start>auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()<line_sep>N=64<line_sep>target="llvm"<line_sep># Log with no task input task=auto_scheduler.SearchTask(func="matmul_auto_scheduler_test" args=(N N N) target=target)<line_sep>task_record=auto_scheduler._ffi_api.SerializeSearchTask(task)<line_sep>new_task=auto_scheduler._ffi_api.DeserializeSearchTask(task_record)<line_sep># TODO(jcf94): Check the compute dag & hardware parameter <assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<line_sep># Log with 1 task input test_input_0=tvm.runtime.ndarray.empty((64 64))<line_sep>task=auto_scheduler.SearchTask(func="matmul_auto_scheduler_test" args=(N N N) target=target task_inputs={"test_input_0":test_input_0} task_inputs_overwrite=<true> )<line_sep>task_record=auto_scheduler._ffi_api.SerializeSearchTask(task)<line_sep>new_task=auto_scheduler._ffi_api.DeserializeSearchTask(task_record)<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>1<assert_stmt>new_task.task_input_names[0]<eq>"test_input_0"<line_sep># Log with multiple task inputs test_input_1=tvm.runtime.ndarray.empty((64 64))<line_sep>task=auto_scheduler.SearchTask(func="matmul_auto_scheduler_test" 
args=(N N N) target=target task_inputs={"test_input_0":test_input_0 "test_input_1":test_input_1 } task_inputs_overwrite=<true> )<line_sep>task_record=auto_scheduler._ffi_api.SerializeSearchTask(task)<line_sep>new_task=auto_scheduler._ffi_api.DeserializeSearchTask(task_record)<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>2<assert_stmt>new_task.task_input_names[0]<eq>"test_input_0"<assert_stmt>new_task.task_input_names[1]<eq>"test_input_1"<line_sep># Log with version 0.5 v5_log="""["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""<line_sep>new_task=auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>0<block_end><def_stmt>test_recover_measure_input_with_task_input <block_start>auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()<line_sep># Since this file is tests for search_task, we only check the search_task here # Log with no task input task=auto_scheduler.SearchTask(func=matmul_auto_scheduler_test args=(512 512 512) target="llvm")<line_sep>inp=auto_scheduler.measure.MeasureInput(task task.compute_dag.init_state)<line_sep>res=auto_scheduler.measure.MeasureResult([0.1] 0 "" 0.2 1)<line_sep>measure_record=auto_scheduler.measure_record.dump_record_to_string(inp res)<line_sep>measure_log=auto_scheduler.measure_record.load_record_from_string(measure_record)<line_sep>new_task=measure_log[0].task<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<line_sep># Log with 1 task input test_input_0=tvm.runtime.ndarray.empty((64 64))<line_sep>task=auto_scheduler.SearchTask(func=matmul_auto_scheduler_test args=(512 512 512) target="llvm" task_inputs={"test_input_0":test_input_0 } task_inputs_overwrite=<true> )<line_sep>inp=auto_scheduler.measure.MeasureInput(task task.compute_dag.init_state)<line_sep>res=auto_scheduler.measure.MeasureResult([0.1] 0 "" 0.2 1)<line_sep>measure_record=auto_scheduler.measure_record.dump_record_to_string(inp res)<line_sep>measure_log=auto_scheduler.measure_record.load_record_from_string(measure_record)<line_sep>new_task=measure_log[0].task<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>1<assert_stmt>new_task.task_input_names[0]<eq>"test_input_0"<line_sep># Log with multiple task inputs test_input_1=tvm.runtime.ndarray.empty((64 64))<line_sep>task=auto_scheduler.SearchTask(func=matmul_auto_scheduler_test args=(512 512 512) target="llvm" task_inputs={"test_input_0":test_input_0 "test_input_1":test_input_1 } task_inputs_overwrite=<true> )<line_sep>inp=auto_scheduler.measure.MeasureInput(task 
task.compute_dag.init_state)<line_sep>res=auto_scheduler.measure.MeasureResult([0.1] 0 "" 0.2 1)<line_sep>measure_record=auto_scheduler.measure_record.dump_record_to_string(inp res)<line_sep>measure_log=auto_scheduler.measure_record.load_record_from_string(measure_record)<line_sep>new_task=measure_log[0].task<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>2<assert_stmt>new_task.task_input_names[0]<eq>"test_input_0"<assert_stmt>new_task.task_input_names[1]<eq>"test_input_1"<line_sep># Log with version 0.5 v5_log="""{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""<line_sep>measure_log=auto_scheduler.measure_record.load_record_from_string(v5_log)<line_sep>new_task=measure_log[0].task<assert_stmt>task.workload_key<eq>new_task.workload_key<assert_stmt>str(task.target)<eq>str(new_task.target)<assert_stmt>str(task.target.host)<eq>str(new_task.target.host)<assert_stmt>task.layout_rewrite_option<eq>new_task.layout_rewrite_option<assert_stmt>len(new_task.task_input_names)<eq>0<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_search_task_add_task_input()<line_sep>test_search_task_record()<line_sep>test_recover_measure_input_with_task_input()<block_end>
PRED_TYPE='Basic'<line_sep>TTA_PRED_TYPE='TTA'<line_sep>ENS_TYPE='Ens'<line_sep>MEGA_ENS_TYPE='MegaEns'<line_sep>
<import_stmt>taichi<as>ti<import_from_stmt>.bls_test_template bls_particle_grid<line_sep>@ti.test(require=ti.extension.bls)<def_stmt>test_scattering <block_start>bls_particle_grid(N=128 ppc=10 block_size=8 scatter=<true> use_offset=<false>)<block_end>@ti.test(require=ti.extension.bls)<def_stmt>test_scattering_offset <block_start>bls_particle_grid(N=128 ppc=10 block_size=8 scatter=<true> use_offset=<true>)<block_end>@ti.test(require=ti.extension.bls)<def_stmt>test_scattering_two_pointer_levels <block_start>bls_particle_grid(N=128 ppc=10 block_size=8 scatter=<true> pointer_level=2 use_offset=<false>)<block_end>@ti.test(require=ti.extension.bls)<def_stmt>test_gathering <block_start>bls_particle_grid(N=128 ppc=10 block_size=8 scatter=<false> use_offset=<false>)<block_end>@ti.test(require=ti.extension.bls)<def_stmt>test_gathering_offset <block_start>bls_particle_grid(N=128 ppc=10 block_size=8 scatter=<false> use_offset=<true>)<block_end># TODO: debug mode behavior of assume_in_range
# -*- coding: utf-8 -*- """ @author: <NAME> <<EMAIL>> @brief: script for generating the best ensemble model from Chenglong's side @note: 1. make sure you have run `python run_data.py` first 2. make sure you have built `some diverse` 1st level models first (see `./Log/level1_models` for example) """<import_stmt>os<line_sep>cmd="python run_stacking_ridge.py -l 2 -d 0 -t 10 -c 1 -L reg_ensemble -o"<line_sep>os.system(cmd)<line_sep>
# -*- coding: utf-8 -*- <import_stmt>shutil<import_stmt>tempfile<import_stmt>warnings<line_sep>#=============================================================================== <try_stmt><block_start><import_from_stmt>tempfile TemporaryDirectory<block_end><except_stmt>ImportError<block_start><class_stmt>TemporaryDirectory(object)<block_start><def_stmt>__init__ self suffix='' prefix='tmp' dir=<none><block_start>self.name=tempfile.mkdtemp(suffix prefix dir)<block_end><def_stmt>__enter__ self<block_start><return>self.name<block_end><def_stmt>__exit__ self exc value tb<block_start><try_stmt><block_start>shutil.rmtree(self.name)<block_end><except_stmt>OSError<as>err<block_start><if_stmt>err.errno<ne>2<block_start><raise><block_end><block_end><block_end><block_end><block_end>#=============================================================================== <class_stmt>UserLoginContext(object)<block_start><def_stmt>__init__ self testcase **kwargs<block_start>self.testcase=testcase<line_sep>self.kwargs=kwargs<block_end><def_stmt>__enter__ self<block_start>self.testcase.assertTrue(self.testcase.client.login(**self.kwargs))<block_end><def_stmt>__exit__ self exc value tb<block_start>self.testcase.client.logout()<block_end><block_end>#=============================================================================== <class_stmt>AssertThrowsWarningContext(object)<block_start><def_stmt>__init__ self test_case klass number<block_start>self.test_case=test_case<line_sep>self.klass=klass<line_sep>self.number=number<line_sep>self.ctx=warnings.catch_warnings(record=<true>)<block_end><def_stmt>__enter__ self<block_start>self.warnings=self.ctx.__enter__()<line_sep>warnings.resetwarnings()<line_sep>warnings.simplefilter('always')<block_end><def_stmt>__exit__ self type value traceback<block_start>self.test_case.assertEqual(len(self.warnings) self.number "%d warnings thrown, %d expected"%(len(self.warnings) self.number))<for_stmt>warning self.warnings<block_start>self.test_case.assertTrue(issubclass(warning.category self.klass) '%s warning thrown, %s expected'%(warning.category.__name__ self.klass.__name__))<block_end>self.ctx.__exit__(type value traceback)<block_end><block_end>
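# Added illustration (not part of the original module): a minimal usage sketch of the
# TemporaryDirectory fallback defined above. It behaves the same whether the stdlib
# version or the backport is in use; the directory is removed when the block exits.
import os

with TemporaryDirectory(prefix='example-') as tmp_dir:
    with open(os.path.join(tmp_dir, 'demo.txt'), 'w') as fh:
        fh.write('hello')
# tmp_dir no longer exists here; the backport ignores a missing directory (errno 2) on cleanup.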
<import_from_stmt>unittest mock<import_stmt>pytest<line_sep>@pytest.fixture<def_stmt>mocksleep <block_start><with_stmt>mock.patch("time.sleep")<as>mocked<block_start><yield>mocked<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>mock<import_stmt>pytest<import_from_stmt>bravado_core.operation Operation<import_from_stmt>bravado_core.request IncomingRequest<import_from_stmt>bravado_core.request unmarshal_request<import_from_stmt>bravado_core.spec Spec<import_from_stmt>typing Any<import_from_stmt>typing Dict<import_from_stmt>bravado.client CallableOperation<import_from_stmt>bravado.client construct_request<def_stmt>build_swagger_spec swagger_dict<block_start>spec=Spec(swagger_dict)<line_sep>spec.api_url='http://localhost/'<line_sep><return>spec<block_end>@pytest.mark.parametrize('timeout_kv' [('timeout' 1) ('connect_timeout' 2) ])@mock.patch('bravado.client.marshal_param')<def_stmt>test_with_timeouts mock_marshal_param minimal_swagger_spec getPetById_spec request_dict timeout_kv <block_start>request_dict['url']='/pet/{petId}'<line_sep>op=CallableOperation(Operation.from_spec(minimal_swagger_spec '/pet/{petId}' 'get' getPetById_spec))<line_sep>k,v=timeout_kv<line_sep>request=construct_request(op request_options={k:v} petId=34 api_key='foo')<assert_stmt>request[k]<eq>v<assert_stmt>mock_marshal_param.call_count<eq>2<block_end>@pytest.mark.parametrize('swagger_type, swagger_format, header_name, header_value' [('boolean' <none> 'boolean' <true>) ('integer' <none> 'integer' 1) ('number' 'float' 'float' 2.0) ] )<def_stmt>test_with_not_string_headers minimal_swagger_dict getPetById_spec request_dict swagger_type swagger_format header_name header_value <block_start>url='/pet/{petId}'<line_sep>parameter={'name':header_name 'in':'header' 'required':<false> 'type':swagger_type }<if_stmt>swagger_format<block_start>parameter['format']=swagger_format<block_end>minimal_swagger_dict['paths'][url]['get']['parameters'].append(parameter)<line_sep>minimal_swagger_spec=build_swagger_spec(minimal_swagger_dict)<line_sep>request_dict['url']=url<line_sep>operation=Operation.from_spec(swagger_spec=minimal_swagger_spec path_name='/pet/{petId}' http_method='get' op_spec=getPetById_spec )<line_sep>petId=34<line_sep>api_key='foo'<line_sep>request=construct_request(operation=operation request_options={'headers':{header_name:header_value}} petId=petId api_key=api_key )<line_sep># To unmarshall a request bravado-core needs the request to be wrapped # by an object with a specific list of attributes request_object=type('IncomingRequest' (IncomingRequest ) {'path':{'petId':petId} 'query':{} 'form':{} 'headers':request['headers'] 'files':mock.Mock() })<line_sep>expected_header_value=str(header_value)<line_sep># we need to handle a backwards-incompatible change in bravado-core 5.0.5 <if_stmt>swagger_type<eq>'boolean'<block_start><assert_stmt>request['headers'][header_name]<in>(expected_header_value expected_header_value.lower())<block_end><else_stmt><block_start><assert_stmt>request['headers'][header_name]<eq>expected_header_value<block_end>unmarshalled_request=unmarshal_request(request_object operation)<assert_stmt>unmarshalled_request[header_name]<eq>header_value<block_end><def_stmt>test_use_msgpack minimal_swagger_spec getPetById_spec <block_start>op=CallableOperation(Operation.from_spec(minimal_swagger_spec '/pet/{petId}' 'get' getPetById_spec))<line_sep>request_options={'use_msgpack':<true> 'headers':{'Some-Header':'header-value'}}<line_sep># type: Dict[str, Any] request=construct_request(op request_options=request_options petId=1 )<assert_stmt>request['headers']['Accept']<eq>'application/msgpack'<assert_stmt>request['headers']['Some-Header']<eq>'header-value' "Requested header should be 
present"<assert_stmt>'Accept'<not><in>request_options['headers'] "Original request options should not be modified"<block_end>
<import_from_stmt>lark Lark Tree <import_from_stmt>.node Node NodeType <class_stmt>Parser(object)<block_start>grammar=r''' start: grammar grammar: imports? external_imports? name? start_expression? production+ production: annotations? symbol _OPER expression _OPER: "::=" expression: sequence (_BAR sequence)* _BAR: "|" sequence: probability? annotations? (symbol | TERMINAL) (_WHITESPACE (symbol | TERMINAL))* TERMINAL: "\"" (LETTER | ESCAPED | NUMBER | "_" | "-" | ":")+ "\"" | "ε" ESCAPED: "\\" ("." | "," | "*" | "^" | "(" | ")" | "+" | "-" | "/" | "\"" | " " | "]" | "[" | "|") probability: NUMBER+ start_expression: _START symbol _START: "start:" name: _GRAMMAR NAME NAME: LETTER+ _GRAMMAR: "Grammar:" external_imports: external_import+ external_import: _FROM FILENAME _IMPORT _LP items _RP _FROM: "from" _LP: "(" _RP: ")" items: ITEM ","? | ITEM "," items ITEM: /\w+/ imports: import+ import: _IMPORT FILENAME FILENAME: /(\w|\\|\.|-|_)+/ _IMPORT: "import" annotations: annotation+ annotation: _AT IDENT _AT: "@" symbol: _LB IDENT _RB _LB: "<" _RB: ">" IDENT: LETTER (LETTER | NUMBER | "_" | "-")* %import common.LETTER %import common.NUMBER _COMMENT: /#[^\n]*/ %ignore _COMMENT _WHITESPACE: (" " | "\n" | "\t")+ %ignore _WHITESPACE '''<line_sep># noqa: E501 <def_stmt>__init__ self<block_start>self.delegate=Lark(self.grammar)<block_end><def_stmt>parse self value:str<arrow>Node<block_start>tree=self.delegate.parse(value)<line_sep><return>Node.from_lark_tree(tree)<block_end><def_stmt>parse_production self value:str<arrow>Node<block_start>"""Parse just an production. Args: value: The string to parse. Throws: Exception: If there is more than a single production in the value. Returns: A node which is the head of the production (not the grammar.) """<if_stmt>'\n'<in>value<block_start><raise>Exception('There should only be a single product, but '<concat>'a newline is present.')<block_end>grammar=self.parse(value)<if_stmt>grammar.children[0].node_type<eq>NodeType.PRODUCTION<block_start>production=grammar.children[0]<block_end><else_stmt><block_start>production=grammar.children[1]<block_end>grammar.children=list()<line_sep><return>production<block_end><block_end>
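# Added illustration (not part of the original module): a hedged usage sketch of the
# Parser above. The production syntax follows the grammar in `Parser.grammar`
# (angle-bracket symbols, "::=", quoted terminals); the returned objects are the
# package's own Node type, so this assumes the sibling `.node` module is importable.
parser = Parser()
production = parser.parse_production('<greeting> ::= "hello" | "hi"')
grammar = parser.parse('<greeting> ::= "hello"\n<farewell> ::= "bye"')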
"""Pretrained models for Standford Dogs dataset"""<import_stmt>os<import_stmt>mxnet<as>mx<import_stmt>gluoncv<as>gcv<import_from_stmt>..model_store get_model_file<line_sep>__all__=['standford_dog_resnet152_v1' 'standford_dog_resnext101_64x4d']<def_stmt>standford_dog_resnet152_v1 pretrained=<false> root=os.path.join('~' '.autogluon' 'models') ctx=mx.cpu(0) **kwargs<block_start>net=gcv.model_zoo.resnet152_v1(classes=120 **kwargs)<if_stmt>pretrained<block_start>net.load_parameters(get_model_file('standford_dog_resnet152_v1' root=root) ctx=ctx)<block_end><return>net<block_end><def_stmt>standford_dog_resnext101_64x4d pretrained=<false> root=os.path.join('~' '.autogluon' 'models') ctx=mx.cpu(0) **kwargs<block_start>net=gcv.model_zoo.resnext.resnext101_64x4d(classes=120 **kwargs)<if_stmt>pretrained<block_start>net.load_parameters(get_model_file('standford_dog_resnext101_64x4d' root=root) ctx=ctx)<block_end><return>net<block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>mmpose.models.detectors AssociativeEmbedding<def_stmt>test_ae_forward <block_start>model_cfg=dict(type='AssociativeEmbedding' pretrained=<none> backbone=dict(type='ResNet' depth=18) keypoint_head=dict(type='AESimpleHead' in_channels=512 num_joints=17 num_deconv_layers=0 tag_per_joint=<true> with_ae_loss=[<true>] extra=dict(final_conv_kernel=1 ) loss_keypoint=dict(type='MultiLossFactory' num_joints=17 num_stages=1 ae_loss_type='exp' with_ae_loss=[<true>] push_loss_factor=[0.001] pull_loss_factor=[0.001] with_heatmaps_loss=[<true>] heatmaps_loss_factor=[1.0])) train_cfg=dict() test_cfg=dict(num_joints=17 max_num_people=30 scale_factor=[1] with_heatmaps=[<true>] with_ae=[<true>] project2image=<true> nms_kernel=5 nms_padding=2 tag_per_joint=<true> detection_threshold=0.1 tag_threshold=1 use_detection_val=<true> ignore_too_much=<false> adjust=<true> refine=<true> soft_nms=<false> flip_test=<true> post_process=<true> shift_heatmap=<true> use_gt_bbox=<true> flip_pairs=[[1 2] [3 4] [5 6] [7 8] [9 10] [11 12] [13 14] [15 16]] ))<line_sep>detector=AssociativeEmbedding(model_cfg['backbone'] model_cfg['keypoint_head'] model_cfg['train_cfg'] model_cfg['test_cfg'] model_cfg['pretrained'])<line_sep>detector.init_weights()<line_sep>input_shape=(1 3 256 256)<line_sep>mm_inputs=_demo_mm_inputs(input_shape)<line_sep>imgs=mm_inputs.pop('imgs')<line_sep>target=mm_inputs.pop('target')<line_sep>mask=mm_inputs.pop('mask')<line_sep>joints=mm_inputs.pop('joints')<line_sep>img_metas=mm_inputs.pop('img_metas')<line_sep># Test forward train losses=detector.forward(imgs target mask joints img_metas return_loss=<true>)<assert_stmt>isinstance(losses dict)<line_sep># Test forward test <with_stmt>torch.no_grad()<block_start>_=detector.forward(imgs img_metas=img_metas return_loss=<false>)<line_sep>_=detector.forward_dummy(imgs)<block_end><block_end><def_stmt>_demo_mm_inputs input_shape=(1 3 256 256)<block_start>"""Create a superset of inputs needed to run test or train batches. Args: input_shape (tuple): input batch dimensions """<line_sep>(N C H W)=input_shape<line_sep>rng=np.random.RandomState(0)<line_sep>imgs=rng.rand(*input_shape)<line_sep>target=np.zeros([N 17 H<floordiv>32 W<floordiv>32] dtype=np.float32)<line_sep>mask=np.ones([N H<floordiv>32 W<floordiv>32] dtype=np.float32)<line_sep>joints=np.zeros([N 30 17 2] dtype=np.float32)<line_sep>img_metas=[{'image_file':'test.jpg' 'aug_data':[torch.zeros(1 3 256 256)] 'test_scale_factor':[1] 'base_size':(256 256) 'center':np.array([128 128]) 'scale':np.array([1.28 1.28]) 'flip_index':[0 2 1 4 3 6 5 8 7 10 9 12 11 14 13 16 15]}<for>_ range(N)]<line_sep>mm_inputs={'imgs':torch.FloatTensor(imgs).requires_grad_(<true>) 'target':[torch.FloatTensor(target)] 'mask':[torch.FloatTensor(mask)] 'joints':[torch.FloatTensor(joints)] 'img_metas':img_metas}<line_sep><return>mm_inputs<block_end>
<import_stmt>logging<import_from_stmt>typing Any Dict List Optional<import_from_stmt>dbus_next.aio MessageBus<import_from_stmt>dbus_next.constants BusType MessageType<import_from_stmt>dbus_next.message Message<import_from_stmt>dbus_next.signature Variant<import_from_stmt>bleak.backends.bluezdbus defs<import_from_stmt>bleak.backends.bluezdbus.signals MatchRules add_match remove_match<import_from_stmt>bleak.backends.bluezdbus.utils assert_reply unpack_variants validate_address <import_from_stmt>bleak.backends.device BLEDevice<import_from_stmt>bleak.backends.scanner BaseBleakScanner AdvertisementData<line_sep>logger=logging.getLogger(__name__)<line_sep># set of org.bluez.Device1 property names that come from advertising data _ADVERTISING_DATA_PROPERTIES={"AdvertisingData" "AdvertisingFlags" "ManufacturerData" "Name" "ServiceData" "UUIDs" }<def_stmt>_device_info path props<block_start><try_stmt><block_start>name=props.get("Alias" "Unknown")<line_sep>address=props.get("Address" <none>)<if_stmt>address<is><none><block_start><try_stmt><block_start>address=path[-17:].replace("_" ":")<if_stmt><not>validate_address(address)<block_start>address=<none><block_end><block_end><except_stmt>Exception<block_start>address=<none><block_end><block_end>rssi=props.get("RSSI" "?")<line_sep><return>name address rssi path<block_end><except_stmt>Exception<block_start><return><none> <none> <none> <none><block_end><block_end><class_stmt>BleakScannerBlueZDBus(BaseBleakScanner)<block_start>"""The native Linux Bleak BLE Scanner. For possible values for `filters`, see the parameters to the ``SetDiscoveryFilter`` method in the `BlueZ docs <https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/doc/adapter-api.txt?h=5.48&id=0d1e3b9c5754022c779da129025d493a198d49cf>`_ Keyword Args: adapter (str): Bluetooth adapter to use for discovery. filters (dict): A dict of filters to be applied on discovery. 
"""<def_stmt>__init__ self **kwargs<block_start>super(BleakScannerBlueZDBus self).__init__(**kwargs)<line_sep># kwarg "device" is for backwards compatibility self._adapter=kwargs.get("adapter" kwargs.get("device" "hci0"))<line_sep>self._bus:Optional[MessageBus]=<none><line_sep>self._cached_devices:Dict[str Variant]={}<line_sep>self._devices:Dict[str Dict[str Any]]={}<line_sep>self._rules:List[MatchRules]=[]<line_sep>self._adapter_path:str=f"/org/bluez/{self._adapter}"<line_sep># Discovery filters self._filters:Dict[str Variant]={}<line_sep>self.set_scanning_filter(**kwargs)<block_end><async_keyword><def_stmt>start self<block_start>self._bus=<await>MessageBus(bus_type=BusType.SYSTEM).connect()<line_sep>self._devices.clear()<line_sep>self._cached_devices.clear()<line_sep># Add signal listeners self._bus.add_message_handler(self._parse_msg)<line_sep>rules=MatchRules(interface=defs.OBJECT_MANAGER_INTERFACE member="InterfacesAdded" arg0path=f"{self._adapter_path}/" )<line_sep>reply=<await>add_match(self._bus rules)<line_sep>assert_reply(reply)<line_sep>self._rules.append(rules)<line_sep>rules=MatchRules(interface=defs.OBJECT_MANAGER_INTERFACE member="InterfacesRemoved" arg0path=f"{self._adapter_path}/" )<line_sep>reply=<await>add_match(self._bus rules)<line_sep>assert_reply(reply)<line_sep>self._rules.append(rules)<line_sep>rules=MatchRules(interface=defs.PROPERTIES_INTERFACE member="PropertiesChanged" path_namespace=self._adapter_path )<line_sep>reply=<await>add_match(self._bus rules)<line_sep>assert_reply(reply)<line_sep>self._rules.append(rules)<line_sep># Find the HCI device to use for scanning and get cached device properties reply=<await>self._bus.call(Message(destination=defs.BLUEZ_SERVICE path="/" member="GetManagedObjects" interface=defs.OBJECT_MANAGER_INTERFACE ))<line_sep>assert_reply(reply)<line_sep># get only the device interface self._cached_devices={path:unpack_variants(interfaces[defs.DEVICE_INTERFACE])<for>path,interfaces reply.body[0].items()<if>defs.DEVICE_INTERFACE<in>interfaces}<line_sep>logger.debug(f"cached devices: {self._cached_devices}")<line_sep># Apply the filters reply=<await>self._bus.call(Message(destination=defs.BLUEZ_SERVICE path=self._adapter_path interface=defs.ADAPTER_INTERFACE member="SetDiscoveryFilter" signature="a{sv}" body=[self._filters] ))<line_sep>assert_reply(reply)<line_sep># Start scanning reply=<await>self._bus.call(Message(destination=defs.BLUEZ_SERVICE path=self._adapter_path interface=defs.ADAPTER_INTERFACE member="StartDiscovery" ))<line_sep>assert_reply(reply)<block_end><async_keyword><def_stmt>stop self<block_start>reply=<await>self._bus.call(Message(destination=defs.BLUEZ_SERVICE path=self._adapter_path interface=defs.ADAPTER_INTERFACE member="StopDiscovery" ))<line_sep>assert_reply(reply)<for_stmt>rule self._rules<block_start><await>remove_match(self._bus rule)<block_end>self._rules.clear()<line_sep>self._bus.remove_message_handler(self._parse_msg)<line_sep># Try to disconnect the System Bus. <try_stmt><block_start>self._bus.disconnect()<block_end><except_stmt>Exception<as>e<block_start>logger.error("Attempt to disconnect system bus failed: {0}".format(e))<block_end>self._bus=<none><block_end><def_stmt>set_scanning_filter self **kwargs<block_start>"""Sets OS level scanning filters for the BleakScanner. 
For possible values for `filters`, see the parameters to the ``SetDiscoveryFilter`` method in the `BlueZ docs <https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/doc/adapter-api.txt?h=5.48&id=0d1e3b9c5754022c779da129025d493a198d49cf>`_ See variant types here: <https://python-dbus-next.readthedocs.io/en/latest/type-system/> Keyword Args: filters (dict): A dict of filters to be applied on discovery. """<for_stmt>k,v kwargs.get("filters" {}).items()<block_start><if_stmt>k<eq>"UUIDs"<block_start>self._filters[k]=Variant("as" v)<block_end><elif_stmt>k<eq>"RSSI"<block_start>self._filters[k]=Variant("n" v)<block_end><elif_stmt>k<eq>"DuplicateData"<block_start>self._filters[k]=Variant("b" v)<block_end><elif_stmt>k<eq>"Pathloss"<block_start>self._filters[k]=Variant("n" v)<block_end><elif_stmt>k<eq>"Transport"<block_start>self._filters[k]=Variant("s" v)<block_end><else_stmt><block_start>logger.warning("Filter '%s' is not currently supported."%k)<block_end><block_end><if_stmt>"Transport"<not><in>self._filters<block_start>self._filters["Transport"]=Variant("s" "le")<block_end><block_end>@property<def_stmt>discovered_devices self<arrow>List[BLEDevice]# Reduce output. <block_start>discovered_devices=[]<for_stmt>path,props self._devices.items()<block_start><if_stmt><not>props<block_start>logger.debug("Disregarding %s since no properties could be obtained."%path)<line_sep><continue><block_end>name,address,_,path=_device_info(path props)<if_stmt>address<is><none><block_start><continue><block_end>uuids=props.get("UUIDs" [])<line_sep>manufacturer_data=props.get("ManufacturerData" {})<line_sep>discovered_devices.append(BLEDevice(address name {"path":path "props":props} props.get("RSSI" 0) uuids=uuids manufacturer_data=manufacturer_data ))<block_end><return>discovered_devices<block_end># Helper methods <def_stmt>_invoke_callback self path:str message:Message<arrow><none><block_start>"""Invokes the advertising data callback. Args: message: The D-Bus message that triggered the callback. 
"""<if_stmt>self._callback<is><none><block_start><return><block_end>props=self._devices[path]<line_sep># Get all the information wanted to pack in the advertisement data _local_name=props.get("Name")<line_sep>_manufacturer_data={k:bytes(v)<for>k,v props.get("ManufacturerData" {}).items()}<line_sep>_service_data={k:bytes(v)<for>k,v props.get("ServiceData" {}).items()}<line_sep>_service_uuids=props.get("UUIDs" [])<line_sep># Pack the advertisement data advertisement_data=AdvertisementData(local_name=_local_name manufacturer_data=_manufacturer_data service_data=_service_data service_uuids=_service_uuids platform_data=(props message) )<line_sep>device=BLEDevice(props["Address"] props["Alias"] {"path":path "props":props} props.get("RSSI" 0) )<line_sep>self._callback(device advertisement_data)<block_end><def_stmt>_parse_msg self message:Message<block_start><if_stmt>message.message_type<ne>MessageType.SIGNAL<block_start><return><block_end>logger.debug("received D-Bus signal: {0}.{1} ({2}): {3}".format(message.interface message.member message.path message.body))<if_stmt>message.member<eq>"InterfacesAdded"# if a new device is discovered while we are scanning, add it to # the discovered devices list <block_start>obj_path:str<line_sep>interfaces_and_props:Dict[str Dict[str Variant]]<line_sep>obj_path,interfaces_and_props=message.body<line_sep>device_props=unpack_variants(interfaces_and_props.get(defs.DEVICE_INTERFACE {}))<if_stmt>device_props<block_start>self._devices[obj_path]=device_props<line_sep>self._invoke_callback(obj_path message)<block_end><block_end><elif_stmt>message.member<eq>"InterfacesRemoved"# if a device disappears while we are scanning, remove it from the # discovered devices list <block_start>obj_path:str<line_sep>interfaces:List[str]<line_sep>obj_path,interfaces=message.body<if_stmt>defs.DEVICE_INTERFACE<in>interfaces# Using pop to avoid KeyError if obj_path does not exist <block_start>self._devices.pop(obj_path <none>)<block_end><block_end><elif_stmt>message.member<eq>"PropertiesChanged"# Property change events basically mean that new advertising data # was received or the RSSI changed. Either way, it lets us know # that the device is active and we can add it to the discovered # devices list. <block_start>interface:str<line_sep>changed:Dict[str Variant]<line_sep>invalidated:List[str]<line_sep>interface,changed,invalidated=message.body<if_stmt>interface<ne>defs.DEVICE_INTERFACE<block_start><return><block_end>first_time_seen=<false><if_stmt>message.path<not><in>self._devices<block_start><if_stmt>message.path<not><in>self._cached_devices# This can happen when we start scanning. The "PropertyChanged" # handler is attached before "GetManagedObjects" is called # and so self._cached_devices is not assigned yet. # This is not a problem. We just discard the property value # since "GetManagedObjects" will return a newer value. <block_start><return><block_end>first_time_seen=<true><line_sep>self._devices[message.path]=self._cached_devices[message.path]<block_end>changed=unpack_variants(changed)<line_sep>self._devices[message.path].update(changed)<line_sep># Only do advertising data callback if this is the first time the # device has been seen or if an advertising data property changed. # Otherwise we get a flood of callbacks from RSSI changing. <if_stmt>first_time_seen<or><not>_ADVERTISING_DATA_PROPERTIES.isdisjoint(changed.keys())<block_start>self._invoke_callback(message.path message)<block_end><block_end><block_end><block_end>
""" Given the capacity of the knapsack and items specified by weights and values, return the maximum summarized value of the items that can be fit in the knapsack. Example: capacity = 5, items(value, weight) = [(60, 5), (50, 3), (70, 4), (30, 2)] result = 80 (items valued 50 and 30 can both be fit in the knapsack) The time complexity is O(n * m) and the space complexity is O(m), where n is the total number of items and m is the knapsack's capacity. """<class_stmt>Item<block_start><def_stmt>__init__ self value weight<block_start>self.value=value<line_sep>self.weight=weight<block_end><block_end><def_stmt>get_maximum_value items capacity<block_start>dp=[0]<times>(capacity+1)<for_stmt>item items<block_start><for_stmt>cur_weight reversed(range(item.weight capacity+1))<block_start>dp[cur_weight]=max(dp[cur_weight] item.value+dp[cur_weight-item.weight])<block_end><block_end><return>dp[capacity]<block_end>
<import_stmt>nvisii<import_stmt>math<import_stmt>PySide2<import_stmt>colorsys<import_from_stmt>PySide2.QtCore *<import_from_stmt>PySide2.QtWidgets *<line_sep>nvisii.initialize()<line_sep>nvisii.resize_window(1000 1000)<line_sep>nvisii.enable_denoiser()<line_sep># nvisii.configure_denoiser(False, False, True) nvisii.set_max_bounce_depth(diffuse_depth=2 glossy_depth=8 transparency_depth=8 transmission_depth=12 volume_depth=2)<line_sep># Set the sky nvisii.disable_dome_light_sampling()<line_sep>nvisii.set_dome_light_color((0 0 0))<line_sep># Set camera camera=nvisii.entity.create(name="camera" transform=nvisii.transform.create(name="camera_transform") camera=nvisii.camera.create(name="camera_camera" aspect=1.0))<line_sep>camera.get_transform().look_at(at=(0 0 0.5) # at position up=(0 0 1) # up vector eye=(0 5 2)# eye position )<line_sep>nvisii.set_camera_entity(camera)<line_sep># Floor floor=nvisii.entity.create(name="floor" mesh=nvisii.mesh.create_plane("mesh_floor") transform=nvisii.transform.create("transform_floor") material=nvisii.material.create("material_floor"))<line_sep>floor.get_material().set_base_color((0.19 0.16 0.19))<line_sep>floor.get_material().set_metallic(0)<line_sep>floor.get_material().set_roughness(1)<line_sep>floor.get_transform().set_scale((5 5 1))<line_sep># Mirror 1 mirror1=nvisii.entity.create(name="mirror1" mesh=nvisii.mesh.create_box("mesh_mirror1") transform=nvisii.transform.create("transform_mirror1") material=nvisii.material.create("material_mirror1"))<line_sep>mirror1.get_transform().look_at(eye=(-1.5 -1.5 .5) at=(0 0 .7) up=(0 0 1))<line_sep>mirror1.get_material().set_base_color((1. 1. 1.))<line_sep>mirror1.get_material().set_metallic(1)<line_sep>mirror1.get_material().set_roughness(0)<line_sep>mirror1.get_transform().set_scale((.7 .7 .1))<line_sep># Glass 1 glass1=nvisii.entity.create(name="glass1" mesh=nvisii.mesh.create_box("mesh_glass1") transform=nvisii.transform.create("transform_glass1") material=nvisii.material.create("material_glass1"))<line_sep>glass1.get_transform().look_at(eye=(1.5 1.5 .5) at=(0 0 .7) up=(0 0 1))<line_sep>glass1.get_material().set_base_color((1. 1. 1.))<line_sep>glass1.get_material().set_transmission(1)<line_sep>glass1.get_material().set_roughness(0)<line_sep>glass1.get_transform().set_scale((.7 .7 .1))<line_sep># Mirror 2 mirror2=nvisii.entity.create(name="mirror2" mesh=nvisii.mesh.create_box("mesh_mirror2") transform=nvisii.transform.create("transform_mirror2") material=nvisii.material.create("material_mirror2"))<line_sep>mirror2.get_transform().look_at(eye=(1.5 -1.5 .5) at=(0 0 .7) up=(0 0 1))<line_sep>mirror2.get_material().set_base_color((1. 1. 1.))<line_sep>mirror2.get_material().set_metallic(1)<line_sep>mirror2.get_material().set_roughness(0)<line_sep>mirror2.get_transform().set_scale((.7 .7 .1))<line_sep># Glass 2 glass2=nvisii.entity.create(name="glass2" mesh=nvisii.mesh.create_box("mesh_glass2") transform=nvisii.transform.create("transform_glass2") material=nvisii.material.create("material_glass2"))<line_sep>glass2.get_transform().look_at(eye=(-1.5 1.5 .5) at=(0 0 .7) up=(0 0 1))<line_sep>glass2.get_material().set_base_color((1. 1. 
1.))<line_sep>glass2.get_material().set_transmission(1)<line_sep>glass2.get_material().set_roughness(0)<line_sep>glass2.get_transform().set_scale((.7 .7 .1))<line_sep># Fog fog=nvisii.entity.create(name="fog" volume=nvisii.volume.create_box("mesh_fog") transform=nvisii.transform.create("transform_fog") material=nvisii.material.create("material_fog"))<line_sep>fog.get_material().set_base_color((1. 1. 1.))<line_sep>fog.get_material().set_transmission(1)<line_sep>fog.get_material().set_roughness(0)<line_sep>fog.get_volume().set_scale(100)<line_sep># Light light=nvisii.entity.create(name="light" light=nvisii.light.create("light") transform=nvisii.transform.create("light") mesh=nvisii.mesh.create_sphere("light"))<line_sep>light.get_transform().set_position((0 0 5))<line_sep>light.get_transform().set_scale((.1 .1 .1))<line_sep>light.get_light().set_exposure(7)<line_sep># Light blocker blocker=nvisii.entity.create(name="blocker" mesh=nvisii.mesh.create_capped_tube("blocker" innerRadius=.04) transform=nvisii.transform.create("blocker") material=nvisii.material.create("blocker"))<line_sep>blocker.get_transform().set_scale((10 10 .01))<line_sep>blocker.get_transform().set_position((0 0 3.0))<line_sep># Teapot teapotahedron=nvisii.entity.create(name="teapotahedron" mesh=nvisii.mesh.create_teapotahedron("teapotahedron" segments=32) transform=nvisii.transform.create("teapotahedron") material=nvisii.material.create("teapotahedron"))<line_sep>teapotahedron.get_transform().set_rotation(nvisii.angleAxis(nvisii.pi()/4.0 (0 0 1)))<line_sep>teapotahedron.get_transform().set_position((0 0 0))<line_sep>teapotahedron.get_transform().set_scale((0.4 0.4 0.4))<line_sep>teapotahedron.get_material().set_base_color((255.0/255.0 100.0/255.0 2.0/256.0))<line_sep>teapotahedron.get_material().set_roughness(0.0)<line_sep>teapotahedron.get_material().set_specular(1.0)<line_sep>teapotahedron.get_material().set_metallic(1.0)<line_sep># Make a QT window to demonstrate the difference between alpha transparency and transmission app=QApplication([])# Start an application. window=QWidget()# Create a window. layout=QVBoxLayout()# Create a layout. 
<def_stmt>rotateCamera value<block_start>value=value/100.0<line_sep>cam_pos=camera.get_transform().get_position()<line_sep>camera.get_transform().look_at(at=(0 0 0.5) # at position up=(0 0 1) # up vector eye=(5<times>math.cos(value<times>2<times>nvisii.pi()) 5<times>math.sin(value<times>2<times>nvisii.pi()) cam_pos[2])# eye position )<block_end>rotateCamera(0)<line_sep>dial=QDial()<line_sep>dial.setWrapping(<true>)<line_sep>dial.valueChanged[int].connect(rotateCamera)<line_sep>layout.addWidget(QLabel('Camera rotation'))<line_sep>layout.addWidget(dial)<def_stmt>rotateCameraElevation value# print(value) <block_start>value=value/100<line_sep>cam_pos=camera.get_transform().get_position()<line_sep>camera.get_transform().look_at(at=(0 0 0.5) # at position up=(0 0 1) # up vector eye=(cam_pos[0] cam_pos[1] 0.1+2.5<times>value)# eye position )<line_sep># print(value, 2 * math.cos(value * 2 * nvisii.pi())) <block_end>slider=QSlider(Qt.Horizontal)<line_sep>slider.valueChanged[int].connect(rotateCameraElevation)<line_sep>slider.setValue(40)<line_sep>layout.addWidget(QLabel('Camera Elevation'))<line_sep>layout.addWidget(slider)<line_sep># Add some toggles to demonstrate how the set_visibility function works camera_visibility=<true><line_sep>diffuse_visibility=<true><line_sep>glossy_visibility=<true><line_sep>transmission_visibility=<true><line_sep>scatter_visibility=<true><line_sep>shadow_visibility=<true><def_stmt>updateVisibility <block_start><global>camera_visibility<line_sep><global>diffuse_visibility<line_sep><global>glossy_visibility<line_sep><global>transmission_visibility<line_sep><global>scatter_visibility<line_sep><global>shadow_visibility<line_sep>teapotahedron.set_visibility(camera=camera_visibility diffuse=diffuse_visibility glossy=glossy_visibility transmission=transmission_visibility volume_scatter=scatter_visibility shadow=shadow_visibility)<block_end><def_stmt>toggleCamera <block_start><global>camera_visibility<line_sep>camera_visibility=<not>camera_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleCamera")<line_sep>button.clicked.connect(toggleCamera)<line_sep>layout.addWidget(button)<def_stmt>toggleDiffuse <block_start><global>diffuse_visibility<line_sep>diffuse_visibility=<not>diffuse_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleDiffuse")<line_sep>button.clicked.connect(toggleDiffuse)<line_sep>layout.addWidget(button)<def_stmt>toggleGlossy <block_start><global>glossy_visibility<line_sep>glossy_visibility=<not>glossy_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleGlossy")<line_sep>button.clicked.connect(toggleGlossy)<line_sep>layout.addWidget(button)<def_stmt>toggleTransmission <block_start><global>transmission_visibility<line_sep>transmission_visibility=<not>transmission_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleTransmission")<line_sep>button.clicked.connect(toggleTransmission)<line_sep>layout.addWidget(button)<def_stmt>toggleScattering <block_start><global>scatter_visibility<line_sep>scatter_visibility=<not>scatter_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleScattering")<line_sep>button.clicked.connect(toggleScattering)<line_sep>layout.addWidget(button)<def_stmt>toggleShadows 
<block_start><global>shadow_visibility<line_sep>shadow_visibility=<not>shadow_visibility<line_sep>updateVisibility()<block_end>button=QPushButton("toggleShadows")<line_sep>button.clicked.connect(toggleShadows)<line_sep>layout.addWidget(button)<def_stmt>setFogStrength value<block_start>value=(100-value)<times>2+10<line_sep>fog.get_volume().set_scale(value)<block_end>setFogStrength(100)<line_sep>slider=QSlider(Qt.Horizontal)<line_sep>slider.valueChanged[int].connect(setFogStrength)<line_sep>slider.setValue(100)<line_sep>layout.addWidget(QLabel('Fog Strength'))<line_sep>layout.addWidget(slider)<def_stmt>setLightHeight value<block_start>value=value/100.0<line_sep>light.get_transform().set_position((0 0 3+value<times>2))<block_end>setLightHeight(50)<line_sep>slider=QSlider(Qt.Horizontal)<line_sep>slider.valueChanged[int].connect(setLightHeight)<line_sep>slider.setValue(50)<line_sep>layout.addWidget(QLabel('Light Height'))<line_sep>layout.addWidget(slider)<line_sep>window.setLayout(layout)<line_sep>window.show()<line_sep>app.exec_()<line_sep>nvisii.deinitialize()<line_sep>
# # Copyright © 2009- The Spyder Development Team # # Licensed under the terms of the MIT License # (see LICENSE.txt for details) """ Provides QtPrintSupport classes and functions. """<import_from_stmt>. PYQT5 PYQT6 PYSIDE6 PYSIDE2 PythonQtError<if_stmt>PYQT5<block_start><import_from_stmt>PyQt5.QtPrintSupport *<block_end><elif_stmt>PYQT6<block_start><import_from_stmt>PyQt6.QtPrintSupport *<line_sep>QPageSetupDialog.exec_=QPageSetupDialog.exec<line_sep>QPrintDialog.exec_=QPrintDialog.exec<line_sep>QPrintPreviewWidget.print_=QPrintPreviewWidget.print<block_end><elif_stmt>PYSIDE6<block_start><import_from_stmt>PySide6.QtPrintSupport *<block_end><elif_stmt>PYSIDE2<block_start><import_from_stmt>PySide2.QtPrintSupport *<block_end><else_stmt><block_start><raise>PythonQtError('No Qt bindings could be found')<block_end>
<import_from_stmt>typing List<import_from_stmt>unittest.case TestCase<import_from_stmt>eventsourcing.dispatch singledispatchmethod<import_from_stmt>eventsourcing.domain AggregateEvent<import_from_stmt>eventsourcing.persistence IntegrityError Notification Transcoder<import_from_stmt>eventsourcing.system Follower Leader ProcessApplication ProcessEvent Promptable <import_from_stmt>eventsourcing.tests.test_aggregate BankAccount<import_from_stmt>eventsourcing.tests.test_application_with_popo BankAccounts EmailAddressAsStr <import_from_stmt>eventsourcing.tests.test_processingpolicy EmailNotification<class_stmt>TestProcessApplication(TestCase)<block_start><def_stmt>test_pull_and_process self<block_start>leader_cls=type(BankAccounts.__name__ (BankAccounts Leader) {} )<line_sep>accounts=leader_cls()<line_sep>email_process=EmailProcess()<line_sep>email_process.follow(accounts.name accounts.log )<line_sep>section=email_process.log["1,5"]<line_sep>self.assertEqual(len(section.items) 0)<line_sep>accounts.open_account("Alice" "<EMAIL>")<line_sep>email_process.pull_and_process(BankAccounts.name)<line_sep>section=email_process.log["1,5"]<line_sep>self.assertEqual(len(section.items) 1)<line_sep># Check we have processed the first event. self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name) 1)<line_sep># Check trying to reprocess the first event causes an IntegrityError. <with_stmt>self.assertRaises(IntegrityError)<block_start>email_process.pull_and_process(BankAccounts.name start=1)<block_end># Check we can continue from the next position. email_process.pull_and_process(BankAccounts.name start=2)<line_sep># Check we haven't actually processed anything further. self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name) 1)<line_sep>section=email_process.log["1,5"]<line_sep>self.assertEqual(len(section.items) 1)<line_sep># Subscribe for notifications. accounts.lead(PromptForwarder(email_process))<line_sep># Create another notification. accounts.open_account("Bob" "<EMAIL>")<line_sep># Check we have processed the next notification. section=email_process.log["1,5"]<line_sep>self.assertEqual(len(section.items) 2)<line_sep># Check we have actually processed the second event. self.assertEqual(email_process.recorder.max_tracking_id(BankAccounts.name) 2)<block_end><block_end><class_stmt>EmailProcess(ProcessApplication)<block_start><def_stmt>register_transcodings self transcoder:Transcoder<arrow><none><block_start>super(EmailProcess self).register_transcodings(transcoder)<line_sep>transcoder.register(EmailAddressAsStr())<block_end>@singledispatchmethod<def_stmt>policy self domain_event:AggregateEvent process_event:ProcessEvent <block_start>"""Default policy"""<block_end>@policy.register(BankAccount.Opened)<def_stmt>_ self domain_event:AggregateEvent process_event:ProcessEvent <block_start><assert_stmt>isinstance(domain_event BankAccount.Opened)<line_sep>notification=EmailNotification.create(to=domain_event.email_address subject="Your New Account" message="Dear {}, ...".format(domain_event.full_name) )<line_sep>process_event.collect_events(notification)<block_end><block_end><class_stmt>PromptForwarder(Promptable)<block_start><def_stmt>__init__ self application:Follower<block_start>self.application=application<block_end><def_stmt>receive_notifications self leader_name:str notifications:List[Notification]<arrow><none><block_start>self.application.pull_and_process(leader_name start=notifications[0].id)<block_end><block_end>
# -*- coding: utf-8 -* <import_from_stmt>collections OrderedDict<import_from_stmt>expects *<import_from_stmt>expects.testing failure<line_sep>IRRELEVANT_ARGS=(1 2)<with_stmt>describe('end_with')<block_start><with_stmt>before.each<block_start>self.str='My foo string'<line_sep>self.lst=[1 2 3]<line_sep>self.dct={'bar':0 'baz':1}<line_sep>self.ordered_dct=OrderedDict([('bar' 0) ('baz' 1)])<block_end><with_stmt>it('should pass if string ends with string')<block_start>expect(self.str).to(end_with(self.str[5:]))<block_end><with_stmt>it('should pass if list ends with arg')<block_start>expect(self.lst).to(end_with(self.lst[-1]))<block_end><with_stmt>it('should pass if list ends with args')<block_start>expect(self.lst).to(end_with(*self.lst[-2:]))<block_end><with_stmt>it('should pass if ordered dict ends with keys')<block_start>expected_args=list(self.ordered_dct)[:2]<line_sep>expect(self.ordered_dct).to(end_with(*expected_args))<block_end><with_stmt>it('should fail if string does not end with string')<block_start>str_='My foo string'<with_stmt>failure('but: ends with {0!r}'.format(str_[-5:]))<block_start>expect(self.str).to(end_with(str_[:5]))<block_end><block_end><with_stmt>it('should fail if list ends with first arg but not second')<block_start><with_stmt>failure('but: ends with {0!r}'.format(self.lst[-2:]))<block_start>expect(self.lst).to(end_with(self.lst[-1] self.lst[-1]))<block_end><block_end><with_stmt>it('should fail if subject is a dict')<block_start><with_stmt>failure('but: does not have ordered keys')<block_start>expect(self.dct).to(end_with(*IRRELEVANT_ARGS))<block_end><block_end><with_stmt>context('when negated')<block_start><with_stmt>it('should pass if string does not end with string')<block_start>expect(self.str).not_to(end_with(self.str[:5]))<block_end><with_stmt>it('should pass if list does not end with args')<block_start>expect(self.lst).not_to(end_with(*self.lst[:2]))<block_end><with_stmt>it('should pass if list ends with first arg but not second')<block_start>expected_args=self.lst[-1] self.lst[-1]<line_sep>expect(self.lst).not_to(end_with(*expected_args))<block_end><with_stmt>it('should fail if subject is a dict')<block_start><with_stmt>failure('but: does not have ordered keys')<block_start>expect(self.dct).not_to(end_with(*IRRELEVANT_ARGS))<block_end><block_end><block_end><block_end>
<import_stmt>os<import_stmt>re<import_stmt>codecs<try_stmt><block_start><import_from_stmt>setuptools setup find_packages<block_end><except_stmt>ImportError<block_start><import_from_stmt>distutils.core setup<block_end><def_stmt>get_metadata package field<block_start>""" Return package data as listed in `__{field}__` in `init.py`. """<line_sep>init_py=codecs.open(os.path.join(package '__init__.py') encoding='utf-8').read()<line_sep><return>re.search("^__{}__ = ['\"]([^'\"]+)['\"]".format(field) init_py re.MULTILINE).group(1)<block_end>setup(name='puput' version=get_metadata('puput' 'version') packages=find_packages(exclude=("example*" "tests*")) include_package_data=<true> keywords="django wagtail puput blog cms app" description='A Django blog app implemented in Wagtail.' long_description=codecs.open(os.path.join(os.path.dirname(__file__) 'README.rst') encoding='utf-8').read() install_requires=['Django>=2.0' 'wagtail>=2.7,<3.0' 'django-el-pagination>=3.2.4' 'django-social-share>=1.3.0' 'django-colorful>=1.3'] url='http://github.com/APSL/puput' author=get_metadata('puput' 'author') author_email=get_metadata('puput' 'email') long_description_content_type='text/x-rst' classifiers=['Environment :: Web Environment' 'Framework :: Django' 'Framework :: Django :: 2.2' 'Framework :: Django :: 3.0' 'Framework :: Django :: 3.1' 'Framework :: Django :: 3.2' 'Intended Audience :: Developers' 'Programming Language :: Python' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 3.6' 'Programming Language :: Python :: 3.7' 'Programming Language :: Python :: 3.8' 'Programming Language :: Python :: 3.9' 'Operating System :: OS Independent' 'Topic :: Software Development'])<line_sep>
<import_stmt>torch<import_stmt>triton<line_sep>confs=[triton.testing.Benchmark(x_names=['N'] x_vals=[128 256 512 1024 2048 3072 4096 6144 8192] line_arg='provider' line_vals=['triton' 'torch'] line_names=['Triton' 'Torch'] ylabel='GBPS' plot_name=f'{mode}-2048' args={'M':2048 'dtype':torch.float16 'mode':mode})<for>mode ['forward' 'backward']]<line_sep>@triton.testing.perf_report(confs)<def_stmt>bench_op M N dtype mode provider# create inputs <block_start>x=torch.randn(M N dtype=dtype device='cuda' requires_grad=<true>)<line_sep>idx=4+torch.ones(M dtype=torch.int64 device='cuda')<line_sep>num_gb=(2<times>x.numel()<times>x.element_size()<times>1e-9)<line_sep>gbps=<lambda>ms:num_gb/ms<times>1e3<line_sep># forward pass op={'torch':torch.nn.CrossEntropyLoss(reduction='none') 'triton':triton.ops.cross_entropy}[provider]<if_stmt>mode<eq>'forward'<block_start>mean_ms,min_ms,max_ms=triton.testing.do_bench(<lambda>:op(x idx))<block_end><if_stmt>mode<eq>'backward'<block_start>y=op(x idx)<line_sep>dy=torch.randn_like(y)<line_sep>fn=<lambda>:y.backward(dy retain_graph=<true>)<line_sep>mean_ms,min_ms,max_ms=triton.testing.do_bench(fn grad_to_none=[x])<block_end><return>gbps(mean_ms) gbps(min_ms) gbps(max_ms)<block_end><if_stmt>__name__<eq>'__main__'<block_start>bench_op.run(print_data=<true>)<block_end>
<import_stmt>pytest<import_from_stmt>brownie.test given strategy<import_from_stmt>tests.conftest INITIAL_SUPPLY YEAR YEAR_1_SUPPLY approx<line_sep>@pytest.fixture(scope="module" autouse=<true>)<def_stmt>initial_setup chain token<block_start>chain.sleep(86401)<line_sep>token.update_mining_parameters()<block_end>@given(time=strategy("decimal" min_value=1 max_value=7))<def_stmt>test_mintable_in_timeframe accounts token theoretical_supply time chain<block_start>t0=token.start_epoch_time()<line_sep>chain.sleep(int(10<power>time))<line_sep>chain.mine()<line_sep>t1=chain[-1].timestamp<if_stmt>t1-t0<ge>YEAR<block_start>token.update_mining_parameters({"from":accounts[0]})<block_end>t1=chain[-1].timestamp<line_sep>available_supply=token.available_supply()<line_sep>mintable=token.mintable_in_timeframe(t0 t1)<assert_stmt>(available_supply-(INITIAL_SUPPLY<times>10<power>18))<ge>mintable<line_sep># Should only round down, not up <if_stmt>t1<eq>t0<block_start><assert_stmt>mintable<eq>0<block_end><else_stmt><block_start><assert_stmt>(available_supply-(INITIAL_SUPPLY<times>10<power>18))/mintable-1<l>1e-7<block_end><assert_stmt>approx(theoretical_supply() available_supply 1e-16)<block_end>@given(time1=strategy("uint" max_value=YEAR) time2=strategy("uint" max_value=YEAR))<def_stmt>test_random_range_year_one token chain accounts time1 time2<block_start>creation_time=token.start_epoch_time()<line_sep>start,end=sorted((creation_time+time1 creation_time+time2))<line_sep>rate=YEAR_1_SUPPLY<floordiv>YEAR<assert_stmt>token.mintable_in_timeframe(start end)<eq>rate<times>(end-start)<block_end>@given(start=strategy("uint" max_value=YEAR<times>6) duration=strategy("uint" max_value=YEAR))<def_stmt>test_random_range_multiple_epochs token chain accounts start duration<block_start>creation_time=token.start_epoch_time()<line_sep>start<augadd>creation_time<line_sep>end=duration+start<line_sep>start_epoch=(start-creation_time)<floordiv>YEAR<line_sep>end_epoch=(end-creation_time)<floordiv>YEAR<line_sep>rate=int(YEAR_1_SUPPLY<floordiv>YEAR/(2<power>0.25)<power>start_epoch)<for_stmt>i range(end_epoch)<block_start>chain.sleep(YEAR)<line_sep>chain.mine()<line_sep>token.update_mining_parameters({"from":accounts[0]})<block_end><if_stmt>start_epoch<eq>end_epoch<block_start><assert_stmt>approx(token.mintable_in_timeframe(start end) rate<times>(end-start) 2e-16)<block_end><else_stmt><block_start><assert_stmt>token.mintable_in_timeframe(start end)<l>rate<times>end<block_end><block_end>@given(duration=strategy("uint" min_value=1 max_value=YEAR))<def_stmt>test_available_supply chain web3 token duration<block_start>creation_time=token.start_epoch_time()<line_sep>initial_supply=token.totalSupply()<line_sep>rate=token.rate()<line_sep>chain.sleep(duration)<line_sep>chain.mine()<line_sep>expected=initial_supply+(web3.eth.getBlock("latest")["timestamp"]-creation_time)<times>rate<assert_stmt>token.available_supply()<eq>expected<block_end>
<import_from_stmt>.voxel_set_abstraction VoxelSetAbstraction<line_sep>__all__={'VoxelSetAbstraction':VoxelSetAbstraction}<line_sep>
# My solution using Heap <import_stmt>heapq<def_stmt>findThreeLargestNumbers array<block_start>hp=[]<for_stmt>num array<block_start><if_stmt>len(hp)<l>3<block_start>heapq.heappush(hp num)<block_end><else_stmt><block_start><if_stmt>hp[0]<l>num<block_start>heapq.heappop(hp)<line_sep>heapq.heappush(hp num)<block_end><block_end><block_end><return>sorted(hp)<block_end># Solution provided by Algoexpert # O(n) time | O(1) space <def_stmt>find_three_largest_number array<block_start>three_largest_number=[<none> <none> <none>]<for_stmt>num array<block_start>update_largest(num three_largest_number)<block_end><return>three_largest_number<block_end><def_stmt>update_largest number three_largest_number<block_start><if_stmt>three_largest_number[2]<is><none><or>number<g>three_largest_number[2]<block_start>shift_and_update(three_largest_number number 2)<block_end><elif_stmt>three_largest_number[1]<is><none><or>number<g>three_largest_number[1]<block_start>shift_and_update(three_largest_number number 1)<block_end><elif_stmt>three_largest_number[0]<is><none><or>number<g>three_largest_number[0]<block_start>shift_and_update(three_largest_number number 0)<block_end><block_end><def_stmt>shift_and_update three_largest_number number index<block_start><for_stmt>i range(index+1)<block_start><if_stmt>i<eq>index<block_start>three_largest_number[index]=number<block_end><else_stmt><block_start>three_largest_number[i]=three_largest_number[i+1]<block_end><block_end><block_end>given_numbers=[141 1 17 -7 -17 -27 18 541 8 7 7]<line_sep>largest_numbers=find_three_largest_number(given_numbers)<line_sep>print("Largest numbers are: " largest_numbers)<line_sep>
<import_from_stmt>.crypto generate_privkey pubkey_to_addr privkey_to_addr addr_from_sig pack keccak256 keccak256_hex sign sign_transaction eth_message_hash eth_sign eth_verify eth_sign_typed_data_message eth_sign_typed_data eth_sign_typed_data_message_eip eth_sign_typed_data_eip get_balance_message sign_balance_proof verify_balance_proof sign_close verify_closing_sig <import_from_stmt>.contract create_signed_transaction create_transaction create_signed_contract_transaction create_contract_transaction create_transaction_data get_logs get_event_blocking wait_for_transaction <import_from_stmt>.private_key check_permission_safety get_private_key <import_from_stmt>.misc get_function_kwargs pop_function_kwargs <line_sep>__all__=[generate_privkey pubkey_to_addr privkey_to_addr addr_from_sig pack keccak256 keccak256_hex sign sign_transaction eth_message_hash eth_sign eth_verify eth_sign_typed_data_message eth_sign_typed_data eth_sign_typed_data_message_eip eth_sign_typed_data_eip get_balance_message sign_balance_proof verify_balance_proof sign_close verify_closing_sig create_signed_transaction create_transaction create_signed_contract_transaction create_contract_transaction create_transaction_data get_logs get_event_blocking wait_for_transaction check_permission_safety get_private_key get_function_kwargs pop_function_kwargs ]<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>siStripBuildTrackerMap=cms.EDAnalyzer("BuildTrackerMapPlugin" #input root file containing histograms InputFileName=cms.untracked.string('DQMStore.root') DoDifference=cms.untracked.bool(<false>) InputFileNameForDiff=cms.untracked.string('DQMStore.root') #name of tkHistoMap to dump TkHistoMapNameVec=cms.untracked.vstring('TkHMap_MeanCMAPV0' 'TkHMap_MeanCMAPV1' 'TkHMap_MeanCMAPV0minusAPV1' 'TkHMap_RmsCMAPV0' 'TkHMap_RmsCMAPV1' 'TkHMap_RmsCMAPV0minusAPV1') MinValueVec=cms.untracked.vdouble(120 120 -20 0 0 0) MaxValueVec=cms.untracked.vdouble(140 140 20 10 10 10) MechanicalView=cms.untracked.bool(<true>) #Name of top folder (SiStrip/MechanicalView appended automatically) HistogramFolderName=cms.untracked.string('DQMData/') #Whether to dump buffer info and raw data if any error is found: #1=errors, 2=minimum info, 3=full debug with printing of the data buffer of each FED per event. PrintDebugMessages=cms.untracked.uint32(1) TkmapParameters=cms.PSet(loadFedCabling=cms.untracked.bool(<true>) # trackerdatPath = cms.untracked.string('CommonTools/TrackerMap/data/'), # trackermaptxtPath = cms.untracked.string('CommonTools/TrackerMap/data/') ))<line_sep>
<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>pandas Timedelta timedelta_range to_timedelta <import_stmt>pandas._testing<as>tm<import_from_stmt>pandas.tseries.offsets Day Second <class_stmt>TestTimedeltas<block_start><def_stmt>test_timedelta_range self<block_start>expected=to_timedelta(np.arange(5) unit="D")<line_sep>result=timedelta_range("0 days" periods=5 freq="D")<line_sep>tm.assert_index_equal(result expected)<line_sep>expected=to_timedelta(np.arange(11) unit="D")<line_sep>result=timedelta_range("0 days" "10 days" freq="D")<line_sep>tm.assert_index_equal(result expected)<line_sep>expected=to_timedelta(np.arange(5) unit="D")+Second(2)+Day()<line_sep>result=timedelta_range("1 days, 00:00:02" "5 days, 00:00:02" freq="D")<line_sep>tm.assert_index_equal(result expected)<line_sep>expected=to_timedelta([1 3 5 7 9] unit="D")+Second(2)<line_sep>result=timedelta_range("1 days, 00:00:02" periods=5 freq="2D")<line_sep>tm.assert_index_equal(result expected)<line_sep>expected=to_timedelta(np.arange(50) unit="T")<times>30<line_sep>result=timedelta_range("0 days" freq="30T" periods=50)<line_sep>tm.assert_index_equal(result expected)<block_end>@pytest.mark.parametrize("periods, freq" [(3 "2D") (5 "D") (6 "19H12T") (7 "16H") (9 "12H")])<def_stmt>test_linspace_behavior self periods freq# GH 20976 <block_start>result=timedelta_range(start="0 days" end="4 days" periods=periods)<line_sep>expected=timedelta_range(start="0 days" end="4 days" freq=freq)<line_sep>tm.assert_index_equal(result expected)<block_end><def_stmt>test_errors self# not enough params <block_start>msg=("Of the four parameters: start, end, periods, and freq, "<concat>"exactly three must be specified")<with_stmt>pytest.raises(ValueError match=msg)<block_start>timedelta_range(start="0 days")<block_end><with_stmt>pytest.raises(ValueError match=msg)<block_start>timedelta_range(end="5 days")<block_end><with_stmt>pytest.raises(ValueError match=msg)<block_start>timedelta_range(periods=2)<block_end><with_stmt>pytest.raises(ValueError match=msg)<block_start>timedelta_range()<block_end># too many params <with_stmt>pytest.raises(ValueError match=msg)<block_start>timedelta_range(start="0 days" end="5 days" periods=10 freq="H")<block_end><block_end>@pytest.mark.parametrize("start, end, freq, expected_periods" [("1D" "10D" "2D" (10-1)<floordiv>2+1) ("2D" "30D" "3D" (30-2)<floordiv>3+1) ("2s" "50s" "5s" (50-2)<floordiv>5+1) # tests that worked before GH 33498: ("4D" "16D" "3D" (16-4)<floordiv>3+1) ("8D" "16D" "40s" (16<times>3600<times>24-8<times>3600<times>24)<floordiv>40+1) ] )<def_stmt>test_timedelta_range_freq_divide_end self start end freq expected_periods# GH 33498 only the cases where `(end % freq) == 0` used to fail <block_start>res=timedelta_range(start=start end=end freq=freq)<assert_stmt>Timedelta(start)<eq>res[0]<assert_stmt>Timedelta(end)<ge>res[-1]<assert_stmt>len(res)<eq>expected_periods<block_end><def_stmt>test_timedelta_range_infer_freq self# https://github.com/pandas-dev/pandas/issues/35897 <block_start>result=timedelta_range("0s" "1s" periods=31)<assert_stmt>result.freq<is><none><block_end><block_end>
<import_from_stmt>collections OrderedDict<import_from_stmt>copy copy deepcopy<import_from_stmt>psycopg2.sql Identifier Literal SQL<import_from_stmt>rest_framework.request Request<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<import_from_stmt>usaspending_api.common.cache_decorator cache_response<import_from_stmt>usaspending_api.common.helpers.generic_helper get_pagination_metadata<import_from_stmt>usaspending_api.common.helpers.sql_helpers execute_sql_to_ordered_dictionary<import_from_stmt>usaspending_api.common.validator.award get_internal_or_generated_award_id_model<import_from_stmt>usaspending_api.common.validator.pagination PAGINATION<import_from_stmt>usaspending_api.common.validator.tinyshield TinyShield<line_sep># In gather_award_ids, if any awards are found for IDVs in the second half of # the union, by definition, they have to be grandchildren so even though the # grandchild boolean appears to be applying to the IDV, it will actually # trickle down to its children. ACTIVITY_SQL=SQL(""" with gather_award_ids as ( select award_id, false grandchild from parent_award where {award_id_column} = {award_id} union all select cpa.award_id, true grandchild from parent_award ppa inner join parent_award cpa on cpa.parent_award_id = ppa.award_id where ppa.{award_id_column} = {award_id} ) select ca.id award_id, ta.name awarding_agency, ca.awarding_agency_id awarding_agency_id, ca.generated_unique_award_id, tf.period_of_perf_potential_e period_of_performance_potential_end_date, pa.id parent_award_id, pa.generated_unique_award_id parent_generated_unique_award_id, ca.parent_award_piid, ca.total_obligation obligated_amount, ca.base_and_all_options_value awarded_amount, ca.period_of_performance_start_date, ca.piid, rl.legal_business_name recipient_name, rp.recipient_hash || '-' || rp.recipient_level recipient_id, gaids.grandchild from gather_award_ids gaids inner join awards pa on pa.id = gaids.award_id inner join awards ca on ca.parent_award_piid = pa.piid and ca.fpds_parent_agency_id = pa.fpds_agency_id and ca.type not like 'IDV%' {hide_edges_awarded_amount} left outer join transaction_fpds tf on tf.transaction_id = ca.latest_transaction_id left outer join recipient_lookup rl on rl.duns = tf.awardee_or_recipient_uniqu left outer join recipient_profile rp on rp.recipient_hash = rl.recipient_hash and rp.recipient_level = case when tf.ultimate_parent_unique_ide is null then 'R' else 'C' end left outer join agency a on a.id = ca.awarding_agency_id left outer join toptier_agency ta on ta.toptier_agency_id = a.toptier_agency_id {hide_edges_end_date} order by ca.total_obligation desc, ca.id desc limit {limit} offset {offset} """)<line_sep># So, as it turns out, we already count all descendant contracts. Go us! # There's always the chance these may not 100% match the actual count for a # myriad of reasons, but they pretty much all involve failed operations # processes or bad data so we're going to go ahead and give the benefit of # the doubt and assume everything works as expected. 
COUNT_ACTIVITY_SQL=SQL(""" select rollup_contract_count from parent_award where {award_id_column} = {award_id} """)<line_sep>COUNT_ACTIVITY_HIDDEN_SQL=SQL(""" with gather_award_ids as ( select award_id, false grandchild from parent_award where {award_id_column} = {award_id} union all select cpa.award_id, true grandchild from parent_award ppa inner join parent_award cpa on cpa.parent_award_id = ppa.award_id where ppa.{award_id_column} = {award_id} ) select count(*) rollup_contract_count from gather_award_ids gaids inner join awards pa on pa.id = gaids.award_id inner join awards ca on ca.parent_award_piid = pa.piid and ca.fpds_parent_agency_id = pa.fpds_agency_id and ca.type not like 'IDV%' {hide_edges_awarded_amount} left outer join transaction_fpds tf on tf.transaction_id = ca.latest_transaction_id {hide_edges_end_date} """)<def_stmt>_prepare_tiny_shield_models # This endpoint has a fixed sort. No need for "sort" or "order". <block_start>models=[copy(p)<for>p PAGINATION<if>p["name"]<in>("page" "limit")]<line_sep>models.extend([get_internal_or_generated_award_id_model()])<line_sep>models.extend([{"key":"hide_edge_cases" "name":"hide_edge_cases" "type":"boolean" "optional":<true> "default":<false>}])<line_sep><return>models<block_end>TINY_SHIELD_MODELS=_prepare_tiny_shield_models()<class_stmt>IDVActivityViewSet(APIView)<block_start>""" Returns award funding info for children and grandchildren of an IDV. Used to power the Activity visualization on IDV Summary page. """<line_sep>endpoint_doc="usaspending_api/api_contracts/contracts/v2/idvs/activity.md"<line_sep>@staticmethod<def_stmt>_parse_and_validate_request request:dict<arrow>dict<block_start><return>TinyShield(deepcopy(TINY_SHIELD_MODELS)).block(request)<block_end>@staticmethod<def_stmt>_business_logic request_data:dict<arrow>tuple# By this point, our award_id has been validated and cleaned up by # TinyShield. We will either have an internal award id that is an # integer or a generated award id that is a string. 
<block_start>award_id=request_data["award_id"]<line_sep>hide_edge_cases=request_data.get("hide_edge_cases")<line_sep>hide_edges_awarded_amount=""<line_sep>hide_edges_end_date=""<line_sep>award_id_column="award_id"<if>type(award_id)<is>int<else>"generated_unique_award_id"<if_stmt>hide_edge_cases<block_start>hide_edges_awarded_amount="and ca.base_and_all_options_value > 0 and ca.total_obligation > 0"<line_sep>hide_edges_end_date="where tf.period_of_perf_potential_e is not null"<line_sep>sql=COUNT_ACTIVITY_HIDDEN_SQL.format(award_id_column=Identifier(award_id_column) award_id=Literal(award_id) hide_edges_awarded_amount=SQL(hide_edges_awarded_amount) hide_edges_end_date=SQL(hide_edges_end_date) )<block_end><else_stmt><block_start>sql=COUNT_ACTIVITY_SQL.format(award_id_column=Identifier(award_id_column) award_id=Literal(award_id))<block_end>overall_count_results=execute_sql_to_ordered_dictionary(sql)<line_sep>overall_count=overall_count_results[0]["rollup_contract_count"]<if>overall_count_results<else>0<line_sep>sql=ACTIVITY_SQL.format(award_id_column=Identifier(award_id_column) award_id=Literal(award_id) limit=Literal(request_data["limit"]+1) offset=Literal((request_data["page"]-1)<times>request_data["limit"]) hide_edges_awarded_amount=SQL(hide_edges_awarded_amount) hide_edges_end_date=SQL(hide_edges_end_date) )<line_sep><return>execute_sql_to_ordered_dictionary(sql) overall_count<block_end>@cache_response()<def_stmt>post self request:Request<arrow>Response<block_start>request_data=self._parse_and_validate_request(request.data)<line_sep>results,overall_count=self._business_logic(request_data)<line_sep>page_metadata=get_pagination_metadata(overall_count request_data["limit"] request_data["page"])<line_sep>response=OrderedDict((("results" results[:request_data["limit"]]) ("page_metadata" page_metadata)))<line_sep><return>Response(response)<block_end><block_end>
"""Unit tests for the Bitbucket hosting service."""<import_from_future_stmt> unicode_literals<import_stmt>logging<import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.test.client RequestFactory<import_from_stmt>django.utils.safestring SafeText<import_from_stmt>djblets.testing.decorators add_fixtures<import_from_stmt>reviewboard.hostingsvcs.bitbucket BitbucketAuthForm<import_from_stmt>reviewboard.hostingsvcs.errors AuthorizationError RepositoryError <import_from_stmt>reviewboard.hostingsvcs.testing HostingServiceTestCase<import_from_stmt>reviewboard.reviews.models ReviewRequest<import_from_stmt>reviewboard.scmtools.core Branch Commit<import_from_stmt>reviewboard.scmtools.crypto_utils decrypt_password encrypt_password <import_from_stmt>reviewboard.scmtools.errors FileNotFoundError<import_from_stmt>reviewboard.site.models LocalSite<import_from_stmt>reviewboard.site.urlresolvers local_site_reverse<class_stmt>BitbucketTestCase(HostingServiceTestCase)<block_start>"""Base class for Bitbucket test suites."""<line_sep>service_name='bitbucket'<line_sep>fixtures=['test_scmtools']<line_sep>default_account_data={'password':encrypt_password(HostingServiceTestCase.default_password) }<line_sep>default_repository_extra_data={'bitbucket_repo_name':'myrepo' }<block_end><class_stmt>BitbucketTests(BitbucketTestCase)<block_start>"""Unit tests for the Bitbucket hosting service."""<def_stmt>test_service_support self<block_start>"""Testing Bitbucket service support capabilities"""<line_sep>self.assertTrue(self.service_class.supports_bug_trackers)<line_sep>self.assertTrue(self.service_class.supports_repositories)<block_end><def_stmt>test_get_repository_fields_with_git_and_personal_plan self<block_start>"""Testing Bitbucket.get_repository_fields for Git and plan=personal"""<line_sep>self.assertEqual(self.get_repository_fields('Git' fields={'bitbucket_repo_name':'myrepo' } plan='personal') {'path':'[email protected]:myuser/myrepo.git' 'mirror_path':('https://[email protected]/myuser/'<concat>'myrepo.git') })<block_end><def_stmt>test_get_repository_fields_with_git_and_team_plan self<block_start>"""Testing Bitbucket.get_repository_fields for Git and plan=team"""<line_sep>self.assertEqual(self.get_repository_fields('Git' fields={'bitbucket_team_name':'myteam' 'bitbucket_team_repo_name':'myrepo' } plan='team') {'path':'git<EMAIL>:myteam/myrepo.git' 'mirror_path':('https://[email protected]/myteam/'<concat>'myrepo.git') })<block_end><def_stmt>test_get_repository_fields_with_git_and_other_user_plan self<block_start>"""Testing Bitbucket.get_repository_fields for Git and plan=other-user """<line_sep>self.assertEqual(self.get_repository_fields('Git' fields={'bitbucket_other_user_username':'someuser' 'bitbucket_other_user_repo_name':'myrepo' } plan='other-user') {'path':'git<EMAIL>:someuser/myrepo.git' 'mirror_path':('https://[email protected]/someuser/'<concat>'myrepo.git') })<block_end><def_stmt>test_get_bug_tracker_field_with_personal_plan self<block_start>"""Testing Bitbucket.get_bug_tracker_field with plan=personal"""<line_sep>self.assertTrue(self.service_class.get_bug_tracker_requires_username(plan='personal'))<line_sep>self.assertEqual(self.service_class.get_bug_tracker_field('personal' {'bitbucket_repo_name':'myrepo' 'hosting_account_username':'myuser' }) 'https://bitbucket.org/myuser/myrepo/issue/%s/')<block_end><def_stmt>test_get_bug_tracker_field_with_team_plan self<block_start>"""Testing Bitbucket.get_bug_tracker_field with 
plan=team"""<line_sep>self.assertFalse(self.service_class.get_bug_tracker_requires_username(plan='team'))<line_sep>self.assertEqual(self.service_class.get_bug_tracker_field('team' {'bitbucket_team_name':'myteam' 'bitbucket_team_repo_name':'myrepo' }) 'https://bitbucket.org/myteam/myrepo/issue/%s/')<block_end><def_stmt>test_get_bug_tracker_field_with_other_user_plan self<block_start>"""Testing Bitbucket.get_bug_tracker_field with plan=other-user"""<line_sep>self.assertFalse(self.service_class.get_bug_tracker_requires_username(plan='other-user'))<line_sep>self.assertEqual(self.service_class.get_bug_tracker_field('other-user' {'bitbucket_other_user_username':'someuser' 'bitbucket_other_user_repo_name':'myrepo' }) 'https://bitbucket.org/someuser/myrepo/issue/%s/')<block_end><def_stmt>test_get_repository_hook_instructions self<block_start>"""Testing BitBucket.get_repository_hook_instructions"""<line_sep>account=self.create_hosting_account()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep>hooks_uuid=repository.get_or_create_hooks_uuid()<line_sep>request=RequestFactory().get(path='/')<line_sep>request.user=User.objects.create(username='test-user')<line_sep>content=repository.hosting_service.get_repository_hook_instructions(request=request repository=repository)<line_sep>self.assertIsInstance(content SafeText)<line_sep>self.assertIn(('https://bitbucket.org/myuser/myrepo/admin/addon/admin/'<concat>'bitbucket-webhooks/bb-webhooks-repo-admin') content)<line_sep>self.assertIn(('http://example.com/repos/1/bitbucket/hooks/%s/close-submitted/'%hooks_uuid) content)<line_sep>self.assertIn('Review Board supports closing' content)<line_sep>self.assertIn('<code>Review Board</code>' content)<block_end><def_stmt>test_check_repository_with_personal_plan self<block_start>"""Testing Bitbucket.check_repository with plan=personal"""<with_stmt>self.setup_http_test(payload=b'{"scm": "git"}' expected_http_calls=1)<as>ctx<block_start>ctx.service.check_repository(bitbucket_repo_name='myrepo' plan='personal' tool_name='Git')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo'<concat>'?fields=scm'))<block_end><def_stmt>test_check_repository_with_team_plan self<block_start>"""Testing Bitbucket.check_repository with plan=team"""<with_stmt>self.setup_http_test(payload=b'{"scm": "git"}' expected_http_calls=1)<as>ctx<block_start>ctx.service.check_repository(bitbucket_team_name='myteam' bitbucket_team_repo_name='myrepo' tool_name='Git' plan='team')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myteam/myrepo'<concat>'?fields=scm'))<block_end><def_stmt>test_check_repository_with_other_user_plan self<block_start>"""Testing Bitbucket.check_repository with plan=other-user"""<with_stmt>self.setup_http_test(payload=b'{"scm": "git"}' expected_http_calls=1)<as>ctx<block_start>ctx.service.check_repository(bitbucket_other_user_username='someuser' bitbucket_other_user_repo_name='myrepo' plan='other-user' tool_name='Git')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/someuser/myrepo'<concat>'?fields=scm'))<block_end><def_stmt>test_check_repository_with_slash self<block_start>"""Testing Bitbucket.check_repository with /"""<line_sep>expected_message='Please specify just the name of the repository, not a path.'<with_stmt>self.setup_http_test(expected_http_calls=0)<as>ctx<block_start><with_stmt>self.assertRaisesMessage(RepositoryError 
expected_message)<block_start>ctx.service.check_repository(bitbucket_team_name='myteam' bitbucket_team_repo_name='myteam/myrepo' plan='team')<block_end><block_end><block_end><def_stmt>test_check_repository_with_dot_git self<block_start>"""Testing Bitbucket.check_repository with .git"""<line_sep>expected_message='Please specify just the name of the repository without ".git".'<with_stmt>self.setup_http_test(expected_http_calls=0)<as>ctx<block_start><with_stmt>self.assertRaisesMessage(RepositoryError expected_message)<block_start>ctx.service.check_repository(bitbucket_team_name='myteam' bitbucket_team_repo_name='myrepo.git' plan='team')<block_end><block_end><block_end><def_stmt>test_check_repository_with_type_mismatch self<block_start>"""Testing Bitbucket.check_repository with type mismatch"""<line_sep>error_message=('The Bitbucket repository being configured does not match the '<concat>'type of repository you have selected.')<with_stmt>self.setup_http_test(payload=b'{"scm": "git"}' expected_http_calls=1)<as>ctx# Check Git repositories. <block_start><with_stmt>self.assertRaisesMessage(RepositoryError error_message)<block_start>ctx.service.check_repository(bitbucket_team_name='myteam' bitbucket_team_repo_name='myrepo' plan='team' tool_name='Mercurial')<block_end><block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myteam/myrepo'<concat>'?fields=scm'))<block_end><def_stmt>test_authorize self<block_start>"""Testing Bitbucket.authorize"""<line_sep>hosting_account=self.create_hosting_account(data={})<with_stmt>self.setup_http_test(payload=b'{}' hosting_account=hosting_account expected_http_calls=1)<as>ctx<block_start>self.assertFalse(ctx.service.is_authorized())<line_sep>ctx.service.authorize(username='myuser' password='<PASSWORD>')<block_end>self.assertIn('password' hosting_account.data)<line_sep>self.assertNotEqual(hosting_account.data['password'] '<PASSWORD>')<line_sep>self.assertEqual(decrypt_password(hosting_account.data['password']) '<PASSWORD>')<line_sep>self.assertTrue(ctx.service.is_authorized())<line_sep>ctx.assertHTTPCall(0 url='https://bitbucket.org/api/2.0/user' username='myuser' password='<PASSWORD>')<block_end><def_stmt>test_authorize_with_bad_credentials self<block_start>"""Testing Bitbucket.authorize with bad credentials"""<line_sep>hosting_account=self.create_hosting_account(data={})<line_sep>expected_message=('Invalid Bitbucket username or password. 
Make sure you are using '<concat>'your Bitbucket username and not e-mail address, and are using an '<concat>'app password if two-factor authentication is enabled.')<with_stmt>self.setup_http_test(status_code=401 hosting_account=hosting_account expected_http_calls=1)<as>ctx<block_start>self.assertFalse(ctx.service.is_authorized())<with_stmt>self.assertRaisesMessage(AuthorizationError expected_message)<block_start>ctx.service.authorize(username='myuser' password='<PASSWORD>')<block_end><block_end>self.assertNotIn('password' hosting_account.data)<line_sep>self.assertFalse(ctx.service.is_authorized())<line_sep>ctx.assertHTTPCall(0 url='https://bitbucket.org/api/2.0/user' username='myuser' password='<PASSWORD>')<block_end><def_stmt>test_get_file_with_git_and_base_commit_id self<block_start>"""Testing Bitbucket.get_file with Git and base commit ID"""<line_sep>self._test_get_file(tool_name='Git' revision='123' base_commit_id='456' expected_revision='456')<block_end><def_stmt>test_get_file_with_git_and_revision self<block_start>"""Testing Bitbucket.get_file with Git and revision"""<with_stmt>self.assertRaises(FileNotFoundError)<block_start>self._test_get_file(tool_name='Git' revision='123' base_commit_id=<none> expected_revision='123')<block_end><block_end><def_stmt>test_get_file_exists_with_git_and_base_commit_id self<block_start>"""Testing Bitbucket.get_file_exists with Git and base commit ID"""<line_sep>self._test_get_file_exists(tool_name='Git' revision='123' base_commit_id='456' expected_revision='456' expected_found=<true>)<block_end><def_stmt>test_get_file_exists_with_git_and_revision self<block_start>"""Testing Bitbucket.get_file_exists with Git and revision"""<line_sep>self._test_get_file_exists(tool_name='Git' revision='123' base_commit_id=<none> expected_revision='123' expected_found=<false> expected_http_called=<false>)<block_end><def_stmt>test_get_file_exists_with_git_and_404 self<block_start>"""Testing BitBucket.get_file_exists with Git and a 404 error"""<line_sep>self._test_get_file_exists(tool_name='Git' revision='123' base_commit_id='456' expected_revision='456' expected_found=<false>)<block_end><def_stmt>test_get_branches self<block_start>"""Testing Bitbucket.get_branches"""<line_sep>branches_api_response_1=self.dump_json({'next':('https://bitbucket.org/api/2.0/repositories/myuser/'<concat>'myrepo/refs/branches'<concat>'?fields=values.name%2Cvalues.target.hash%2Cnext'<concat>'&pagelen=100&page=2') 'values':[{'name':'branch1' 'target':{'hash':'1c44b461cebe5874a857c51a4a13a849a4d1e52d' } } {'name':'branch2' 'target':{'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' } } ] })<line_sep>branches_api_response_2=self.dump_json({'values':[{'name':'branch3' 'target':{'hash':'e5874a857c51a4a13a849a4d1e52d1c44b461ceb' } } {'name':'branch4' 'target':{'hash':'d286691517e6325fea5c7a21d5e44568f7d33647' } } ] })<line_sep>get_repository_api_response=self.dump_json({'mainbranch':{'name':'branch3' } })<line_sep>paths={'/api/2.0/repositories/myuser/myrepo':{'payload':get_repository_api_response } ('/api/2.0/repositories/myuser/myrepo/refs/branches'<concat>'?fields=values.name%2Cvalues.target.hash%2Cnext&pagelen=100'):{'payload':branches_api_response_1 } ('/api/2.0/repositories/myuser/myrepo/refs/branches'<concat>'?fields=values.name%2Cvalues.target.hash%2Cnext&page=2'<concat>'&pagelen=100'):{'payload':branches_api_response_2 } }<with_stmt>self.setup_http_test(self.make_handler_for_paths(paths) 
expected_http_calls=3)<as>ctx<block_start>repository=self.create_repository(tool_name='Git')<line_sep>branches=ctx.service.get_branches(repository)<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo'<concat>'?fields=mainbranch.name'))<line_sep>ctx.assertHTTPCall(1 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'refs/branches'<concat>'?fields=values.name%2Cvalues.target.hash%2Cnext'<concat>'&pagelen=100'))<line_sep>ctx.assertHTTPCall(2 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'refs/branches'<concat>'?fields=values.name%2Cvalues.target.hash%2Cnext'<concat>'&page=2&pagelen=100'))<line_sep>self.assertEqual(branches [Branch(id='branch1' commit='1<PASSWORD>a<PASSWORD>a<PASSWORD>') Branch(id='branch2' commit='44568f7d33647d286691517e6325fea5c7a21d5e') Branch(id='branch3' commit='e5874a857c51a4a13a849a4d1e52d1c44b461ceb' default=<true>) Branch(id='branch4' commit='d286691517e6325fea5c7a21d5e44568f7d33647') ])<block_end><def_stmt>test_get_commits self<block_start>"""Testing Bitbucket.get_commits"""<line_sep>payload=self.dump_json({'size':2 'values':[{'hash':'1c44b461cebe5874a857c51a4a13a849a4d1e52d' 'author':{'raw':'Some User 1 <<EMAIL>>' } 'date':'2017-01-24T13:11:22+00:00' 'message':'This is commit 1.' 'parents':[{'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' } ] } {'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' 'author':{'raw':'Some User 2 <<EMAIL>>' } 'date':'2017-01-23T08:09:10+00:00' 'message':'This is commit 2.' 'parents':[{'hash':'e5874a857c51a4a13a849a4d1e52d1c44b461ceb' } ] } ] })<with_stmt>self.setup_http_test(payload=payload expected_http_calls=1)<as>ctx<block_start>repository=ctx.create_repository(tool_name='Git')<line_sep>commits=ctx.service.get_commits(repository)<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'commits'<concat>'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'<concat>'%2Cvalues.message%2Cvalues.parents.hash'<concat>'&pagelen=20'))<line_sep>self.assertEqual(commits [Commit(author_name='Some User 1 <<EMAIL>>' date='2017-01-24T13:11:22+00:00' id='<PASSWORD>be<PASSWORD>' message='This is commit 1.' parent='44568f7d33647d286691517e6325fea5c7a21d5e') Commit(author_name='Some User 2 <<EMAIL>>' date='2017-01-23T08:09:10+00:00' id='<PASSWORD>' message='This is commit 2.' parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb') ])<for_stmt>commit commits<block_start>self.assertIsNone(commit.diff)<block_end><block_end><def_stmt>test_get_commits_with_start self<block_start>"""Testing Bitbucket.get_commits with start="""<line_sep>payload=self.dump_json({'size':2 'values':[{'hash':'1c44b461cebe5874a857c51a4a13a849a4d1e52d' 'author':{'raw':'Some User 1 <<EMAIL>>' } 'date':'2017-01-24T13:11:22+00:00' 'message':'This is commit 1.' 'parents':[{'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' } ] } {'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' 'author':{'raw':'Some User 2 <<EMAIL>>' } 'date':'2017-01-23T08:09:10+00:00' 'message':'This is commit 2.' 
'parents':[{'hash':'e5874a857c51a4a13a849a4d1e52d1c44b461ceb' } ] } ] })<with_stmt>self.setup_http_test(payload=payload expected_http_calls=1)<as>ctx<block_start>repository=ctx.create_repository(tool_name='Git')<line_sep>commits=ctx.service.get_commits(repository start='1c44b461cebe5874a857c51a4a13a849a4d1e5')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'commits/1c44b461cebe5874a857c51a4a13a849a4d1e5'<concat>'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'<concat>'%2Cvalues.message%2Cvalues.parents.hash'<concat>'&pagelen=20'))<line_sep>self.assertEqual(commits [Commit(author_name='Some User 1 <<EMAIL>>' date='2017-01-24T13:11:22+00:00' id='<PASSWORD>' message='This is commit 1.' parent='44568f7d33647d286691517e6325fea5c7a21d5e') Commit(author_name='Some User 2 <<EMAIL>>' date='2017-01-23T08:09:10+00:00' id='<PASSWORD>' message='This is commit 2.' parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb') ])<for_stmt>commit commits<block_start>self.assertIsNone(commit.diff)<block_end><block_end><def_stmt>test_get_commits_with_branch self<block_start>"""Testing Bitbucket.get_commits with branch="""<line_sep>payload=self.dump_json({'size':2 'values':[{'hash':'1c44b461cebe5874a857c51a4a13a849a4d1e52d' 'author':{'raw':'Some User 1 <<EMAIL>>' } 'date':'2017-01-24T13:11:22+00:00' 'message':'This is commit 1.' 'parents':[{'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' } ] } {'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' 'author':{'raw':'Some User 2 <<EMAIL>>' } 'date':'2017-01-23T08:09:10+00:00' 'message':'This is commit 2.' 'parents':[{'hash':'e5874a857c51a4a13a849a4d1e52d1c44b461ceb' } ] } ] })<with_stmt>self.setup_http_test(payload=payload expected_http_calls=1)<as>ctx<block_start>repository=ctx.create_repository(tool_name='Git')<line_sep>commits=ctx.service.get_commits(repository branch='master')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'commits/master'<concat>'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'<concat>'%2Cvalues.message%2Cvalues.parents.hash'<concat>'&pagelen=20'))<line_sep>self.assertEqual(commits [Commit(author_name='Some User 1 <<EMAIL>>' date='2017-01-24T13:11:22+00:00' id='<PASSWORD>1a4<PASSWORD>a8<PASSWORD>4<PASSWORD>' message='This is commit 1.' parent='44568f7d33647d286691517e6325fea5c7a21d5e') Commit(author_name='Some User 2 <<EMAIL>>' date='2017-01-23T08:09:10+00:00' id='<PASSWORD>' message='This is commit 2.' parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb') ])<for_stmt>commit commits<block_start>self.assertIsNone(commit.diff)<block_end><block_end><def_stmt>test_get_commits_with_start_and_branch self<block_start>"""Testing Bitbucket.get_commits with start= and branch="""<line_sep>payload=self.dump_json({'size':2 'values':[{'hash':'1c44b461cebe5874a857c51a4a13a849a4d1e52d' 'author':{'raw':'Some User 1 <<EMAIL>>' } 'date':'2017-01-24T13:11:22+00:00' 'message':'This is commit 1.' 'parents':[{'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' } ] } {'hash':'44568f7d33647d286691517e6325fea5c7a21d5e' 'author':{'raw':'Some User 2 <<EMAIL>>' } 'date':'2017-01-23T08:09:10+00:00' 'message':'This is commit 2.' 
'parents':[{'hash':'e5874a857c51a4a13a849a4d1e52d1c44b461ceb' } ] } ] })<with_stmt>self.setup_http_test(payload=payload expected_http_calls=1)<as>ctx<block_start>repository=ctx.create_repository(tool_name='Git')<line_sep>commits=ctx.service.get_commits(repository start='1c44b461cebe5874a857c51a4a13a849a4d1e52d' branch='master')<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'commits/1c44b461cebe5874a857c51a4a13a849a4d1e52d'<concat>'?fields=values.author.raw%2Cvalues.hash%2Cvalues.date'<concat>'%2Cvalues.message%2Cvalues.parents.hash'<concat>'&pagelen=20'))<line_sep>self.assertEqual(commits [Commit(author_name='Some User 1 <<EMAIL>>' date='2017-01-24T13:11:22+00:00' id='<PASSWORD>' message='This is commit 1.' parent='44568f7d33647d286691517e6325fea5c7a21d5e') Commit(author_name='Some User 2 <<EMAIL>>' date='2017-01-23T08:09:10+00:00' id='<PASSWORD>' message='This is commit 2.' parent='e5874a857c51a4a13a849a4d1e52d1c44b461ceb') ])<for_stmt>commit commits<block_start>self.assertIsNone(commit.diff)<block_end><block_end><def_stmt>test_get_change self<block_start>"""Testing BitBucket.get_change"""<line_sep>commit_sha='1c44b461cebe5874a857c51a4a13a849a4d1e52d'<line_sep>parent_sha='44568f7d33647d286691517e6325fea5c7a21d5e'<line_sep>paths={'/api/2.0/repositories/myuser/myrepo/commit/%s'%commit_sha:{'payload':self.dump_json({'hash':commit_sha 'author':{'raw':'Some User <<EMAIL>>' } 'date':'2017-01-24T13:11:22+00:00' 'message':'This is a message.' 'parents':[{'hash':parent_sha}] }) } '/api/2.0/repositories/myuser/myrepo/diff/%s'%commit_sha:{'payload':b'This is a test \xc7.' } }<with_stmt>self.setup_http_test(self.make_handler_for_paths(paths) expected_http_calls=2)<as>ctx<block_start>repository=ctx.create_repository(tool_name='Git')<line_sep>commit=ctx.service.get_change(repository commit_sha)<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'commit/1c44b461cebe5874a857c51a4a13a849a4d1e52d'<concat>'?fields=author.raw%2Chash%2Cdate%2Cmessage%2Cparents.hash'))<line_sep>ctx.assertHTTPCall(1 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'diff/1c44b461cebe5874a857c51a4a13a849a4d1e52d'))<line_sep>self.assertEqual(commit Commit(author_name='Some User <<EMAIL>>' date='2017-01-24T13:11:22+00:00' id=commit_sha message='This is a message.' parent=parent_sha))<line_sep>self.assertEqual(commit.diff b'This is a test \xc7.\n')<block_end><def_stmt>_test_get_file self tool_name revision base_commit_id expected_revision<block_start>"""Test file fetching. Args: tool_name (unicode): The name of the SCM Tool to test with. revision (unicode, optional): The revision to check. base_commit_id (unicode, optional): The base commit to fetch against. expected_revision (unicode, optional): The revision expected in the payload. """<with_stmt>self.setup_http_test(payload=b'My data' expected_http_calls=1)<as>ctx<block_start>repository=ctx.create_repository(tool_name=tool_name)<line_sep>result=ctx.service.get_file(repository=repository path='path' revision=revision base_commit_id=base_commit_id)<block_end>ctx.assertHTTPCall(0 url=('https://bitbucket.org/api/2.0/repositories/myuser/myrepo/'<concat>'src/%s/path'%expected_revision))<line_sep>self.assertIsInstance(result bytes)<line_sep>self.assertEqual(result b'My data')<block_end><def_stmt>_test_get_file_exists self tool_name revision base_commit_id expected_revision expected_found expected_http_called=<true><block_start>"""Test file existence checks. 
Args: tool_name (unicode): The name of the SCM Tool to test with. revision (unicode, optional): The revision to check. base_commit_id (unicode, optional): The base commit to fetch against. expected_revision (unicode, optional): The revision expected in the payload. expected_found (bool, optional): Whether a truthy response should be expected. expected_http_called (bool, optional): Whether an HTTP request is expected to have been made. """<if_stmt>expected_found<block_start>payload=b'file...'<line_sep>status_code=<none><block_end><else_stmt><block_start>payload=<none><line_sep>status_code=404<block_end><if_stmt>expected_http_called<block_start>expected_calls=1<block_end><else_stmt><block_start>expected_calls=0<block_end><with_stmt>self.setup_http_test(payload=payload status_code=status_code expected_http_calls=expected_calls)<as>ctx<block_start>repository=ctx.create_repository(tool_name=tool_name)<line_sep>result=ctx.service.get_file_exists(repository=repository path='path' revision=revision base_commit_id=base_commit_id)<block_end><if_stmt>expected_http_called<block_start>ctx.assertHTTPCall(0 method='HEAD' url=('https://bitbucket.org/api/2.0/repositories/myuser/'<concat>'myrepo/src/%s/path'%expected_revision))<block_end>self.assertEqual(result expected_found)<block_end><block_end><class_stmt>BitbucketAuthFormTests(BitbucketTestCase)<block_start>"""Unit tests for BitbucketAuthForm."""<def_stmt>test_clean_hosting_account_username_with_username self<block_start>"""Testing BitbucketAuthForm.clean_hosting_account_username with username """<line_sep>form=BitbucketAuthForm(hosting_service_cls=self.service_class data={'hosting_account_username':'myuser' 'hosting_account_password':'<PASSWORD>' })<line_sep>self.assertTrue(form.is_valid())<block_end><def_stmt>test_clean_hosting_account_username_with_email self<block_start>"""Testing BitbucketAuthForm.clean_hosting_account_username with e-mail address """<line_sep>form=BitbucketAuthForm(hosting_service_cls=self.service_class data={'hosting_account_username':'<EMAIL>' 'hosting_account_password':'<PASSWORD>' })<line_sep>self.assertFalse(form.is_valid())<line_sep>self.assertEqual(form.errors['hosting_account_username'] ['This must be your Bitbucket username (the same one '<concat>'you would see in URLs for your own repositories), '<concat>'not your Atlassian e-mail address.'])<block_end><block_end><class_stmt>CloseSubmittedHookTests(BitbucketTestCase)<block_start>"""Unit tests for the Bitbucket close-submitted webhook."""<line_sep>fixtures=['test_users' 'test_scmtools']<line_sep>COMMITS_URL=('/api/2.0/repositories/test/test/commits'<concat>'?exclude=abc123&include=def123')<def_stmt>test_close_submitted_hook self<block_start>"""Testing BitBucket close_submitted hook"""<line_sep>self._test_post_commit_hook()<block_end>@add_fixtures(['test_site'])<def_stmt>test_close_submitted_hook_with_local_site self<block_start>"""Testing BitBucket close_submitted hook with a Local Site"""<line_sep>self._test_post_commit_hook(LocalSite.objects.get(name=self.local_site_name))<block_end><def_stmt>test_close_submitted_hook_with_truncated_commits self<block_start>"""Testing BitBucket close_submitted hook with truncated list of commits """<line_sep>account=self.create_hosting_account()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep># Create two review requests: One per referenced commit. 
review_request1=self.create_review_request(id=99 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>review_request2=self.create_review_request(id=100 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request2.status review_request2.PENDING_REVIEW)<line_sep>page2_url='%s&page=2&pagelen=100'%self.COMMITS_URL<line_sep>paths={'%s&pagelen=100'%self.COMMITS_URL:{'payload':self.dump_json({'next':page2_url 'values':[{'hash':'1c44b461cebe5874a857c51a4a13a84'<concat>'9a4d1e52d' 'message':'This is my fancy commit.\n'<concat>'\n'<concat>'Reviewed at http://example.com%s'%review_request1.get_absolute_url() } ] }) } page2_url:{'payload':self.dump_json({'values':[{'hash':'9fad89712ebe5874a857c5112a3c9d1'<concat>'87ada0dbc' 'message':'This is another commit\n'<concat>'\n'<concat>'Reviewed at http://example.com%s'%review_request2.get_absolute_url() } ] }) }}<line_sep># Simulate the webhook. url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<with_stmt>self.setup_http_test(self.make_handler_for_paths(paths) expected_http_calls=2)<block_start>self._post_commit_hook_payload(post_url=url review_request_url=review_request1.get_absolute_url() truncated=<true>)<block_end># Check the first review request. # # The first review request has an entry in the truncated list and the # fetched list. We'll make sure we've only processed it once. review_request1=ReviewRequest.objects.get(pk=review_request1.pk)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.SUBMITTED)<line_sep>self.assertEqual(review_request1.changedescs.count() 1)<line_sep>changedesc=review_request1.changedescs.get()<line_sep>self.assertEqual(changedesc.text 'Pushed to master (1c44b46)')<line_sep># Check the first review request. review_request2=ReviewRequest.objects.get(pk=review_request2.pk)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request2.status review_request2.SUBMITTED)<line_sep>self.assertEqual(review_request2.changedescs.count() 1)<line_sep>changedesc=review_request2.changedescs.get()<line_sep>self.assertEqual(changedesc.text 'Pushed to master (9fad897)')<block_end><def_stmt>test_close_submitted_hook_with_truncated_commits_limits self<block_start>"""Testing BitBucket close_submitted hook with truncated list of commits obeys limits """<line_sep>paths={'%s&pagelen=100'%self.COMMITS_URL:{'payload':self.dump_json({'next':'%s&page=2'%self.COMMITS_URL 'values':[] }) } }<line_sep>paths.update({'%s&page=%s&pagelen=100'%(self.COMMITS_URL i):{'payload':self.dump_json({'next':'%s&page=%s'%(self.COMMITS_URL i+1) 'values':[] }) }<for>i range(1 10)})<line_sep>account=self.create_hosting_account()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep># Create two review requests: One per referenced commit. 
review_request1=self.create_review_request(id=99 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>review_request2=self.create_review_request(id=100 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request2.status review_request2.PENDING_REVIEW)<line_sep># Simulate the webhook. url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep># There should have been 5 API requests. We'll never hit the final # page. <with_stmt>self.setup_http_test(self.make_handler_for_paths(paths) expected_http_calls=5)<block_start>self._post_commit_hook_payload(post_url=url review_request_url=review_request1.get_absolute_url() truncated=<true>)<block_end># The review requests should not have been updated. review_request1=ReviewRequest.objects.get(pk=review_request1.pk)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>self.assertEqual(review_request1.changedescs.count() 0)<line_sep># Check the first review request. review_request2=ReviewRequest.objects.get(pk=review_request2.pk)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>self.assertEqual(review_request2.changedescs.count() 0)<block_end><def_stmt>test_close_submitted_hook_with_truncated_and_auth_error self<block_start>"""Testing BitBucket close_submitted hook with truncated list of commits and authentication error talking to Bitbucket """<line_sep>account=self.create_hosting_account()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep># Create two review requests: One per referenced commit. review_request1=self.create_review_request(id=99 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>review_request2=self.create_review_request(id=100 repository=repository publish=<true>)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request2.status review_request2.PENDING_REVIEW)<line_sep># Simulate the webhook. url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<with_stmt>self.setup_http_test(status_code=401 hosting_account=account expected_http_calls=1)<block_start>response=self._post_commit_hook_payload(post_url=url review_request_url=review_request1.get_absolute_url() truncated=<true>)<block_end>self.assertEqual(response.status_code 403)<line_sep>self.assertEqual(response.content b'Incorrect username or password configured for '<concat>b'this repository on Review Board.')<line_sep># The review requests should not have been updated. review_request1=ReviewRequest.objects.get(pk=review_request1.pk)<line_sep>self.assertTrue(review_request1.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>self.assertEqual(review_request1.changedescs.count() 0)<line_sep># Check the first review request. 
review_request2=ReviewRequest.objects.get(pk=review_request2.pk)<line_sep>self.assertTrue(review_request2.public)<line_sep>self.assertEqual(review_request1.status review_request1.PENDING_REVIEW)<line_sep>self.assertEqual(review_request2.changedescs.count() 0)<block_end><def_stmt>test_close_submitted_hook_with_invalid_repo self<block_start>"""Testing BitBucket close_submitted hook with invalid repository"""<line_sep>repository=self.create_repository()<line_sep>review_request=self.create_review_request(repository=repository publish=<true>)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep>response=self._post_commit_hook_payload(post_url=url review_request_url=review_request.get_absolute_url())<line_sep>self.assertEqual(response.status_code 404)<line_sep>review_request=ReviewRequest.objects.get(pk=review_request.pk)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>self.assertEqual(review_request.changedescs.count() 0)<block_end>@add_fixtures(['test_site'])<def_stmt>test_close_submitted_hook_with_invalid_site self<block_start>"""Testing BitBucket close_submitted hook with invalid Local Site"""<line_sep>local_site=LocalSite.objects.get(name=self.local_site_name)<line_sep>account=self.create_hosting_account(local_site=local_site)<line_sep>account.save()<line_sep>repository=self.create_repository(hosting_account=account local_site=local_site)<line_sep>review_request=self.create_review_request(repository=repository publish=<true>)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>url=local_site_reverse('bitbucket-hooks-close-submitted' local_site_name='badsite' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep>response=self._post_commit_hook_payload(post_url=url review_request_url=review_request.get_absolute_url())<line_sep>self.assertEqual(response.status_code 404)<line_sep>review_request=ReviewRequest.objects.get(pk=review_request.pk)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>self.assertEqual(review_request.changedescs.count() 0)<block_end><def_stmt>test_close_submitted_hook_with_invalid_service_id self<block_start>"""Testing BitBucket close_submitted hook with invalid hosting service ID """<line_sep># We'll test against GitHub for this test. 
account=self.create_hosting_account()<line_sep>account.service_name='github'<line_sep>account.save()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep>review_request=self.create_review_request(repository=repository publish=<true>)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep>response=self._post_commit_hook_payload(post_url=url review_request_url=review_request.get_absolute_url())<line_sep>self.assertEqual(response.status_code 404)<line_sep>review_request=ReviewRequest.objects.get(pk=review_request.pk)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>self.assertEqual(review_request.changedescs.count() 0)<block_end><def_stmt>test_close_submitted_hook_with_invalid_review_request self<block_start>"""Testing BitBucket close_submitted hook with invalid review request """<line_sep>self.spy_on(logging.error)<line_sep>account=self.create_hosting_account()<line_sep>repository=self.create_repository(hosting_account=account)<line_sep>review_request=self.create_review_request(repository=repository publish=<true>)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>url=local_site_reverse('bitbucket-hooks-close-submitted' kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep>response=self._post_commit_hook_payload(post_url=url review_request_url='/r/9999/')<line_sep>self.assertEqual(response.status_code 200)<line_sep>review_request=ReviewRequest.objects.get(pk=review_request.pk)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>self.assertEqual(review_request.changedescs.count() 0)<line_sep>self.assertTrue(logging.error.called_with('close_all_review_requests: Review request #%s does not exist.' 9999))<block_end><def_stmt>_test_post_commit_hook self local_site=<none><block_start>"""Testing posting to a commit hook. This will simulate pushing a commit and posting the resulting webhook payload from Bitbucket to the handler for the hook. Args: local_site (reviewboard.site.models.LocalSite, optional): The Local Site owning the review request. 
"""<line_sep>account=self.create_hosting_account(local_site=local_site)<line_sep>repository=self.create_repository(hosting_account=account local_site=local_site)<line_sep>review_request=self.create_review_request(repository=repository local_site=local_site publish=<true>)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.PENDING_REVIEW)<line_sep>url=local_site_reverse('bitbucket-hooks-close-submitted' local_site=local_site kwargs={'repository_id':repository.pk 'hosting_service_id':'bitbucket' 'hooks_uuid':repository.get_or_create_hooks_uuid() })<line_sep>self._post_commit_hook_payload(post_url=url review_request_url=review_request.get_absolute_url())<line_sep>review_request=ReviewRequest.objects.get(pk=review_request.pk)<line_sep>self.assertTrue(review_request.public)<line_sep>self.assertEqual(review_request.status review_request.SUBMITTED)<line_sep>self.assertEqual(review_request.changedescs.count() 1)<line_sep>changedesc=review_request.changedescs.get()<line_sep>self.assertEqual(changedesc.text 'Pushed to master (1c44b46)')<block_end><def_stmt>_post_commit_hook_payload self post_url review_request_url truncated=<false><block_start>"""Post a payload for a hook for testing. Args: post_url (unicode): The URL to post to. review_request_url (unicode): The URL of the review request being represented in the payload. truncated (bool, optional): Whether the commit list should be marked truncated. Results: django.core.handlers.request.wsgi.WSGIRequest: The post request. """<line_sep><return>self.client.post(post_url content_type='application/json' data=self.dump_json({# NOTE: This payload only contains the content we make # use of in the hook. 'push':{'changes':[{'new':{'type':'branch' 'name':'master' } 'truncated':truncated 'commits':[{'hash':'1c44b461cebe5874a857c51a4a13a84'<concat>'9a4d1e52d' 'message':'This is my fancy commit\n'<concat>'\n'<concat>'Reviewed at http://example.com'<concat>'%s'%review_request_url } ] 'links':{'commits':{'href':self.COMMITS_URL } } } # Some entries containing missing keys. {'new':{'type':'frobblegobble' 'name':'master' } 'truncated':truncated 'commits':[{'hash':'1c44b461cebe5874a857c51a4a13a84'<concat>'9a4d1e52d' 'message':'This is my fancy commit\n'<concat>'\n'<concat>'Reviewed at http://example.com'<concat>'%s'%review_request_url } ] 'links':{'commits':{'href':self.COMMITS_URL } } } {'new':{'type':'branch' 'name':'other' } 'truncated':truncated 'commits':[{'hash':'f46a13a1cc43bebea857c558741a484'<concat>'1e52d9a4d' 'message':'Ignored commit.'} ] 'links':{} } {'new':{} 'commits':[] } {'new':<none> 'commits':<none> } {}] }} for_response=<false>))<block_end><block_end>
<import_from_stmt>aleph.core db<import_from_stmt>aleph.views.util validate<import_from_stmt>aleph.tests.util TestCase<class_stmt>GroupsApiTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>super(GroupsApiTestCase self).setUp()<line_sep>self.role=self.create_user(foreign_id="user_1")<line_sep>self.create_group("group_1" self.role)<line_sep>self.create_group("group_2" self.role)<line_sep>self.other=self.create_user(foreign_id="other")<line_sep>db.session.commit()<block_end><def_stmt>test_index self<block_start>res=self.client.get("/api/2/groups")<assert_stmt>res.status_code<eq>403 res<line_sep>_,headers=self.login(foreign_id="user_1")<line_sep>res=self.client.get("/api/2/groups" headers=headers)<assert_stmt>res.status_code<eq>200 res<assert_stmt>res.json["total"]<eq>2 res.json<line_sep>validate(res.json["results"][0] "Role")<line_sep>_,headers=self.login(foreign_id="other")<line_sep>res=self.client.get("/api/2/groups" headers=headers)<assert_stmt>res.status_code<eq>200 res<assert_stmt>res.json["total"]<eq>0 res.json<block_end><block_end>
<import_from_stmt>collections defaultdict Counter<import_stmt>codecs<import_stmt>time<import_stmt>random<import_stmt>dynet<as>dy<import_stmt>numpy<as>np<import_from_stmt>tree Tree<def_stmt>read_dataset filename<block_start><return>[Tree.from_sexpr(line.strip())<for>line codecs.open(filename "r")]<block_end><def_stmt>get_vocabs trees<block_start>label_vocab=Counter()<line_sep>word_vocab=Counter()<for_stmt>tree trees<block_start>label_vocab.update([n.label<for>n tree.nonterms()])<line_sep>word_vocab.update([l.label<for>l tree.leaves()])<block_end>labels=[x<for>x,c label_vocab.iteritems()<if>c<g>0]<line_sep>words=["_UNK_"]+[x<for>x,c word_vocab.iteritems()<if>c<g>0]<line_sep>l2i={l:i<for>i,l enumerate(labels)}<line_sep>w2i={w:i<for>i,w enumerate(words)}<line_sep><return>l2i w2i labels words<block_end>train=read_dataset("../data/parsing/trees/train.txt")<line_sep>dev=read_dataset("../data/parsing/trees/dev.txt")<line_sep>l2i,w2i,i2l,i2w=get_vocabs(train)<line_sep>ntags=len(l2i)<line_sep>nwords=len(w2i)<line_sep># Socher-style Tree RNN <class_stmt>TreeRNNBuilder(object)<block_start><def_stmt>__init__ self model word_vocab hdim<block_start>self.W=model.add_parameters((hdim 2<times>hdim))<line_sep>self.E=model.add_lookup_parameters((len(word_vocab) hdim))<line_sep>self.w2i=word_vocab<block_end><def_stmt>expr_for_tree self tree<block_start><if_stmt>tree.isleaf()<block_start><return>self.E[self.w2i.get(tree.label 0)]<block_end><if_stmt>len(tree.children)<eq>1<block_start><assert_stmt>(tree.children[0].isleaf())<line_sep>expr=self.expr_for_tree(tree.children[0])<line_sep><return>expr<block_end><assert_stmt>(len(tree.children)<eq>2) tree.children[0]<line_sep>e1=self.expr_for_tree(tree.children[0])<line_sep>e2=self.expr_for_tree(tree.children[1])<line_sep>W=dy.parameter(self.W)<line_sep>expr=dy.tanh(W<times>dy.concatenate([e1 e2]))<line_sep><return>expr<block_end><block_end># Tai-style Tree LSTM <class_stmt>TreeLSTMBuilder(object)<block_start><def_stmt>__init__ self model word_vocab wdim hdim<block_start>self.WS=[model.add_parameters((hdim wdim))<for>_ "iou"]<line_sep>self.US=[model.add_parameters((hdim 2<times>hdim))<for>_ "iou"]<line_sep>self.UFS=[model.add_parameters((hdim hdim))<for>_ "ff"]<line_sep>self.BS=[model.add_parameters(hdim)<for>_ "iouf"]<line_sep>self.E=model.add_lookup_parameters((len(word_vocab) wdim))<line_sep>self.w2i=word_vocab<block_end><def_stmt>expr_for_tree self tree<block_start><if_stmt>tree.isleaf()<block_start><return>self.E[self.w2i.get(tree.label 0)]<block_end><if_stmt>len(tree.children)<eq>1<block_start><assert_stmt>(tree.children[0].isleaf())<line_sep>emb=self.expr_for_tree(tree.children[0])<line_sep>Wi,Wo,Wu=[dy.parameter(w)<for>w self.WS]<line_sep>bi,bo,bu,_=[dy.parameter(b)<for>b self.BS]<line_sep>i=dy.logistic(Wi<times>emb+bi)<line_sep>o=dy.logistic(Wo<times>emb+bo)<line_sep>u=dy.tanh(Wu<times>emb+bu)<line_sep>c=dy.cmult(i u)<line_sep>expr=dy.cmult(o dy.tanh(c))<line_sep><return>expr<block_end><assert_stmt>(len(tree.children)<eq>2) tree.children[0]<line_sep>e1=self.expr_for_tree(tree.children[0])<line_sep>e2=self.expr_for_tree(tree.children[1])<line_sep>Ui,Uo,Uu=[dy.parameter(u)<for>u self.US]<line_sep>Uf1,Uf2=[dy.parameter(u)<for>u self.UFS]<line_sep>bi,bo,bu,bf=[dy.parameter(b)<for>b self.BS]<line_sep>e=dy.concatenate([e1 e2])<line_sep>i=dy.logistic(Ui<times>e+bi)<line_sep>o=dy.logistic(Uo<times>e+bo)<line_sep>f1=dy.logistic(Uf1<times>e1+bf)<line_sep>f2=dy.logistic(Uf2<times>e2+bf)<line_sep>u=dy.tanh(Uu<times>e+bu)<line_sep>c=dy.cmult(i u)+dy.cmult(f1 
e1)+dy.cmult(f2 e2)<line_sep>h=dy.cmult(o dy.tanh(c))<line_sep>expr=h<line_sep><return>expr<block_end><block_end># Start DyNet and define trainer model=dy.Model()<line_sep>trainer=dy.AdamTrainer(model)<line_sep># Define the model EMB_SIZE=64<line_sep>HID_SIZE=64<line_sep># builder = TreeRNNBuilder(model, w2i, HID_SIZE) builder=TreeLSTMBuilder(model w2i HID_SIZE EMB_SIZE)<line_sep>W_sm=model.add_parameters((ntags HID_SIZE))# Softmax weights b_sm=model.add_parameters((ntags))# Softmax bias # A function to calculate scores for one value <def_stmt>calc_scores tree<block_start>dy.renew_cg()<line_sep>emb=builder.expr_for_tree(tree)<line_sep>W_sm_exp=dy.parameter(W_sm)<line_sep>b_sm_exp=dy.parameter(b_sm)<line_sep><return>W_sm_exp<times>emb+b_sm_exp<block_end><for_stmt>ITER range(100)# Perform training <block_start>random.shuffle(train)<line_sep>train_loss=0.0<line_sep>start=time.time()<for_stmt>tree train<block_start>my_loss=dy.hinge(calc_scores(tree) l2i[tree.label])<line_sep># my_loss = dy.pickneglogsoftmax(calc_scores(tree), l2i[tree.label]) train_loss<augadd>my_loss.value()<line_sep>my_loss.backward()<line_sep>trainer.update()<block_end>print("iter %r: train loss/sent=%.4f, time=%.2fs"%(ITER train_loss/len(train) time.time()-start))<line_sep># Perform testing test_correct=0.0<for_stmt>tree dev<block_start>scores=calc_scores(tree).npvalue()<line_sep>predict=np.argmax(scores)<if_stmt>predict<eq>l2i[tree.label]<block_start>test_correct<augadd>1<block_end><block_end>print("iter %r: test acc=%.4f"%(ITER test_correct/len(dev)))<block_end>
""" testing compound fields list and dict """<import_from_future_stmt> unicode_literals<import_from_stmt>django.test TestCase<import_from_stmt>mongoengine Document fields<import_from_stmt>rest_framework_mongoengine.serializers DocumentSerializer<import_from_stmt>.models DumbEmbedded<import_from_stmt>.utils dedent<class_stmt>BasicCompoundDoc(Document)<block_start>list_field=fields.ListField()<line_sep>int_list_field=fields.ListField(fields.IntField())<line_sep>dict_field=fields.DictField()<line_sep>int_dict_field=fields.DictField(field=fields.IntField())<line_sep>int_map_field=fields.MapField(fields.IntField())<block_end><class_stmt>OptionsCompoundDoc(Document)<block_start>int_list_field=fields.ListField(fields.IntField(min_value=3 max_value=7))<block_end><class_stmt>NestedCompoundDoc(Document)<block_start>dict_list_field=fields.ListField(fields.DictField())<line_sep>list_dict_field=fields.MapField(fields.ListField())<line_sep>list_dict_list_field=fields.ListField(fields.MapField(fields.ListField()))<block_end><class_stmt>TestCompundFieldMappings(TestCase)<block_start>maxDiff=10000<def_stmt>test_basic self<block_start><class_stmt>TestSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=BasicCompoundDoc<line_sep>fields='__all__'<block_end><block_end>expected=dedent(""" TestSerializer(): id = ObjectIdField(read_only=True) list_field = ListField(required=False) int_list_field = ListField(child=IntegerField(required=False), required=False) dict_field = DictField(required=False) int_dict_field = DictField(child=IntegerField(required=False), required=False) int_map_field = DictField(child=IntegerField(required=False), required=False) """)<assert_stmt>repr(TestSerializer())<eq>expected<block_end><def_stmt>test_suboptions self<block_start><class_stmt>TestSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=OptionsCompoundDoc<line_sep>fields='__all__'<block_end><block_end>expected=dedent(""" TestSerializer(): id = ObjectIdField(read_only=True) int_list_field = ListField(child=IntegerField(max_value=7, min_value=3, required=False), required=False) """)<assert_stmt>repr(TestSerializer())<eq>expected<block_end><def_stmt>test_nested self<block_start><class_stmt>TestSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=NestedCompoundDoc<line_sep>fields='__all__'<block_end><block_end>expected=dedent(""" TestSerializer(): id = ObjectIdField(read_only=True) dict_list_field = ListField(child=DictField(required=False), required=False) list_dict_field = DictField(child=ListField(required=False), required=False) list_dict_list_field = ListField(child=DictField(child=ListField(required=False), required=False), required=False) """)<assert_stmt>repr(TestSerializer())<eq>expected<block_end><block_end><class_stmt>TestSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=BasicCompoundDoc<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestIntegration(TestCase)<block_start><def_stmt>doCleanups self<block_start>BasicCompoundDoc.drop_collection()<block_end><def_stmt>test_parsing self<block_start>input_data={'list_field':["1" 2 3.0] 'int_list_field':[1 2 3] 'dict_field':{'a':"1" 'b':2 'c':3.0} 'int_dict_field':{'a':1 'b':2 'c':3} 'int_map_field':{'a':1 'b':2 'c':3}}<line_sep>serializer=TestSerializer(data=input_data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>expected={'list_field':["1" 2 3.0] 'int_list_field':[1 2 3] 'dict_field':{'a':"1" 'b':2 'c':3.0} 'int_dict_field':{'a':1 'b':2 'c':3} 
'int_map_field':{'a':1 'b':2 'c':3}}<assert_stmt>serializer.validated_data<eq>expected<block_end><def_stmt>test_retrieval self<block_start>instance=BasicCompoundDoc.objects.create(list_field=["1" 2 3.0] int_list_field=[1 2 3] dict_field={'a':"1" 'b':2 'c':3.0} int_dict_field={'a':1 'b':2 'c':3} int_map_field={'a':1 'b':2 'c':3})<line_sep>serializer=TestSerializer(instance)<line_sep>expected={'id':str(instance.id) 'list_field':["1" 2 3.0] 'int_list_field':[1 2 3] 'dict_field':{'a':"1" 'b':2 'c':3.0} 'int_dict_field':{'a':1 'b':2 'c':3} 'int_map_field':{'a':1 'b':2 'c':3}}<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_create self<block_start>data={'list_field':["1" 2 3.0] 'int_list_field':[1 2 3] 'dict_field':{'a':"1" 'b':2 'c':3.0} 'int_dict_field':{'a':1 'b':2 'c':3} 'int_map_field':{'a':1 'b':2 'c':3}}<line_sep>serializer=TestSerializer(data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<assert_stmt>instance.list_field<eq>["1" 2 3.0]<assert_stmt>instance.int_list_field<eq>[1 2 3]<assert_stmt>instance.dict_field<eq>{'a':"1" 'b':2 'c':3.0}<assert_stmt>instance.int_dict_field<eq>{'a':1 'b':2 'c':3}<assert_stmt>instance.int_map_field<eq>{'a':1 'b':2 'c':3}<line_sep>expected={'id':str(instance.id) 'list_field':["1" 2 3.0] 'int_list_field':[1 2 3] 'dict_field':{'a':"1" 'b':2 'c':3.0} 'int_dict_field':{'a':1 'b':2 'c':3} 'int_map_field':{'a':1 'b':2 'c':3}}<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_update self<block_start>instance=BasicCompoundDoc.objects.create(list_field=["1" 2 3.0] int_list_field=[1 2 3] dict_field={'a':"1" 'b':2 'c':3.0} int_dict_field={'a':1 'b':2 'c':3} int_map_field={'a':1 'b':2 'c':3})<line_sep>data={'list_field':["0" 1 2.0] 'int_list_field':[9 1 2] 'dict_field':{'a':"0" 'b':1 'c':2.0 'd':3} 'int_dict_field':{'a':0 'b':1 'c':2 'd':3} 'int_map_field':{'a':0 'b':1 'c':2 'd':3}}<line_sep>serializer=TestSerializer(instance data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<assert_stmt>instance.list_field<eq>["0" 1 2.0]<assert_stmt>instance.int_list_field<eq>[9 1 2]<assert_stmt>instance.dict_field<eq>{'a':"0" 'b':1 'c':2.0 'd':3}<assert_stmt>instance.int_dict_field<eq>{'a':0 'b':1 'c':2 'd':3}<assert_stmt>instance.int_map_field<eq>{'a':0 'b':1 'c':2 'd':3}<line_sep>expected={'id':str(instance.id) 'list_field':["0" 1 2.0] 'int_list_field':[9 1 2] 'dict_field':{'a':"0" 'b':1 'c':2.0 'd':3} 'int_dict_field':{'a':0 'b':1 'c':2 'd':3} 'int_map_field':{'a':0 'b':1 'c':2 'd':3}}<assert_stmt>serializer.data<eq>expected<block_end><block_end><class_stmt>ValidatingSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=OptionsCompoundDoc<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestCompoundValidation(TestCase)<block_start><def_stmt>test_validation_is_executed self<block_start>serializer=ValidatingSerializer(data={'int_list_field':[1 2 3]})<assert_stmt><not>serializer.is_valid()<assert_stmt>'int_list_field'<in>serializer.errors<block_end><def_stmt>test_validation_passing self<block_start>serializer=ValidatingSerializer(data={'int_list_field':[3 4 5]})<assert_stmt>serializer.is_valid() serializer.errors<block_end><block_end># Mongoengine's ListField has a specific meaning of required argument # Thus, we have to test that it's compatible with DRF's ListField <class_stmt>RequiredListDocument(Document)<block_start>required_list=fields.ListField(fields.StringField() 
required=<true>)<block_end><class_stmt>RequiredListSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=RequiredListDocument<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestRequiredList(TestCase)<block_start><def_stmt>doCleanups self<block_start>RequiredListDocument.drop_collection()<block_end><def_stmt>test_parsing self<block_start>input_data={'required_list':[]}<line_sep>serializer=RequiredListSerializer(data=input_data)<line_sep>serializer.is_valid()<assert_stmt>serializer.errors['required_list']<eq>[u'This list may not be empty.']<block_end><block_end># Check that ListField is allowed to be empty, if required=False <class_stmt>NonRequiredListDocument(Document)<block_start>non_required_list=fields.ListField(fields.StringField() required=<false>)<block_end><class_stmt>NonRequiredListSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=NonRequiredListDocument<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestNonRequiredList(TestCase)<block_start><def_stmt>doCleanups self<block_start>NonRequiredListDocument.drop_collection()<block_end><def_stmt>test_parsing self<block_start>input_data={'non_required_list':[]}<line_sep>serializer=NonRequiredListSerializer(data=input_data)<assert_stmt>serializer.is_valid()<block_end><block_end># Check that Compound fields work with DynamicField # So far implemented only for ListField, because it's failing <class_stmt>CompoundsWithDynamicFieldDoc(Document)<block_start>list_field=fields.ListField(fields.DynamicField(null=<true>))<block_end><class_stmt>CompoundsWithDynamicFieldSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=CompoundsWithDynamicFieldDoc<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestCompoundsWithDynamicField(TestCase)<block_start><def_stmt>doCleanups self<block_start>CompoundsWithDynamicFieldDoc.drop_collection()<block_end><def_stmt>test_parsing self<block_start>input_data={'list_field':[<none> "1" 2 3.0]}<line_sep>serializer=CompoundsWithDynamicFieldSerializer(data=input_data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>expected={'list_field':[<none> "1" 2 3.0]}<assert_stmt>serializer.validated_data<eq>expected<block_end><def_stmt>test_retrieval self<block_start>instance=CompoundsWithDynamicFieldDoc.objects.create(list_field=[<none> "1" 2 3.0])<line_sep>serializer=CompoundsWithDynamicFieldSerializer(instance)<line_sep>expected={'id':str(instance.id) 'list_field':[<none> "1" 2 3.0]}<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_create self<block_start>data={'list_field':[<none> "1" 2 3.0]}<line_sep>serializer=CompoundsWithDynamicFieldSerializer(data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<assert_stmt>instance.list_field<eq>[<none> "1" 2 3.0]<line_sep>expected={'id':str(instance.id) 'list_field':[<none> "1" 2 3.0] }<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_update self<block_start>instance=CompoundsWithDynamicFieldDoc.objects.create(list_field=[<none> "1" 2 3.0])<line_sep>data={'list_field':["0" 1 2.0 <none>]}<line_sep>serializer=CompoundsWithDynamicFieldSerializer(instance data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<assert_stmt>instance.list_field<eq>["0" 1 2.0 <none>]<line_sep>expected={'id':str(instance.id) 'list_field':["0" 1 2.0 
<none>]}<assert_stmt>serializer.data<eq>expected<block_end><block_end><class_stmt>MapEmbeddedDoc(Document)<block_start>embedded_map_field=fields.MapField(fields.EmbeddedDocumentField(DumbEmbedded))<block_end><class_stmt>MapEmbeddedFieldSerializer(DocumentSerializer)<block_start><class_stmt>Meta<block_start>model=MapEmbeddedDoc<line_sep>fields='__all__'<block_end><block_end><class_stmt>TestMapFieldWithEmbeddedDocument(TestCase)<block_start><def_stmt>doCleanups self<block_start>MapEmbeddedDoc.drop_collection()<block_end><def_stmt>test_parsing self<block_start>input_data={"embedded_map_field":{"a":{"name":"spam" "foo":1} "b":{"name":"ham" "foo":2}} }<line_sep>serializer=MapEmbeddedFieldSerializer(data=input_data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>expected={"embedded_map_field":{"a":{"name":"spam" "foo":1} "b":{"name":"ham" "foo":2}} }<assert_stmt>serializer.validated_data<eq>expected<block_end><def_stmt>test_retrieval self<block_start>instance=MapEmbeddedDoc.objects.create(embedded_map_field={"a":DumbEmbedded(name="spam" foo=1) "b":DumbEmbedded(name="ham" foo=2)} )<line_sep>serializer=MapEmbeddedFieldSerializer(instance)<line_sep>expected={"id":str(instance.id) "embedded_map_field":{"a":{"name":"spam" "foo":1} "b":{"name":"ham" "foo":2}} }<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_create self<block_start>data={"embedded_map_field":{"a":{"name":"spam" "foo":1} "b":{"name":"ham" "foo":2}} }<line_sep>serializer=MapEmbeddedFieldSerializer(data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<line_sep>expected={"id":str(instance.id) "embedded_map_field":{"a":{"name":"spam" "foo":1} "b":{"name":"ham" "foo":2}} }<assert_stmt>serializer.data<eq>expected<block_end><def_stmt>test_update self<block_start>instance=MapEmbeddedDoc.objects.create(embedded_map_field={"a":DumbEmbedded(name="spam" foo=1) "b":DumbEmbedded(name="ham" foo=2)} )<line_sep>data={"embedded_map_field":{"a":{"name":"spam" "foo":3} "b":{"name":"ham" "foo":4}} }<line_sep>serializer=MapEmbeddedFieldSerializer(instance data=data)<assert_stmt>serializer.is_valid() serializer.errors<line_sep>instance=serializer.save()<line_sep>expected={"id":str(instance.id) "embedded_map_field":{"a":{"name":"spam" "foo":3} "b":{"name":"ham" "foo":4}} }<assert_stmt>serializer.data<eq>expected<block_end><block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<import_stmt>random<class_stmt>PPOReplay(object)<block_start><def_stmt>__init__ self actor_critic clip_param ppo_epoch num_mini_batch value_loss_coef entropy_coef on_policy_epoch off_policy_epoch lr=<none> eps=<none> max_grad_norm=<none> amsgrad=<true> weight_decay=0.0 intrinsic_losses=<none> # list of loss key words intrinsic_loss_coef=0.0<block_start>self.actor_critic=actor_critic<line_sep>self.clip_param=clip_param<line_sep>self.ppo_epoch=ppo_epoch<line_sep>self.on_policy_epoch=on_policy_epoch<line_sep>self.off_policy_epoch=off_policy_epoch<line_sep>self.num_mini_batch=num_mini_batch<line_sep>self.value_loss_coef=value_loss_coef<line_sep>self.entropy_coef=entropy_coef<line_sep>self.intrinsic_loss_coef=intrinsic_loss_coef# TODO make this a list self.max_grad_norm=max_grad_norm<line_sep>self.optimizer=optim.Adam(actor_critic.parameters() lr=lr eps=eps weight_decay=weight_decay amsgrad=amsgrad)<line_sep>self.last_grad_norm=<none><line_sep>self.intrinsic_losses=intrinsic_losses<if>intrinsic_losses<is><not><none><else>[]<block_end><def_stmt>update self rollouts<block_start>value_loss_epoch=0<line_sep>action_loss_epoch=0<line_sep>dist_entropy_epoch=0<line_sep>max_importance_weight_epoch=0<line_sep>on_policy=[0]<times>self.on_policy_epoch<line_sep>off_policy=[1]<times>self.off_policy_epoch<line_sep>epochs=on_policy+off_policy<line_sep>random.shuffle(epochs)<line_sep>info={}<for_stmt>e epochs<block_start><if_stmt>e<eq>0<block_start>data_generator=rollouts.feed_forward_generator(<none> self.num_mini_batch on_policy=<true>)<block_end><else_stmt><block_start>data_generator=rollouts.feed_forward_generator(<none> self.num_mini_batch on_policy=<false>)<block_end><for_stmt>sample data_generator<block_start>observations_batch,states_batch,actions_batch,return_batch,masks_batch,old_action_log_probs_batch,adv_targ=sample<line_sep># Reshape to do in a single forward pass for all steps cache={}<line_sep>values,action_log_probs,dist_entropy,states=self.actor_critic.evaluate_actions(observations_batch states_batch masks_batch actions_batch cache)<line_sep>intrinsic_loss_dict=self.actor_critic.compute_intrinsic_losses(self.intrinsic_losses observations_batch states_batch masks_batch actions_batch cache)<line_sep>ratio=torch.exp(action_log_probs-old_action_log_probs_batch)<line_sep>surr1=ratio<times>adv_targ<line_sep>surr2=torch.clamp(ratio 1.0-self.clip_param 1.0+self.clip_param)<times>adv_targ<line_sep>action_loss=-torch.min(surr1 surr2).mean()<line_sep>value_loss=F.mse_loss(values return_batch)<line_sep>self.optimizer.zero_grad()<line_sep>total_loss=value_loss<times>self.value_loss_coef+action_loss-dist_entropy<times>self.entropy_coef<for_stmt>loss_name,loss_val intrinsic_loss_dict.items()<block_start>total_loss<augadd>loss_val<times>self.intrinsic_loss_coef<block_end>total_loss.backward()<line_sep>self.last_grad_norm=nn.utils.clip_grad_norm_(self.actor_critic.parameters() self.max_grad_norm)<line_sep>self.optimizer.step()<line_sep>value_loss_epoch<augadd>value_loss.item()<line_sep>action_loss_epoch<augadd>action_loss.item()<line_sep>dist_entropy_epoch<augadd>dist_entropy.item()<for_stmt>loss self.intrinsic_losses<block_start><try_stmt><block_start>info[loss]<augadd>intrinsic_loss_dict[loss].item()<block_end><except_stmt><block_start>info[loss]=intrinsic_loss_dict[loss].item()<block_end><block_end>max_importance_weight_epoch=max(torch.max(ratio).item() 
max_importance_weight_epoch)<block_end><block_end>num_updates=2<times>self.ppo_epoch<times>self.num_mini_batch# twice since on_policy and off_policy value_loss_epoch<augdiv>num_updates<line_sep>action_loss_epoch<augdiv>num_updates<line_sep>dist_entropy_epoch<augdiv>num_updates<for_stmt>loss self.intrinsic_losses<block_start>info[loss]<augdiv>num_updates<block_end><return>value_loss_epoch action_loss_epoch dist_entropy_epoch max_importance_weight_epoch info<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>os<import_stmt>tempfile<import_from_stmt>collections defaultdict<import_stmt>shutil<import_from_stmt>cosrlib.config config<import_from_stmt>cosrlib.url URL<import_from_stmt>. BaseDataProvider<class_stmt>DataProvider(BaseDataProvider)<block_start>""" Return the UT1 categories in which the URL belongs. https://dsi.ut-capitole.fr/blacklists/index_en.php """<line_sep>dump_testdata="tests/testdata/ut1_blacklists"<line_sep>dump_url="ftp://ftp.ut-capitole.fr/pub/reseau/cache/squidguard_contrib/blacklists.tar.gz"<line_sep>dump_batch_size=<none><def_stmt>iter_rows self<block_start><if_stmt>config["TESTDATA"]<eq>"1"<block_start>extract_dir=self.dump_testdata<line_sep>clean=<false><block_end><else_stmt><block_start>extract_dir=tempfile.mkdtemp(suffix="cosr-ut1-import")<line_sep>clean=<true><line_sep>os.system("curl %s > %s/blacklists.tar.gz"%(self.dump_url extract_dir))<line_sep>os.system("cd %s && tar zxf blacklists.tar.gz"%extract_dir)<line_sep>extract_dir<augadd>"/blacklists"<block_end>data=defaultdict(list)<for_stmt>fp os.listdir(extract_dir)<block_start>fullpath=os.path.join(extract_dir fp)<if_stmt>os.path.isdir(fullpath)<and><not>os.path.islink(fullpath)<block_start>cnt=0<with_stmt>open(fullpath+"/domains" 'r')<as>f<block_start><for_stmt>line f.readlines()<block_start>url=URL(line.strip()).normalized<if_stmt>url<block_start>data[url].append(fp)<line_sep>cnt<augadd>1<block_end><block_end><block_end><if_stmt>os.path.isfile(fullpath+"/urls")<block_start><with_stmt>open(fullpath+"/urls" 'r')<as>f<block_start><for_stmt>line f.readlines()<block_start>url=URL(line.strip()).normalized<if_stmt>url<block_start>data[url].append(fp)<line_sep>cnt<augadd>1<block_end><block_end><block_end><block_end>print("Done %s (%s entries)"%(fp cnt))<block_end><block_end><if_stmt>clean<block_start>shutil.rmtree(os.path.dirname(extract_dir))<block_end><for_stmt>key,value data.iteritems()<block_start><yield>key {"ut1_blacklist":value}<block_end><block_end><block_end>
<import_stmt>random<import_from_stmt>abc abstractmethod<import_stmt>torch.nn<as>nn<line_sep>__all__=['RandomModule' 'RandomChoice' 'RandomDepth']<class_stmt>RandomModule(nn.Module)<block_start>@abstractmethod<def_stmt>random_sample self<block_start><pass><block_end>@abstractmethod<def_stmt>clear_sample self<block_start><pass><block_end>@abstractmethod<def_stmt>manual_select self sample<block_start><pass><block_end><def_stmt>forward self *inputs<block_start><return>self.determinize()(*inputs)<block_end>@abstractmethod<def_stmt>determinize self<block_start><pass><block_end><block_end><class_stmt>RandomChoice(RandomModule)<block_start><def_stmt>__init__ self *choices<block_start>super().__init__()<line_sep>self.choices=nn.ModuleList(choices)<block_end><def_stmt>random_sample self<block_start>self.index=random.randint(0 len(self.choices)-1)<line_sep><return>self.index<block_end><def_stmt>clear_sample self<block_start>self.index=<none><block_end><def_stmt>manual_select self index<block_start>self.index=index<block_end><def_stmt>determinize self<block_start><return>self.choices[self.index]<block_end><block_end><class_stmt>RandomDepth(RandomModule)<block_start><def_stmt>__init__ self *layers depth_min=<none> depth_max=<none><block_start>super().__init__()<line_sep>self.layers=nn.ModuleList(layers)<line_sep>self.depth_min=depth_min<line_sep>self.depth_max=depth_max<block_end><def_stmt>random_sample self<block_start><if_stmt>self.depth_min<is><not><none><block_start>depth_min=self.depth_min<block_end><else_stmt><block_start>depth_min=0<block_end><if_stmt>self.depth_max<is><not><none><block_start>depth_max=self.depth_max<block_end><else_stmt><block_start>depth_max=len(self.layers)<block_end>self.depth=random.randint(depth_min depth_max)<line_sep><return>self.depth<block_end><def_stmt>clear_sample self<block_start>self.depth=<none><block_end><def_stmt>status self<block_start><return>self.depth<block_end><def_stmt>manual_select self depth<block_start>self.depth=depth<block_end># fixme: support tuples as input <def_stmt>forward self x<block_start><for_stmt>k range(self.depth)<block_start>x=self.layers[k](x)<block_end><return>x<block_end><def_stmt>determinize self<block_start><return>nn.Sequential(*self.layers[:self.depth])<block_end><block_end>
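# Added illustrative usage sketch (not part of the original module); the layer sizes and sampling choices below are arbitrary assumptions. <import_stmt>torch<line_sep>backbone=RandomDepth(nn.Linear(8 8) nn.Linear(8 8) nn.Linear(8 8) depth_min=1)<line_sep>head=RandomChoice(nn.Linear(8 2) nn.Linear(8 4))<line_sep>backbone.random_sample()  # picks a depth in [1, 3] at random<line_sep>head.manual_select(0)  # forces the first candidate<line_sep>out=head(backbone(torch.randn(1 8)))<line_sep>frozen=nn.Sequential(backbone.determinize() head.determinize())  # plain nn.Sequential with the sampled choices baked in<line_sep>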
<import_from_stmt>.distributed_sampler ClassSpecificDistributedSampler DistributedSampler <line_sep>__all__=['DistributedSampler' 'ClassSpecificDistributedSampler']<line_sep>
<import_from_stmt>config.settings quotes_storage<line_sep>""" This file imports data read/write methods for a local storage depending on the user's choice. These methods are used in core.contract_store. """<if_stmt>quotes_storage<eq>'hdf5'<block_start><import_from_stmt>core.hdfstore read_symbol read_contract write_data drop_symbol<block_end>
<import_from_future_stmt> with_statement absolute_import<import_stmt>os.path<import_from_stmt>yaml load<try_stmt><block_start><import_from_stmt>yaml CLoader<as>Loader CDumper<as>Dumper<block_end><except_stmt>ImportError<block_start><import_from_stmt>yaml Loader Dumper<block_end><import_from_stmt>.errors ConfigurationFileInitialized ConfigurationFileNotFound<class_stmt>ConfigBase(object)<block_start><def_stmt>__init__ self config_file_path<block_start>self.options=load(open(config_file_path))<block_end><block_end><class_stmt>Config(ConfigBase)<block_start><def_stmt>__init__ self config_file_path generate_if_not_found=<true><block_start><if_stmt><not>os.path.isfile(config_file_path)<block_start><if_stmt>generate_if_not_found<block_start>self.reset_configfile(config_file_path)<block_end><if_stmt>os.path.isfile(config_file_path)<block_start><raise>ConfigurationFileInitialized("""No configuration file found. A new file has been initialized at: %s Please review the configuration and retry..."""%config_file_path)<block_end><else_stmt><block_start><raise>ConfigurationFileNotFound("cannot load config file %s"%config_file_path)<block_end><block_end>super(Config self).__init__(config_file_path)<block_end><def_stmt>reset_configfile self file_path<block_start><with_stmt>open(file_path 'w')<as>f<block_start>f.write(CONFIG_TEMPLATE)<block_end><block_end><block_end>CONFIG_TEMPLATE=""" # a socket connection will be selected if a 'socket' is specified # also 'localhost' is a special 'hostname' for MySQL that overrides the 'port' option # and forces it to use a local socket connection # if tcp is chosen, you can use compression mysql: hostname: localhost port: 3306 socket: /tmp/mysql.sock username: mysql2psql password: database: mysql2psql_test compress: false destination: # if file is given, output goes to file, else postgres file: postgres: hostname: localhost port: 5432 username: mysql2psql password: database: mysql2psql_test # if tables is given, only the listed tables will be converted. leave empty to convert all tables. #only_tables: #- table1 #- table2 # if exclude_tables is given, exclude the listed tables from the conversion. #exclude_tables: #- table3 #- table4 # if supress_data is true, only the schema definition will be exported/migrated, and not the data supress_data: false # if supress_ddl is true, only the data will be exported/imported, and not the schema supress_ddl: false # if force_truncate is true, forces a table truncate before table loading force_truncate: false # if timezone is true, forces to append/convert to UTC tzinfo mysql data timezone: false # if index_prefix is given, indexes will be created whith a name prefixed with index_prefix index_prefix: """<line_sep>
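# Added illustrative usage sketch (not part of the original module); the file name below is an assumption. On a first run the template is written out and ConfigurationFileInitialized is raised so the user can review it before retrying. <line_sep>config=Config("mysql2psql.yml" generate_if_not_found=<true>)<line_sep>mysql_host=config.options["mysql"]["hostname"]<line_sep>target_db=config.options["destination"]["postgres"]["database"]<line_sep>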
<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>mayan.apps.common.serializers ContentTypeSerializer<import_from_stmt>mayan.apps.rest_api serializers<import_from_stmt>mayan.apps.rest_api.relations MultiKwargHyperlinkedIdentityField<import_from_stmt>..models.document_version_models DocumentVersion<import_from_stmt>..models.document_version_page_models DocumentVersionPage<class_stmt>DocumentVersionPageSerializer(serializers.HyperlinkedModelSerializer)<block_start>content_type=ContentTypeSerializer(read_only=<true>)<line_sep>content_type_id=serializers.IntegerField(help_text=_('Content type ID of the source object for the page.') write_only=<true>)<line_sep>document_version_url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_version.document.pk' 'lookup_url_kwarg':'document_id' } {'lookup_field':'document_version_id' 'lookup_url_kwarg':'document_version_id' }) view_name='rest_api:documentversion-detail')<line_sep>image_url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_version.document.pk' 'lookup_url_kwarg':'document_id' } {'lookup_field':'document_version_id' 'lookup_url_kwarg':'document_version_id' } {'lookup_field':'pk' 'lookup_url_kwarg':'document_version_page_id' }) view_name='rest_api:documentversionpage-image')<line_sep>url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_version.document.pk' 'lookup_url_kwarg':'document_id' } {'lookup_field':'document_version_id' 'lookup_url_kwarg':'document_version_id' } {'lookup_field':'pk' 'lookup_url_kwarg':'document_version_page_id' }) view_name='rest_api:documentversionpage-detail')<class_stmt>Meta<block_start>fields=('content_type' 'content_type_id' 'document_version_id' 'document_version_url' 'id' 'image_url' 'object_id' 'page_number' 'url')<line_sep>model=DocumentVersionPage<line_sep>read_only_fields=('content_type' 'document_version_id' 'document_version_url' 'id' 'image_url' 'url')<block_end><block_end><class_stmt>DocumentVersionSerializer(serializers.HyperlinkedModelSerializer)<block_start>document_url=serializers.HyperlinkedIdentityField(lookup_field='document_id' lookup_url_kwarg='document_id' view_name='rest_api:document-detail')<line_sep>export_url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_id' 'lookup_url_kwarg':'document_id' } {'lookup_field':'pk' 'lookup_url_kwarg':'document_version_id' } ) view_name='rest_api:documentversion-export')<line_sep>page_list_url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_id' 'lookup_url_kwarg':'document_id' } {'lookup_field':'pk' 'lookup_url_kwarg':'document_version_id' } ) view_name='rest_api:documentversionpage-list')<line_sep>pages_first=DocumentVersionPageSerializer(many=<false> read_only=<true>)<line_sep>url=MultiKwargHyperlinkedIdentityField(view_kwargs=({'lookup_field':'document_id' 'lookup_url_kwarg':'document_id' } {'lookup_field':'pk' 'lookup_url_kwarg':'document_version_id' } ) view_name='rest_api:documentversion-detail')<class_stmt>Meta<block_start>fields=('active' 'comment' 'document_id' 'document_url' 'export_url' 'id' 'page_list_url' 'pages_first' 'timestamp' 'url')<line_sep>model=DocumentVersion<line_sep>read_only_fields=('document_id' 'document_url' 'export_url' 'id' 'page_list_url' 'pages_first' 'timestamp' 'url')<block_end><block_end>
"""Various utility functions for word and character n-gram extraction."""<import_from_stmt>collections Counter<import_from_stmt>typing List Tuple<def_stmt>extract_all_word_ngrams line:str min_order:int max_order:int<arrow>Tuple[Counter int]<block_start>"""Extracts all ngrams (min_order <= n <= max_order) from a sentence. :param line: A string sentence. :param min_order: Minimum n-gram order. :param max_order: Maximum n-gram order. :return: a Counter object with n-grams counts and the sequence length. """<line_sep>ngrams=[]<line_sep>tokens=line.split()<for_stmt>n range(min_order max_order+1)<block_start><for_stmt>i range(0 len(tokens)-n+1)<block_start>ngrams.append(tuple(tokens[i:i+n]))<block_end><block_end><return>Counter(ngrams) len(tokens)<block_end><def_stmt>extract_word_ngrams tokens:List[str] n:int<arrow>Counter<block_start>"""Extracts n-grams with order `n` from a list of tokens. :param tokens: A list of tokens. :param n: The order of n-grams. :return: a Counter object with n-grams counts. """<line_sep><return>Counter([' '.join(tokens[i:i+n])<for>i range(len(tokens)-n+1)])<block_end><def_stmt>extract_char_ngrams line:str n:int include_whitespace:bool=<false><arrow>Counter<block_start>"""Yields counts of character n-grams from a sentence. :param line: A segment containing a sequence of words. :param n: The order of the n-grams. :param include_whitespace: If given, will not strip whitespaces from the line. :return: a dictionary containing ngrams and counts """<if_stmt><not>include_whitespace<block_start>line=''.join(line.split())<block_end><return>Counter([line[i:i+n]<for>i range(len(line)-n+1)])<block_end><def_stmt>extract_all_char_ngrams line:str max_order:int include_whitespace:bool=<false><arrow>List[Counter]<block_start>"""Extracts all character n-grams at once for convenience. :param line: A segment containing a sequence of words. :param max_order: The maximum order of the n-grams. :param include_whitespace: If given, will not strip whitespaces from the line. :return: a list of Counter objects containing ngrams and counts. """<line_sep>counters=[]<if_stmt><not>include_whitespace<block_start>line=''.join(line.split())<block_end><for_stmt>n range(1 max_order+1)<block_start>ngrams=Counter([line[i:i+n]<for>i range(len(line)-n+1)])<line_sep>counters.append(ngrams)<block_end><return>counters<block_end>
<import_from_stmt>datetime datetime timedelta<class_stmt>DBStore<block_start><def_stmt>__init__ self db name="py4web_session"<block_start>self.__prerequisites__=[db]<line_sep>Field=db.Field<line_sep>self.db=db<if_stmt><not>name<in>db.tables<block_start>db.define_table(name Field("rkey" "string") Field("rvalue" "text") Field("expiration" "integer") Field("created_on" "datetime") Field("expires_on" "datetime") )<line_sep>db.commit()<block_end>self.table=db[name]<block_end><def_stmt>get self key<block_start>db,table,now=self.db self.table datetime.utcnow()<line_sep>row=db(table.rkey<eq>key).select().first()<if_stmt><not>row<block_start><return><none><block_end><if_stmt>row.expiration<block_start>row.update_record(expires_on=now+timedelta(row.expiration))<block_end><return>row.rvalue<block_end><def_stmt>set self key value expiration=<none><block_start>db,table,now=self.db self.table datetime.utcnow()<line_sep>db(table.expires_on<l>now).delete()<line_sep>row=db(table.rkey<eq>key).select().first()<line_sep>expires_on=(now+timedelta(expiration)<if>expiration<else>datetime(2999 12 31))<if_stmt>row<block_start>row.update_record(rvalue=value expires_on=expires_on expiration=expiration)<block_end><else_stmt><block_start>table.insert(rkey=key rvalue=value expires_on=expires_on expiration=expiration created_on=now )<block_end>db.commit()<block_end><block_end>
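# Added illustrative usage sketch (not part of the original module); the in-memory pydal DAL and the Field shim below are assumptions for this sketch, since the class expects a handle that exposes db.Field. <import_from_stmt>pydal DAL Field<line_sep>db=DAL("sqlite:memory")<line_sep>db.Field=Field  # DBStore reads Field from the handle it is given<line_sep>store=DBStore(db)<line_sep>store.set("session:abc" '{"user": 1}' expiration=3600)<line_sep>print(store.get("session:abc"))<line_sep>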
<import_from_stmt>typing Callable Dict Optional<import_stmt>uvicorn<import_from_stmt>fastapi FastAPI File UploadFile<import_from_stmt>chitra.__about__ documentation_url<import_from_stmt>chitra.serve schema<import_from_stmt>chitra.serve.base ModelServer<import_from_stmt>chitra.serve.constants IMAGE_CLF OBJECT_DETECTION QNA TXT_CLF<class_stmt>API(ModelServer)<block_start><def_stmt>__init__ self api_type:str model:Callable preprocess_fn:Optional[Callable]=<none> preprocess_conf:Optional[Dict]=<none> postprocess_fn:Optional[Callable]=<none> postprocess_conf:Optional[Dict]=<none> **kwargs <block_start>""" Creates FastAPI app for `api_type` Args: api_type: Type of the API. See `API.available_api_types()` model: Any ML/DL model preprocess_fn: Override Data Preprocessing Function, data will be processed with this function before calling model. postprocess_fn: Override Data Postprocessing Function, model output will be passed into this function. **kwargs: """<line_sep>super(API self).__init__(api_type model preprocess_fn postprocess_fn **kwargs)<line_sep>docs_url=kwargs.get("docs_url" "/docs")<line_sep>title=kwargs.get("title" "Chitra Model Server 🔥")<line_sep>desc=kwargs.get("description" f"<a href={documentation_url}>Goto Chitra Docs</a> 🔗" )<line_sep>self.app:FastAPI=FastAPI(title=title description=desc docs_url=docs_url)<if_stmt><not>preprocess_conf<block_start>preprocess_conf={}<block_end><if_stmt><not>postprocess_conf<block_start>postprocess_conf={}<block_end>self.preprocess_conf=preprocess_conf<line_sep>self.postprocess_conf=postprocess_conf<line_sep>self.setup(**kwargs)<block_end><async_keyword><def_stmt>predict_image self file:UploadFile=File(<ellipsis>)<block_start>preprocess_fn=self.data_processor.preprocess_fn<line_sep>postprocess_fn=self.data_processor.postprocess_fn<line_sep>x=preprocess_fn(<await>file.read())<line_sep>x=self.model(x)<line_sep>x=postprocess_fn(x)<line_sep><return>x<block_end><async_keyword><def_stmt>predict_text self data:schema.Query<block_start>data_processor=self.data_processor<line_sep>x=data.query<if_stmt>data_processor.preprocess_fn<block_start>x=data_processor.preprocess(x)<block_end>x=self.model(x)<if_stmt>data_processor.postprocess_fn<block_start>x=data_processor.postprocess(x)<block_end><return>x<block_end><async_keyword><def_stmt>predict_question_answer self data:schema.QnARequest<block_start>data_processor=self.data_processor<line_sep>x=data.query data.question<if_stmt>data_processor.preprocess_fn<block_start>x=data_processor.preprocess(x)<block_end>x=self.model(x)<if_stmt>data_processor.postprocess_fn<block_start>x=data_processor.postprocess(x)<block_end><return>x<block_end><def_stmt>setup self **_<block_start><if_stmt>self.api_type<in>(IMAGE_CLF OBJECT_DETECTION)<block_start>self.app.post("/api/predict-image")(self.predict_image)<block_end><elif_stmt>self.api_type<eq>TXT_CLF<block_start>self.app.post("/api/predict-text")(self.predict_text)<block_end><elif_stmt>self.api_type<eq>QNA<block_start>self.app.post("/api/QnA")(self.predict_question_answer)<block_end><block_end><def_stmt>run self<block_start>uvicorn.run(self.app)<block_end><block_end><def_stmt>create_api model:Callable api_type:str="IMAGE-CLASSIFICATION" preprocess_fn:Callable=<none> preprocess_conf:Optional[Dict]=<none> postprocess_fn:Callable=<none> postprocess_conf:Optional[Dict]=<none> run:bool=<false> **kwargs <arrow>API<block_start>""" Launch FastAPI app Args: model: Any ML/DL model api_type: Type of the API task, see `chitra.serve.get_available_api_types()` preprocess_fn: Override default 
preprocessing function preprocess_conf: Arguments for preprocessing function postprocess_fn: Override default postprocessing function postprocess_conf: Arguments for postprocessing function run: Set True to run the app **kwargs: Returns: Object of `chitra.serve.API` class """<line_sep>api=API(api_type model preprocess_fn=preprocess_fn preprocess_conf=preprocess_conf postprocess_fn=postprocess_fn postprocess_conf=postprocess_conf **kwargs )<if_stmt>run<block_start>api.run()<block_end><return>api<block_end>
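# Added illustrative usage sketch (not part of the original module); the dummy model below is an assumption, and the api_type string is just the documented default. <def_stmt>dummy_model x<block_start><return>{"label":"cat" "score":0.9}<block_end>api=create_api(dummy_model api_type="IMAGE-CLASSIFICATION" run=<false>)<line_sep># api.run() would block and serve the FastAPI app through uvicorn<line_sep>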
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved. """ english letter probabilities table from http://en.algoritmy.net/article/40379/Letter-frequency-English """<line_sep>english_letter_probs_percent=[['a' 8.167] ['b' 1.492] ['c' 2.782] ['d' 4.253] ['e' 12.702] ['f' 2.228] ['g' 2.015] ['h' 6.094] ['i' 6.966] ['j' 0.153] ['k' 0.772] ['l' 4.025] ['m' 2.406] ['n' 6.749] ['o' 7.507] ['p' 1.929] ['q' 0.095] ['r' 5.987] ['s' 6.327] ['t' 9.056] ['u' 2.758] ['v' 0.978] ['w' 2.360] ['x' 0.150] ['y' 1.974] ['z' 0.074]]<line_sep>english_letter_probs={lt:(per<times>0.01)<for>lt,per english_letter_probs_percent}<line_sep>""" Scrabble Scores table from https://en.wikipedia.org/wiki/Scrabble_letter_distributions """<line_sep>scrabble_dict={"a":1 "b":3 "c":3 "d":2 "e":1 "f":4 "g":2 "h":4 "i":1 "j":8 "k":5 "l":1 "m":3 "n":1 "o":1 "p":3 "q":10 "r":1 "s":1 "t":1 "u":1 "v":4 "w":4 "x":8 "y":4 "z":10}<line_sep>
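# Added illustrative use of the tables above (not part of the original module). <line_sep>word="malware"<line_sep>scrabble_score=sum(scrabble_dict[letter]<for>letter word)  # 12 points<line_sep>english_likelihood=sum(english_letter_probs[letter]<for>letter word)<line_sep>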
# # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. # # Licensed under the MIT License. See the LICENSE accompanying this file # for the specific language governing permissions and limitations under # the License. # <import_stmt>watchdog<line_sep>MOUNT_FMT_LINE='{address}:/ {mountpoint} {fs_type} {options} 0 0'<line_sep>DEFAULT_OPTS='rw,port=12345'<def_stmt>_create_mount_file tmpdir lines<block_start>mount_file=tmpdir.join('mounts')<line_sep>mount_file.write('\n'.join(lines))<line_sep><return>str(mount_file)<block_end><def_stmt>test_no_mounts tmpdir<block_start>mount_file=_create_mount_file(tmpdir [])<line_sep>mounts=watchdog.get_current_local_nfs_mounts(mount_file)<assert_stmt>{}<eq>mounts<block_end><def_stmt>test_no_local_mounts tmpdir<block_start>mount_file=_create_mount_file(tmpdir [MOUNT_FMT_LINE.format(address='10.1.0.1' mountpoint='/mnt' fs_type='nfs4' options=DEFAULT_OPTS)])<line_sep>mounts=watchdog.get_current_local_nfs_mounts(mount_file)<assert_stmt>{}<eq>mounts<block_end><def_stmt>test_no_local_nfs_mounts tmpdir<block_start>mount_file=_create_mount_file(tmpdir [MOUNT_FMT_LINE.format(address='127.0.0.1' mountpoint='/mnt' fs_type='ext4' options=DEFAULT_OPTS)])<line_sep>mounts=watchdog.get_current_local_nfs_mounts(mount_file)<assert_stmt>{}<eq>mounts<block_end><def_stmt>test_local_nfs_mount tmpdir<block_start>mount_file=_create_mount_file(tmpdir [MOUNT_FMT_LINE.format(address='127.0.0.1' mountpoint='/mnt' fs_type='nfs4' options=DEFAULT_OPTS)])<line_sep>mounts=watchdog.get_current_local_nfs_mounts(mount_file)<assert_stmt>1<eq>len(mounts)<assert_stmt>'mnt.12345'<in>mounts<block_end><def_stmt>test_local_nfs_mount_noresvport tmpdir<block_start>mount_file=_create_mount_file(tmpdir [MOUNT_FMT_LINE.format(address='127.0.0.1' mountpoint='/mnt' fs_type='nfs4' options='rw,noresvport,port=12345')])<line_sep>mounts=watchdog.get_current_local_nfs_mounts(mount_file)<assert_stmt>1<eq>len(mounts)<assert_stmt>'mnt.12345'<in>mounts<block_end>
# Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>modelstore.models managers<import_from_stmt>modelstore.models.catboost CatBoostManager<import_from_stmt>modelstore.models.pytorch PyTorchManager<import_from_stmt>modelstore.models.pytorch_lightning PyTorchLightningManager<import_from_stmt>modelstore.models.sklearn SKLearnManager<import_from_stmt>modelstore.models.xgboost XGBoostManager<def_stmt>test_iter_libraries <block_start>mgrs={library:manager<for>library,manager managers.iter_libraries()}<assert_stmt>len(mgrs)<eq>16<assert_stmt>isinstance(mgrs["sklearn"] SKLearnManager)<assert_stmt>isinstance(mgrs["pytorch"] PyTorchManager)<assert_stmt>isinstance(mgrs["xgboost"] XGBoostManager)<assert_stmt>isinstance(mgrs["catboost"] CatBoostManager)<assert_stmt>isinstance(mgrs["pytorch_lightning"] PyTorchLightningManager)<block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>scipy.io<as>sio<import_from_stmt>PIL Image<import_from_stmt>deephar.utils *<def_stmt>load_mpii_mat_annotation filename<block_start>mat=sio.loadmat(filename)<line_sep>annot_tr=mat['annot_tr']<line_sep>annot_val=mat['annot_val']<line_sep># Respect the order of TEST (0), TRAIN (1), and VALID (2) rectidxs=[<none> annot_tr[0 :] annot_val[0 :]]<line_sep>images=[<none> annot_tr[1 :] annot_val[1 :]]<line_sep>annorect=[<none> annot_tr[2 :] annot_val[2 :]]<line_sep><return>rectidxs images annorect<block_end><def_stmt>serialize_annorect rectidxs annorect<block_start><assert_stmt>len(rectidxs)<eq>len(annorect)<line_sep>sample_list=[]<for_stmt>i range(len(rectidxs))<block_start>rec=rectidxs[i]<for_stmt>j range(rec.size)<block_start>idx=rec[j 0]-1# Convert idx from Matlab ann=annorect[i][idx 0]<line_sep>annot={}<line_sep>annot['head']=ann['head'][0 0][0]<line_sep>annot['objpos']=ann['objpos'][0 0][0]<line_sep>annot['scale']=ann['scale'][0 0][0 0]<line_sep>annot['pose']=ann['pose'][0 0]<line_sep>annot['imgidx']=i<line_sep>sample_list.append(annot)<block_end><block_end><return>sample_list<block_end><def_stmt>calc_head_size head_annot<block_start>head=np.array([float(head_annot[0]) float(head_annot[1]) float(head_annot[2]) float(head_annot[3])])<line_sep><return>0.6<times>np.linalg.norm(head[0:2]-head[2:4])<block_end><class_stmt>MpiiSinglePerson(object)<block_start>"""Implementation of the MPII dataset for single person. """<def_stmt>__init__ self dataset_path dataconf poselayout=pa16j2d remove_outer_joints=<true><block_start>self.dataset_path=dataset_path<line_sep>self.dataconf=dataconf<line_sep>self.poselayout=poselayout<line_sep>self.remove_outer_joints=remove_outer_joints<line_sep>self.load_annotations(os.path.join(dataset_path 'annotations.mat'))<block_end><def_stmt>load_annotations self filename<block_start><try_stmt><block_start>rectidxs,images,annorect=load_mpii_mat_annotation(filename)<line_sep>self.samples={}<line_sep>self.samples[TEST_MODE]=[]# No samples for test self.samples[TRAIN_MODE]=serialize_annorect(rectidxs[TRAIN_MODE] annorect[TRAIN_MODE])<line_sep>self.samples[VALID_MODE]=serialize_annorect(rectidxs[VALID_MODE] annorect[VALID_MODE])<line_sep>self.images=images<block_end><except_stmt><block_start>warning('Error loading the MPII dataset!')<line_sep><raise><block_end><block_end><def_stmt>load_image self key mode<block_start><try_stmt><block_start>annot=self.samples[mode][key]<line_sep>image=self.images[mode][annot['imgidx']][0]<line_sep>imgt=T(Image.open(os.path.join(self.dataset_path 'images' image)))<block_end><except_stmt><block_start>warning('Error loading sample key/mode: %d/%d'%(key mode))<line_sep><raise><block_end><return>imgt<block_end><def_stmt>get_data self key mode fast_crop=<false><block_start>output={}<if_stmt>mode<eq>TRAIN_MODE<block_start>dconf=self.dataconf.random_data_generator()<block_end><else_stmt><block_start>dconf=self.dataconf.get_fixed_config()<block_end>imgt=self.load_image(key mode)<line_sep>annot=self.samples[mode][key]<line_sep>scale=1.25<times>annot['scale']<line_sep>objpos=np.array([annot['objpos'][0] annot['objpos'][1]+12<times>scale])<line_sep>objpos<augadd>scale<times>np.array([dconf['transx'] dconf['transy']])<line_sep>winsize=200<times>dconf['scale']<times>scale<line_sep>winsize=(winsize winsize)<line_sep>output['bbox']=objposwin_to_bbox(objpos winsize)<if_stmt>fast_crop<block_start>"""Slightly faster method, but gives lower precision."""<line_sep>imgt.crop_resize_rotate(objpos winsize 
self.dataconf.crop_resolution dconf['angle'])<block_end><else_stmt><block_start>imgt.rotate_crop(dconf['angle'] objpos winsize)<line_sep>imgt.resize(self.dataconf.crop_resolution)<block_end><if_stmt>dconf['hflip']<eq>1<block_start>imgt.horizontal_flip()<block_end>imgt.normalize_affinemap()<line_sep>output['frame']=normalize_channels(imgt.asarray() channel_power=dconf['chpower'])<line_sep>p=np.empty((self.poselayout.num_joints self.poselayout.dim))<line_sep>p[:]=np.nan<line_sep>head=annot['head']<line_sep>p[self.poselayout.map_to_mpii 0:2]=transform_2d_points(imgt.afmat annot['pose'].T transpose=<true>)<if_stmt>imgt.hflip<block_start>p=p[self.poselayout.map_hflip :]<block_end># Set invalid joints and NaN values as an invalid value p[np.isnan(p)]=-1e9<line_sep>v=np.expand_dims(get_visible_joints(p[: 0:2]) axis=-1)<if_stmt>self.remove_outer_joints<block_start>p[(v<eq>0)[: 0] :]=-1e9<block_end>output['pose']=np.concatenate((p v) axis=-1)<line_sep>output['headsize']=calc_head_size(annot['head'])<line_sep>output['afmat']=imgt.afmat.copy()<line_sep><return>output<block_end><def_stmt>get_shape self dictkey<block_start><if_stmt>dictkey<eq>'frame'<block_start><return>self.dataconf.input_shape<block_end><if_stmt>dictkey<eq>'pose'<block_start><return>(self.poselayout.num_joints self.poselayout.dim+1)<block_end><if_stmt>dictkey<eq>'headsize'<block_start><return>(1 )<block_end><if_stmt>dictkey<eq>'afmat'<block_start><return>(3 3)<block_end><raise>Exception('Invalid dictkey on get_shape!')<block_end><def_stmt>get_length self mode<block_start><return>len(self.samples[mode])<block_end><block_end>
<import_stmt>asyncio<import_stmt>logging<import_stmt>math<import_stmt>time<import_from_stmt>decimal Decimal<import_from_stmt>typing Any AsyncIterable Dict List Optional<import_from_stmt>hummingbot.connector.exchange.digifinex digifinex_utils<import_from_stmt>hummingbot.connector.exchange.digifinex.digifinex_global DigifinexGlobal<import_from_stmt>hummingbot.connector.exchange.digifinex.digifinex_in_flight_order DigifinexInFlightOrder<import_from_stmt>hummingbot.connector.exchange.digifinex.digifinex_order_book_tracker DigifinexOrderBookTracker<import_from_stmt>hummingbot.connector.exchange.digifinex.digifinex_user_stream_tracker DigifinexUserStreamTracker<import_from_stmt>hummingbot.connector.exchange_base ExchangeBase<import_from_stmt>hummingbot.connector.trading_rule TradingRule<import_from_stmt>hummingbot.core.clock Clock<import_from_stmt>hummingbot.core.data_type.cancellation_result CancellationResult<import_from_stmt>hummingbot.core.data_type.common OpenOrder<import_from_stmt>hummingbot.core.data_type.limit_order LimitOrder<import_from_stmt>hummingbot.core.data_type.order_book OrderBook<import_from_stmt>hummingbot.core.data_type.trade_fee AddedToCostTradeFee<import_from_stmt>hummingbot.core.event.events BuyOrderCompletedEvent BuyOrderCreatedEvent MarketEvent MarketOrderFailureEvent OrderCancelledEvent OrderFilledEvent SellOrderCompletedEvent SellOrderCreatedEvent <import_from_stmt>hummingbot.core.data_type.common OrderType TradeType<import_from_stmt>hummingbot.core.network_iterator NetworkStatus<import_from_stmt>hummingbot.core.utils.async_utils safe_ensure_future safe_gather<import_from_stmt>hummingbot.core.utils.estimate_fee estimate_fee<import_from_stmt>hummingbot.logger HummingbotLogger<line_sep>ctce_logger=<none><line_sep>s_decimal_NaN=Decimal("nan")<class_stmt>DigifinexExchange(ExchangeBase)<block_start>""" DigifinexExchange connects with digifinex.com exchange and provides order book pricing, user account tracking and trading functionality. """<line_sep>API_CALL_TIMEOUT=10.0<line_sep>SHORT_POLL_INTERVAL=5.0<line_sep>UPDATE_ORDER_STATUS_MIN_INTERVAL=10.0<line_sep>LONG_POLL_INTERVAL=120.0<line_sep>@classmethod<def_stmt>logger cls<arrow>HummingbotLogger<block_start><global>ctce_logger<if_stmt>ctce_logger<is><none><block_start>ctce_logger=logging.getLogger(__name__)<block_end><return>ctce_logger<block_end><def_stmt>__init__ self digifinex_api_key:str digifinex_secret_key:str trading_pairs:Optional[List[str]]=<none> trading_required:bool=<true><block_start>""" :param key: The API key to connect to private digifinex.com APIs. :param secret: The API secret. :param trading_pairs: The market trading pairs which to track order book data. :param trading_required: Whether actual trading is needed. 
"""<line_sep>super().__init__()<line_sep>self._trading_required=trading_required<line_sep>self._trading_pairs=trading_pairs<line_sep>self._global=DigifinexGlobal(digifinex_api_key digifinex_secret_key)<line_sep># self._rest_api = DigifinexRestApi(self._digifinex_auth, self._http_client) self._order_book_tracker=DigifinexOrderBookTracker(trading_pairs=trading_pairs)<line_sep>self._user_stream_tracker=DigifinexUserStreamTracker(self._global trading_pairs)<line_sep>self._ev_loop=asyncio.get_event_loop()<line_sep>self._poll_notifier=asyncio.Event()<line_sep>self._last_timestamp=0<line_sep>self._in_flight_orders:Dict[str DigifinexInFlightOrder]={}# Dict[client_order_id:str, DigifinexInFlightOrder] self._order_not_found_records={}# Dict[client_order_id:str, count:int] self._trading_rules={}# Dict[trading_pair:str, TradingRule] self._status_polling_task=<none><line_sep>self._user_stream_event_listener_task=<none><line_sep>self._trading_rules_polling_task=<none><line_sep>self._last_poll_timestamp=0<block_end>@property<def_stmt>name self<arrow>str<block_start><return>"digifinex"<block_end>@property<def_stmt>order_books self<arrow>Dict[str OrderBook]<block_start><return>self._order_book_tracker.order_books<block_end>@property<def_stmt>trading_rules self<arrow>Dict[str TradingRule]<block_start><return>self._trading_rules<block_end>@property<def_stmt>in_flight_orders self<arrow>Dict[str DigifinexInFlightOrder]<block_start><return>self._in_flight_orders<block_end>@property<def_stmt>status_dict self<arrow>Dict[str bool]<block_start>""" A dictionary of statuses of various connector's components. """<line_sep><return>{"order_books_initialized":self._order_book_tracker.ready "account_balance":len(self._account_balances)<g>0<if>self._trading_required<else><true> "trading_rule_initialized":len(self._trading_rules)<g>0 "user_stream_initialized":self._user_stream_tracker.data_source.last_recv_time<g>0<if>self._trading_required<else><true> }<block_end>@property<def_stmt>ready self<arrow>bool<block_start>""" :return True when all statuses pass, this might take 5-10 seconds for all the connector's components and services to be ready. """<line_sep><return>all(self.status_dict.values())<block_end>@property<def_stmt>limit_orders self<arrow>List[LimitOrder]<block_start><return>[in_flight_order.to_limit_order()<for>in_flight_order self._in_flight_orders.values()]<block_end>@property<def_stmt>tracking_states self<arrow>Dict[str any]<block_start>""" :return active in-flight orders in json format, is used to save in sqlite db. """<line_sep><return>{key:value.to_json()<for>key,value self._in_flight_orders.items()<if><not>value.is_done}<block_end><def_stmt>restore_tracking_states self saved_states:Dict[str any]<block_start>""" Restore in-flight orders from saved tracking states, this is st the connector can pick up on where it left off when it disconnects. :param saved_states: The saved tracking_states. """<line_sep>self._in_flight_orders.update({key:DigifinexInFlightOrder.from_json(value)<for>key,value saved_states.items()})<block_end><def_stmt>supported_order_types self<arrow>List[OrderType]<block_start>""" :return a list of OrderType supported by this connector. Note that Market order type is no longer required and will not be used. """<line_sep><return>[OrderType.LIMIT OrderType.LIMIT_MAKER]<block_end><def_stmt>start self clock:Clock timestamp:float<block_start>""" This function is called automatically by the clock. 
"""<line_sep>super().start(clock timestamp)<block_end><def_stmt>stop self clock:Clock<block_start>""" This function is called automatically by the clock. """<line_sep>super().stop(clock)<block_end><async_keyword><def_stmt>start_network self<block_start>""" This function is required by NetworkIterator base class and is called automatically. It starts tracking order book, polling trading rules, updating statuses and tracking user data. """<line_sep>self._order_book_tracker.start()<line_sep>self._trading_rules_polling_task=safe_ensure_future(self._trading_rules_polling_loop())<if_stmt>self._trading_required<block_start>self._status_polling_task=safe_ensure_future(self._status_polling_loop())<line_sep>self._user_stream_tracker_task=safe_ensure_future(self._user_stream_tracker.start())<line_sep>self._user_stream_event_listener_task=safe_ensure_future(self._user_stream_event_listener())<block_end><block_end><async_keyword><def_stmt>stop_network self<block_start>""" This function is required by NetworkIterator base class and is called automatically. """<line_sep>self._order_book_tracker.stop()<if_stmt>self._status_polling_task<is><not><none><block_start>self._status_polling_task.cancel()<line_sep>self._status_polling_task=<none><block_end><if_stmt>self._trading_rules_polling_task<is><not><none><block_start>self._trading_rules_polling_task.cancel()<line_sep>self._trading_rules_polling_task=<none><block_end><if_stmt>self._status_polling_task<is><not><none><block_start>self._status_polling_task.cancel()<line_sep>self._status_polling_task=<none><block_end><if_stmt>self._user_stream_tracker_task<is><not><none><block_start>self._user_stream_tracker_task.cancel()<line_sep>self._user_stream_tracker_task=<none><block_end><if_stmt>self._user_stream_event_listener_task<is><not><none><block_start>self._user_stream_event_listener_task.cancel()<line_sep>self._user_stream_event_listener_task=<none><block_end><block_end><async_keyword><def_stmt>check_network self<arrow>NetworkStatus<block_start>""" This function is required by NetworkIterator base class and is called periodically to check the network connection. Simply ping the network (or call any light weight public API). """<try_stmt><block_start><await>self._global.rest_api.request("get" "ping")<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>_=e<line_sep>self.logger().exception('check_network' stack_info=<true>)<line_sep><return>NetworkStatus.NOT_CONNECTED<block_end><return>NetworkStatus.CONNECTED<block_end><async_keyword><def_stmt>_trading_rules_polling_loop self<block_start>""" Periodically update trading rule. """<while_stmt><true><block_start><try_stmt><block_start><await>self._update_trading_rules()<line_sep><await>asyncio.sleep(60)<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>self.logger().network(f"Unexpected error while fetching trading rules. Error: {str(e)}" exc_info=<true> app_warning_msg="Could not fetch new trading rules from digifinex.com. 
"<concat>"Check network connection.")<line_sep><await>asyncio.sleep(0.5)<block_end><block_end><block_end><async_keyword><def_stmt>_update_trading_rules self<block_start>instruments_info=<await>self._global.rest_api.request("get" path_url="markets")<line_sep>self._trading_rules.clear()<line_sep>self._trading_rules=self._format_trading_rules(instruments_info)<block_end><def_stmt>_format_trading_rules self instruments_info:Dict[str Any]<arrow>Dict[str TradingRule]<block_start>""" Converts json API response into a dictionary of trading rules. :param instruments_info: The json API response :return A dictionary of trading rules. Response Example: { "data": [{ "volume_precision": 4, "price_precision": 2, "market": "btc_usdt", "min_amount": 2, "min_volume": 0.0001 }], "date": 1589873858, "code": 0 } """<line_sep>result={}<for_stmt>rule instruments_info["data"]<block_start><try_stmt><block_start>trading_pair=digifinex_utils.convert_from_exchange_trading_pair(rule["market"])<line_sep>price_decimals=Decimal(str(rule["price_precision"]))<line_sep>quantity_decimals=Decimal(str(rule["volume_precision"]))<line_sep># E.g. a price decimal of 2 means 0.01 incremental. price_step=Decimal("1")/Decimal(str(math.pow(10 price_decimals)))<line_sep>quantity_step=Decimal("1")/Decimal(str(math.pow(10 quantity_decimals)))<line_sep>result[trading_pair]=TradingRule(trading_pair min_price_increment=price_step min_base_amount_increment=quantity_step)<block_end><except_stmt>Exception<block_start>self.logger().error(f"Error parsing the trading pair rule {rule}. Skipping." exc_info=<true>)<block_end><block_end><return>result<block_end><def_stmt>get_order_price_quantum self trading_pair:str price:Decimal<block_start>""" Returns a price step, a minimum price increment for a given trading pair. """<line_sep>trading_rule=self._trading_rules[trading_pair]<line_sep><return>trading_rule.min_price_increment<block_end><def_stmt>get_order_size_quantum self trading_pair:str order_size:Decimal<block_start>""" Returns an order amount step, a minimum amount increment for a given trading pair. """<line_sep>trading_rule=self._trading_rules[trading_pair]<line_sep><return>Decimal(trading_rule.min_base_amount_increment)<block_end><def_stmt>get_order_book self trading_pair:str<arrow>OrderBook<block_start><if_stmt>trading_pair<not><in>self._order_book_tracker.order_books<block_start><raise>ValueError(f"No order book exists for '{trading_pair}'.")<block_end><return>self._order_book_tracker.order_books[trading_pair]<block_end><def_stmt>buy self trading_pair:str amount:Decimal order_type=OrderType.MARKET price:Decimal=s_decimal_NaN **kwargs<arrow>str<block_start>""" Buys an amount of base asset (of the given trading pair). This function returns immediately. To see an actual order, you'll have to wait for BuyOrderCreatedEvent. :param trading_pair: The market (e.g. BTC-USDT) to buy from :param amount: The amount in base token value :param order_type: The order type :param price: The price (note: this is no longer optional) :returns A new internal order id """<line_sep>order_id:str=digifinex_utils.get_new_client_order_id(<true> trading_pair)<line_sep>safe_ensure_future(self._create_order(TradeType.BUY order_id trading_pair amount order_type price))<line_sep><return>order_id<block_end><def_stmt>sell self trading_pair:str amount:Decimal order_type=OrderType.MARKET price:Decimal=s_decimal_NaN **kwargs<arrow>str<block_start>""" Sells an amount of base asset (of the given trading pair). This function returns immediately. 
To see an actual order, you'll have to wait for SellOrderCreatedEvent. :param trading_pair: The market (e.g. BTC-USDT) to sell from :param amount: The amount in base token value :param order_type: The order type :param price: The price (note: this is no longer optional) :returns A new internal order id """<line_sep>order_id:str=digifinex_utils.get_new_client_order_id(<false> trading_pair)<line_sep>safe_ensure_future(self._create_order(TradeType.SELL order_id trading_pair amount order_type price))<line_sep><return>order_id<block_end><def_stmt>cancel self trading_pair:str order_id:str<block_start>""" Cancel an order. This function returns immediately. To get the cancellation result, you'll have to wait for OrderCancelledEvent. :param trading_pair: The market (e.g. BTC-USDT) of the order. :param order_id: The internal order id (also called client_order_id) """<line_sep>tracked_order=self._in_flight_orders.get(order_id)<if_stmt>tracked_order<is><none><block_start><raise>ValueError(f"Failed to cancel order - {order_id}. Order not found.")<block_end><if_stmt>tracked_order.exchange_order_id<is><none><block_start>self.ev_loop.run_until_complete(tracked_order.get_exchange_order_id())<block_end>safe_ensure_future(self._execute_cancel(tracked_order))<line_sep><return>order_id<block_end><async_keyword><def_stmt>_create_order self trade_type:TradeType order_id:str trading_pair:str amount:Decimal order_type:OrderType price:Decimal<block_start>""" Calls create-order API end point to place an order, starts tracking the order and triggers order created event. :param trade_type: BUY or SELL :param order_id: Internal order id (also called client_order_id) :param trading_pair: The market to place order :param amount: The order amount (in base token value) :param order_type: The order type :param price: The order price """<if_stmt><not>order_type.is_limit_type()<block_start><raise>Exception(f"Unsupported order type: {order_type}")<block_end>trading_rule=self._trading_rules[trading_pair]<line_sep>amount=self.quantize_order_amount(trading_pair amount)<line_sep>price=self.quantize_order_price(trading_pair price)<if_stmt>amount<l>trading_rule.min_order_size<block_start><raise>ValueError(f"Buy order amount {amount} is lower than the minimum order size "<concat>f"{trading_rule.min_order_size}.")<block_end>symbol=digifinex_utils.convert_to_exchange_trading_pair(trading_pair)<line_sep>api_params={"symbol":symbol "type":trade_type.name.lower() "price":f"{price:f}" "amount":f"{amount:f}" # "client_oid": order_id }<if_stmt>order_type<is>OrderType.LIMIT_MAKER<block_start>api_params["post_only"]=1<block_end>self.start_tracking_order(order_id <none> trading_pair trade_type price amount order_type)<try_stmt><block_start>order_result=<await>self._global.rest_api.request("post" "spot/order/new" api_params <true>)<line_sep>exchange_order_id=str(order_result["order_id"])<line_sep>tracked_order=self._in_flight_orders.get(order_id)<if_stmt>tracked_order<is><not><none><block_start>self.logger().info(f"Created {order_type.name} {trade_type.name} order {order_id} for "<concat>f"{amount} {trading_pair}.")<line_sep>tracked_order.update_exchange_order_id(exchange_order_id)<block_end>event_tag=MarketEvent.BuyOrderCreated<if>trade_type<is>TradeType.BUY<else>MarketEvent.SellOrderCreated<line_sep>event_class=BuyOrderCreatedEvent<if>trade_type<is>TradeType.BUY<else>SellOrderCreatedEvent<line_sep>self.trigger_event(event_tag event_class(self.current_timestamp order_type trading_pair amount price order_id tracked_order.creation_timestamp 
))<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>self.stop_tracking_order(order_id)<line_sep>self.logger().network(f"Error submitting {trade_type.name} {order_type.name} order to Digifinex for "<concat>f"{amount} {trading_pair} "<concat>f"{price}." exc_info=<true> app_warning_msg=str(e))<line_sep>self.trigger_event(MarketEvent.OrderFailure MarketOrderFailureEvent(self.current_timestamp order_id order_type))<block_end><block_end><def_stmt>start_tracking_order self order_id:str exchange_order_id:str trading_pair:str trade_type:TradeType price:Decimal amount:Decimal order_type:OrderType<block_start>""" Starts tracking an order by simply adding it into _in_flight_orders dictionary. """<line_sep>self._in_flight_orders[order_id]=DigifinexInFlightOrder(client_order_id=order_id exchange_order_id=exchange_order_id trading_pair=trading_pair order_type=order_type trade_type=trade_type price=price amount=amount creation_timestamp=self.current_timestamp)<block_end><def_stmt>stop_tracking_order self order_id:str<block_start>""" Stops tracking an order by simply removing it from _in_flight_orders dictionary. """<if_stmt>order_id<in>self._in_flight_orders<block_start><del_stmt>self._in_flight_orders[order_id]<block_end><block_end><async_keyword><def_stmt>_execute_cancel self o:DigifinexInFlightOrder<arrow>str<block_start>""" Executes order cancellation process by first calling cancel-order API. The API result doesn't confirm whether the cancellation is successful, it simply states it receives the request. :param trading_pair: The market trading pair :param order_id: The internal order id order.last_state to change to CANCELED """<try_stmt><block_start><await>self._global.rest_api.request("post" "spot/order/cancel" {"order_id":o.exchange_order_id} <true>)<if_stmt>o.client_order_id<in>self._in_flight_orders<block_start>self.trigger_event(MarketEvent.OrderCancelled OrderCancelledEvent(self.current_timestamp o.client_order_id))<del_stmt>self._in_flight_orders[o.client_order_id]<block_end><return>o.exchange_order_id<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>self.logger().network(f"Failed to cancel order {o.exchange_order_id}: {str(e)}" exc_info=<true> app_warning_msg=f"Failed to cancel the order {o.exchange_order_id} on Digifinex. "<concat>f"Check API key and network connection.")<block_end><block_end><async_keyword><def_stmt>_status_polling_loop self<block_start>""" Periodically update user balances and order status via REST API. This serves as a fallback measure for web socket API updates. """<while_stmt><true><block_start><try_stmt><block_start>self._poll_notifier=asyncio.Event()<line_sep><await>self._poll_notifier.wait()<line_sep><await>safe_gather(self._update_balances() self._update_order_status() )<line_sep>self._last_poll_timestamp=self.current_timestamp<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>self.logger().error(str(e) exc_info=<true>)<line_sep>self.logger().network("Unexpected error while fetching account updates." exc_info=<true> app_warning_msg="Could not fetch account updates from Digifinex. 
"<concat>"Check API key and network connection.")<line_sep><await>asyncio.sleep(0.5)<block_end><block_end><block_end><async_keyword><def_stmt>_update_balances self<block_start>local_asset_names=set(self._account_balances.keys())<line_sep>remote_asset_names=set()<line_sep>account_info=<await>self._global.rest_api.get_balance()<for_stmt>account account_info["list"]<block_start>asset_name=account["currency"]<line_sep>self._account_available_balances[asset_name]=Decimal(str(account["free"]))<line_sep>self._account_balances[asset_name]=Decimal(str(account["total"]))<line_sep>remote_asset_names.add(asset_name)<block_end><try_stmt><block_start>asset_names_to_remove=local_asset_names.difference(remote_asset_names)<for_stmt>asset_name asset_names_to_remove<block_start><del_stmt>self._account_available_balances[asset_name]<del_stmt>self._account_balances[asset_name]<block_end><block_end><except_stmt>Exception<as>e<block_start>self.logger().error(e)<block_end><block_end><async_keyword><def_stmt>_update_order_status self<block_start>""" Calls REST API to get status update for each in-flight order. """<line_sep>last_tick=int(self._last_poll_timestamp/self.UPDATE_ORDER_STATUS_MIN_INTERVAL)<line_sep>current_tick=int(self.current_timestamp/self.UPDATE_ORDER_STATUS_MIN_INTERVAL)<if_stmt>current_tick<g>last_tick<and>len(self._in_flight_orders)<g>0<block_start>tracked_orders=list(self._in_flight_orders.values())<line_sep>tasks=[]<for_stmt>tracked_order tracked_orders<block_start>order_id=<await>tracked_order.get_exchange_order_id()<line_sep>tasks.append(self._global.rest_api.request("get" "spot/order/detail" {"order_id":order_id} <true>))<block_end>self.logger().debug(f"Polling for order status updates of {len(tasks)} orders.")<line_sep>update_results=<await>safe_gather(*tasks return_exceptions=<true>)<for_stmt>update_result update_results<block_start><if_stmt>isinstance(update_result Exception)<block_start><raise>update_result<block_end><if_stmt>"data"<not><in>update_result<block_start>self.logger().info(f"_update_order_status result not in resp: {update_result}")<line_sep><continue><block_end>order_data=update_result["data"]<line_sep>self._process_rest_trade_details(order_data)<line_sep>self._process_order_status(order_data.get('order_id') order_data.get('status'))<block_end><block_end><block_end><def_stmt>_process_order_status self exchange_order_id:str status:int<block_start>""" Updates in-flight order and triggers cancellation or failure event if needed. """<line_sep>tracked_order=self.find_exchange_order(exchange_order_id)<if_stmt>tracked_order<is><none><block_start><return><block_end>client_order_id=tracked_order.client_order_id<line_sep># Update order execution status tracked_order.last_state=str(status)<if_stmt>tracked_order.is_cancelled<block_start>self.logger().info(f"Successfully canceled order {client_order_id}.")<line_sep>self.trigger_event(MarketEvent.OrderCancelled OrderCancelledEvent(self.current_timestamp client_order_id))<line_sep>tracked_order.cancelled_event.set()<line_sep>self.stop_tracking_order(client_order_id)<block_end># elif tracked_order.is_failure: # self.logger().info(f"The market order {client_order_id} has failed according to order status API. 
" # f"Reason: {digifinex_utils.get_api_reason(order_msg['reason'])}") # self.trigger_event(MarketEvent.OrderFailure, # MarketOrderFailureEvent( # self.current_timestamp, # client_order_id, # tracked_order.order_type # )) # self.stop_tracking_order(client_order_id) <block_end><def_stmt>_process_rest_trade_details self order_detail_msg:Any<block_start><for_stmt>trade_msg order_detail_msg['detail']<block_start>""" Updates in-flight order and trigger order filled event for trade message received. Triggers order completed event if the total executed amount equals to the specified order amount. """<line_sep># for order in self._in_flight_orders.values(): # await order.get_exchange_order_id() tracked_order=self.find_exchange_order(trade_msg['order_id'])<if_stmt>tracked_order<is><none><block_start><return><block_end>updated=tracked_order.update_with_rest_order_detail(trade_msg)<if_stmt><not>updated<block_start><return><block_end>self.trigger_event(MarketEvent.OrderFilled OrderFilledEvent(self.current_timestamp tracked_order.client_order_id tracked_order.trading_pair tracked_order.trade_type tracked_order.order_type Decimal(str(trade_msg["executed_price"])) Decimal(str(trade_msg["executed_amount"])) estimate_fee(self.name tracked_order.order_type<in>[OrderType.LIMIT OrderType.LIMIT_MAKER]) # TradeFee(0.0, [(trade_msg["fee_currency"], Decimal(str(trade_msg["fee"])))]), exchange_trade_id=trade_msg["tid"]))<if_stmt>math.isclose(tracked_order.executed_amount_base tracked_order.amount)<or>tracked_order.executed_amount_base<ge>tracked_order.amount<block_start>tracked_order.last_state="FILLED"<line_sep>self.logger().info(f"The {tracked_order.trade_type.name} order "<concat>f"{tracked_order.client_order_id} has completed "<concat>f"according to order status API.")<line_sep>event_tag=MarketEvent.BuyOrderCompleted<if>tracked_order.trade_type<is>TradeType.BUY<else>MarketEvent.SellOrderCompleted<line_sep>event_class=BuyOrderCompletedEvent<if>tracked_order.trade_type<is>TradeType.BUY<else>SellOrderCompletedEvent<line_sep>self.trigger_event(event_tag event_class(self.current_timestamp tracked_order.client_order_id tracked_order.base_asset tracked_order.quote_asset tracked_order.executed_amount_base tracked_order.executed_amount_quote tracked_order.order_type))<line_sep>self.stop_tracking_order(tracked_order.client_order_id)<block_end><block_end><block_end><def_stmt>find_exchange_order self exchange_order_id:str<block_start><for_stmt>o self._in_flight_orders.values()<block_start><if_stmt>o.exchange_order_id<eq>exchange_order_id<block_start><return>o<block_end><block_end><block_end><def_stmt>_process_order_message_traded self order_msg<block_start>tracked_order:DigifinexInFlightOrder=self.find_exchange_order(order_msg['id'])<if_stmt>tracked_order<is><none><block_start><return><block_end>(delta_trade_amount delta_trade_price)=tracked_order.update_with_order_update(order_msg)<if_stmt><not>delta_trade_amount<block_start><return><block_end>self.trigger_event(MarketEvent.OrderFilled OrderFilledEvent(self.current_timestamp tracked_order.client_order_id tracked_order.trading_pair tracked_order.trade_type tracked_order.order_type delta_trade_price delta_trade_amount estimate_fee(self.name tracked_order.order_type<in>[OrderType.LIMIT OrderType.LIMIT_MAKER]) # TradeFee(0.0, [(trade_msg["fee_currency"], Decimal(str(trade_msg["fee"])))]), exchange_trade_id=str(int(self._time()<times>1e6))))<if_stmt>math.isclose(tracked_order.executed_amount_base 
tracked_order.amount)<or>tracked_order.executed_amount_base<ge>tracked_order.amount<block_start>tracked_order.last_state="2"<line_sep>self.logger().info(f"The {tracked_order.trade_type.name} order "<concat>f"{tracked_order.client_order_id} has completed "<concat>f"according to order status API.")<line_sep>event_tag=MarketEvent.BuyOrderCompleted<if>tracked_order.trade_type<is>TradeType.BUY<else>MarketEvent.SellOrderCompleted<line_sep>event_class=BuyOrderCompletedEvent<if>tracked_order.trade_type<is>TradeType.BUY<else>SellOrderCompletedEvent<line_sep>self.trigger_event(event_tag event_class(self.current_timestamp tracked_order.client_order_id tracked_order.base_asset tracked_order.quote_asset tracked_order.executed_amount_base tracked_order.executed_amount_quote tracked_order.order_type))<line_sep>self.stop_tracking_order(tracked_order.client_order_id)<block_end><block_end><async_keyword><def_stmt>cancel_all self timeout_seconds:float<block_start>""" Cancels all in-flight orders and waits for cancellation results. Used by bot's top level stop and exit commands (cancelling outstanding orders on exit) :param timeout_seconds: The timeout at which the operation will be canceled. :returns List of CancellationResult which indicates whether each order is successfully cancelled. """<if_stmt>self._trading_pairs<is><none><block_start><raise>Exception("cancel_all can only be used when trading_pairs are specified.")<block_end>cancellation_results=[]<try_stmt># for trading_pair in self._trading_pairs: # await self._global.rest_api.request( # "post", # "private/cancel-all-orders", # {"instrument_name": digifinex_utils.convert_to_exchange_trading_pair(trading_pair)}, # True # ) <block_start>open_orders=list(self._in_flight_orders.values())<for_stmt>o open_orders<block_start><await>self._execute_cancel(o)<block_end><for_stmt>cl_order_id,tracked_order self._in_flight_orders.items()<block_start>open_order=[o<for>o open_orders<if>o.exchange_order_id<eq>tracked_order.exchange_order_id]<if_stmt><not>open_order<block_start>cancellation_results.append(CancellationResult(cl_order_id <true>))<line_sep># self.trigger_event(MarketEvent.OrderCancelled, # OrderCancelledEvent(self.current_timestamp, cl_order_id)) <block_end><else_stmt><block_start>cancellation_results.append(CancellationResult(cl_order_id <false>))<block_end><block_end><block_end><except_stmt>Exception<block_start>self.logger().network("Failed to cancel all orders." exc_info=<true> app_warning_msg="Failed to cancel all orders on Digifinex. Check API key and network connection.")<block_end><return>cancellation_results<block_end><def_stmt>tick self timestamp:float<block_start>""" Is called automatically by the clock for each clock's tick (1 second by default). It checks if status polling task is due for execution. 
"""<line_sep>now=time.time()<line_sep>poll_interval=(self.SHORT_POLL_INTERVAL<if>now-self._user_stream_tracker.last_recv_time<g>60.0<else>self.LONG_POLL_INTERVAL)<line_sep>last_tick=int(self._last_timestamp/poll_interval)<line_sep>current_tick=int(timestamp/poll_interval)<if_stmt>current_tick<g>last_tick<block_start><if_stmt><not>self._poll_notifier.is_set()<block_start>self._poll_notifier.set()<block_end><block_end>self._last_timestamp=timestamp<block_end><def_stmt>get_fee self base_currency:str quote_currency:str order_type:OrderType order_side:TradeType amount:Decimal price:Decimal=s_decimal_NaN is_maker:Optional[bool]=<none><arrow>AddedToCostTradeFee<block_start>""" To get trading fee, this function is simplified by using fee override configuration. Most parameters to this function are ignore except order_type. Use OrderType.LIMIT_MAKER to specify you want trading fee for maker order. """<line_sep>is_maker=order_type<is>OrderType.LIMIT_MAKER<line_sep><return>AddedToCostTradeFee(percent=self.estimate_fee_pct(is_maker))<block_end><async_keyword><def_stmt>_iter_user_event_queue self<arrow>AsyncIterable[Dict[str any]]<block_start><while_stmt><true><block_start><try_stmt><block_start><yield><await>self._user_stream_tracker.user_stream.get()<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<block_start>self.logger().network("Unknown error. Retrying after 1 seconds." exc_info=<true> app_warning_msg="Could not fetch user events from Digifinex. Check API key and network connection.")<line_sep><await>asyncio.sleep(1.0)<block_end><block_end><block_end><async_keyword><def_stmt>_user_stream_event_listener self<block_start>""" Listens to message in _user_stream_tracker.user_stream queue. The messages are put in by DigifinexAPIUserStreamDataSource. """<async_keyword><for_stmt>event_message self._iter_user_event_queue()<block_start><try_stmt><block_start><if_stmt>"method"<not><in>event_message<block_start><continue><block_end>channel=event_message["method"]<line_sep># if "user.trade" in channel: # for trade_msg in event_message["result"]["data"]: # await self._process_trade_message(trade_msg) <if_stmt>"order.update"<in>channel<block_start><for_stmt>order_msg event_message["params"]<block_start>self._process_order_status(order_msg['id'] order_msg['status'])<line_sep>self._process_order_message_traded(order_msg)<block_end><block_end><elif_stmt>channel<eq>"balance.update"<block_start>balances=event_message["params"]<for_stmt>balance_entry balances<block_start>asset_name=balance_entry["currency"]<line_sep>self._account_balances[asset_name]=Decimal(str(balance_entry["total"]))<line_sep>self._account_available_balances[asset_name]=Decimal(str(balance_entry["free"]))<block_end><block_end><block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><except_stmt>Exception<block_start>self.logger().error("Unexpected error in user stream listener loop." 
exc_info=<true>)<line_sep><await>asyncio.sleep(5.0)<block_end><block_end><block_end><async_keyword><def_stmt>get_open_orders self<arrow>List[OpenOrder]<block_start>result=<await>self._global.rest_api.request("get" "spot/order/current" {} <true>)<line_sep>ret_val=[]<for_stmt>order result["data"]# if digifinex_utils.HBOT_BROKER_ID not in order["client_oid"]: # continue <block_start><if_stmt>order["type"]<not><in>["buy" "sell"]<block_start><raise>Exception(f"Unsupported order type {order['type']}")<block_end>ret_val.append(OpenOrder(client_order_id=<none> trading_pair=digifinex_utils.convert_from_exchange_trading_pair(order["symbol"]) price=Decimal(str(order["price"])) amount=Decimal(str(order["amount"])) executed_amount=Decimal(str(order["executed_amount"])) status=order["status"] order_type=OrderType.LIMIT is_buy=<true><if>order["type"]<eq>"buy"<else><false> time=int(order["created_date"]) exchange_order_id=order["order_id"]))<block_end><return>ret_val<block_end><block_end>
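# --- Hedged illustration, not part of the original connector ---
# Shape of the payload that _create_order() above submits to "spot/order/new"
# when placing a limit-maker buy. The symbol format and the numeric values
# are assumptions for illustration only.
example_api_params = {
    "symbol": "btc_usdt",      # assumed result of convert_to_exchange_trading_pair("BTC-USDT")
    "type": "buy",             # TradeType.BUY.name.lower()
    "price": "20000.000000",   # f"{price:f}"
    "amount": "0.010000",      # f"{amount:f}"
    "post_only": 1,            # added only when order_type is OrderType.LIMIT_MAKER
}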
# -*- coding: utf-8 -*- <import_stmt>re<import_stmt>pandas<as>pd<import_from_stmt>pandas DataFrame<import_from_stmt>dateutil.relativedelta relativedelta<import_from_stmt>arelle.ModelXbrl ModelXbrl<import_from_stmt>arelle XbrlConst<import_from_stmt>dart_fss.utils str_to_regex<import_from_stmt>dart_fss.xbrl.helper cls_label_check get_label_list cls_merge_type cls_datetime_check get_max_depth get_value_from_dataset generate_df_columns generate_df_rows flatten get_title <class_stmt>Table(object)<block_start>""" XBRL Table A class holding financial statement information based on the data extracted from an XBRL file Attributes ---------- parent: str Name of the loaded file xbrl: ModelXbrl arelle Xbrl class """<def_stmt>__init__ self parent xbrl code definition uri<block_start>self.parent=parent<line_sep>self.code=code<line_sep>self.definition=definition<line_sep>self.uri=uri<line_sep>self._xbrl=xbrl<line_sep>self._facts=<none><line_sep>self._dataset=<none><line_sep>self._cls=<none><line_sep>self._labels=<none><block_end>@property<def_stmt>facts self<block_start>"""list of modelFact: """<if_stmt>self._facts<is><none><block_start>arcrole=XbrlConst.parentChild<line_sep>relation=self._xbrl.relationshipSet(arcrole self.uri)<line_sep>facts=[]<for_stmt>fact self._xbrl.facts<block_start><if_stmt>relation.fromModelObject(fact.concept)<or>relation.toModelObject(fact.concept)<block_start>facts.append(fact)<block_end><block_end>self._facts=facts<block_end><return>self._facts<block_end>@property<def_stmt>dataset self<block_start>"""dict of modelFact: """<if_stmt>self._dataset<is><none><block_start>dataset=dict()<for_stmt>fact self.facts<block_start>object_id=fact.context.objectId()<if_stmt>dataset.get(object_id)<is><none><block_start>dataset[object_id]=[]<block_end>dataset[object_id].append(fact)<block_end>self._dataset=dataset<block_end><return>self._dataset<block_end>@property<def_stmt>cls self<block_start>"""Returns the classifications"""<if_stmt>self._cls<is><none><block_start>self._get_cls()<block_end><return>self._cls<block_end><def_stmt>cls_filter self start_dt=<none> end_dt=<none> label=<none><block_start>""" Classification filtering function Parameters ---------- start_dt: str Search start date end_dt: str Search end date label: str Label name to include Returns ------- list of cls Filtered classifications """<line_sep><return>[item<for>item self.cls<if>cls_datetime_check(item start_dt end_dt)<and>cls_label_check(item label)]<block_end><def_stmt>_get_cls self<block_start>""" Extracts classification information"""<line_sep>contexts=set()<for_stmt>data self.facts<block_start>context=data.context<line_sep>contexts.add(context)<block_end>cls=list()<for_stmt>context contexts<block_start>object_id=context.objectId()<line_sep># ignore contexts that have no data <if_stmt>len(self.dataset[object_id])<l>1<block_start><continue><block_end>instant_datetime=<none><line_sep>start_datetime=<none><line_sep>end_datetime=<none><if_stmt>context.isInstantPeriod<is><true><block_start>instant_datetime=context.instantDatetime-relativedelta(days=1)<block_end><else_stmt><block_start>start_datetime=context.startDatetime<line_sep>end_datetime=context.endDatetime-relativedelta(days=1)<block_end>label=dict()<line_sep>dims=context.qnameDims<if_stmt>len(dims)<g>0<block_start><for_stmt>dimQname sorted(dims.keys() key=<lambda>d:str(d) reverse=<true>)<block_start>dim_value=dims[dimQname]<line_sep>ko=dim_value.member.label(lang='ko')<line_sep>ko=re.sub(r'\[.*?\]' '' ko)<line_sep>en=dim_value.member.label(lang='en')<line_sep>en=re.sub(r'\[.*?\]' '' en)<line_sep>label[dimQname]={'ko':ko 'en':en}<block_end><block_end>_cls={'cls_id':object_id 'instant_datetime':instant_datetime 
'start_datetime':start_datetime 'end_datetime':end_datetime 'label':label}<line_sep>cls.append(_cls)<block_end>cls.sort(key=<lambda>x:x.get('instant_datetime')<or>x.get('start_datetime') reverse=<true>)<line_sep>self._cls=cls<line_sep><return>self._cls<block_end>@property<def_stmt>labels self<block_start>"""Returns the labels"""<if_stmt>self._labels<is><none><block_start>arcrole=XbrlConst.parentChild<line_sep>relationship_set=self._xbrl.relationshipSet(arcrole self.uri)<line_sep>root_concept=relationship_set.rootConcepts[0]<line_sep>labels=get_label_list(relationship_set root_concept)<line_sep>self._labels=labels<block_end><return>self._labels<block_end><def_stmt>to_DataFrame self cls=<none> lang='ko' start_dt=<none> end_dt=<none> label=<none> show_abstract=<false> show_class=<true> show_depth=10 show_concept=<true> separator=<true><block_start>""" Converts the table to a pandas DataFrame Parameters ---------- cls: dict, optional classification lang: str, optional 'ko' Korean or 'en' English start_dt: str, optional Search start date end_dt: str, optional Search end date label: str, optional Words to include in the column label show_abstract: bool, optional Whether to show abstract items show_class: bool, optional Whether to show classes show_depth: int, optional Depth of classes to show show_concept: bool, optional Whether to show the concept_id separator: bool, optional Whether to use a thousands separator Returns ------- DataFrame Financial statement DataFrame """<if_stmt>cls<is><none><block_start>cls=self.cls_filter(start_dt end_dt label)<block_end>cls=cls_merge_type(cls)<line_sep>depth=get_max_depth(self.labels show_abstract=show_abstract)<line_sep>depth=depth<if>depth<l>show_depth<else>show_depth<line_sep>table=self.parent.get_table_by_code('d999004')<line_sep>unit=get_value_from_dataset(table.cls table.dataset 'dart-gcd_EntityReportingCurrencyISOCode')<line_sep>definition=self.definition+' (Unit: {})'.format(unit[0])<line_sep>columns=generate_df_columns(definition cls depth lang show_concept=show_concept show_class=show_class)<if_stmt>separator<block_start>pd.options.display.float_format='{:,}'.format<block_end><else_stmt><block_start>pd.options.display.float_format='{:}'.format<block_end>df=pd.DataFrame(columns=columns)<line_sep>rows=generate_df_rows(self.labels cls self.dataset depth lang=lang show_abstract=show_abstract show_concept=show_concept show_class=show_class)<line_sep>data=flatten(rows)<for_stmt>idx,r enumerate(data)<block_start>df.loc[idx]=r<block_end>regex_pass=str_to_regex('concept_id OR label_ko OR label_en OR class')<line_sep>df_count=df.count()<line_sep>drop_columns=[]<for_stmt>key,count df_count.items()<block_start><if_stmt>regex_pass.search(' '.join(key[1]))<block_start><pass><block_end><elif_stmt>count<le>1<block_start>drop_columns.append(key)<block_end><block_end>df=df.drop(drop_columns axis=1)<line_sep><return>df<block_end><def_stmt>get_value_by_concept_id self concept_id start_dt=<none> end_dt=<none> label=<none> lang='en'<block_start>""" Finds values using a concept_id Parameters ---------- concept_id: str concept_id of the financial statement account start_dt: str Search start date end_dt: str Search end date label: str Label to include in the search lang: str 'ko' Korean / 'en' English Returns ------- dict of (str or float) { column name : value } """<line_sep>cls=self.cls_filter(start_dt end_dt label)<line_sep>data=get_value_from_dataset(classification=cls dataset=self.dataset concept_id=concept_id)<line_sep>results=dict()<for_stmt>c,d zip(cls data)<block_start>title=get_title(c lang=lang)<line_sep>results[title]=d<block_end><return>results<block_end><def_stmt>__repr__ self<block_start>info={'code':self.code 'definition':self.definition}<line_sep><return>str(info)<block_end><block_end>
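# --- Hedged usage sketch, not part of the original dart_fss source ---
# How the Table class above is typically driven: filter classifications by
# date, render the statement as a DataFrame, or pull a single concept. Here
# `table` is assumed to be a Table instance obtained from a loaded XBRL
# document, and the dates and concept_id are illustrative only.
cls_2018 = table.cls_filter(start_dt='20180101', end_dt='20181231')
df = table.to_DataFrame(cls=cls_2018, lang='en', show_depth=2)
equity = table.get_value_by_concept_id('ifrs_Equity', lang='en')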
# model model=Model()<line_sep>i1=Input("op1" "TENSOR_FLOAT32" "{2, 2, 2, 2}")<line_sep>i3=Output("op3" "TENSOR_FLOAT32" "{2, 2, 2, 2}")<line_sep>model=model.Operation("RSQRT" i1).To(i3)<line_sep># Example 1. Input in operand 0, input0={i1:# input 0 [1.0 36.0 2.0 90 4.0 16.0 25.0 100.0 23.0 19.0 40.0 256.0 4.0 43.0 8.0 36.0]}<line_sep>output0={i3:# output 0 [1.0 0.166667 0.70710678118 0.105409 0.5 0.25 0.2 0.1 0.208514 0.229416 0.158114 0.0625 0.5 0.152499 0.35355339059 0.166667]}<line_sep># Instantiate an example Example((input0 output0))<line_sep>
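# --- Hedged sanity check, not part of the original test file ---
# The expected outputs above are simply 1/sqrt(x) for each input element;
# a quick NumPy reproduction (NumPy availability is assumed):
import numpy as np
rsqrt_inputs = np.array([1.0, 36.0, 2.0, 90.0, 4.0, 16.0, 25.0, 100.0,
                         23.0, 19.0, 40.0, 256.0, 4.0, 43.0, 8.0, 36.0])
rsqrt_expected = 1.0 / np.sqrt(rsqrt_inputs)  # e.g. 1/sqrt(36) ~= 0.166667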
"""Javascript callbacks integration tests."""<import_from_future_stmt> unicode_literals<import_from_stmt>endless_pagination.tests.integration SeleniumTestCase<class_stmt>CallbacksTest(SeleniumTestCase)<block_start>view_name='callbacks'<def_stmt>notifications_loaded self driver<block_start><return>driver.find_elements_by_id('fragment')<block_end><def_stmt>assertNotificationsEqual self notifications<block_start>"""Assert the given *notifications* equal the ones in the DOM."""<line_sep>self.wait_ajax().until(self.notifications_loaded)<line_sep>find=self.selenium.find_element_by_id<for_stmt>key,value notifications.items()<block_start>self.assertEqual(value find(key).text)<block_end><block_end><def_stmt>test_on_click self# Ensure the onClick callback is correctly called. <block_start>self.get()<line_sep>self.click_link(2)<line_sep>self.assertNotificationsEqual({'onclick':'Object 1' 'onclick-label':'2' 'onclick-url':'/callbacks/?page=2' 'onclick-key':'page' })<block_end><def_stmt>test_on_completed self# Ensure the onCompleted callback is correctly called. <block_start>self.get(page=10)<line_sep>self.click_link(1)<line_sep>self.assertNotificationsEqual({'oncompleted':'Object 1' 'oncompleted-label':'1' 'oncompleted-url':'/callbacks/' 'oncompleted-key':'page' 'fragment':'Object 3' })<block_end><block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>AwrDbParameterSummary(object)<block_start>""" The summary of the AWR change history data for a single database parameter. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new AwrDbParameterSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param name: The value to assign to the name property of this AwrDbParameterSummary. :type name: str :param instance_number: The value to assign to the instance_number property of this AwrDbParameterSummary. :type instance_number: int :param begin_value: The value to assign to the begin_value property of this AwrDbParameterSummary. :type begin_value: str :param end_value: The value to assign to the end_value property of this AwrDbParameterSummary. :type end_value: str :param is_changed: The value to assign to the is_changed property of this AwrDbParameterSummary. :type is_changed: bool :param value_modified: The value to assign to the value_modified property of this AwrDbParameterSummary. :type value_modified: str :param is_default: The value to assign to the is_default property of this AwrDbParameterSummary. :type is_default: bool """<line_sep>self.swagger_types={'name':'str' 'instance_number':'int' 'begin_value':'str' 'end_value':'str' 'is_changed':'bool' 'value_modified':'str' 'is_default':'bool'}<line_sep>self.attribute_map={'name':'name' 'instance_number':'instanceNumber' 'begin_value':'beginValue' 'end_value':'endValue' 'is_changed':'isChanged' 'value_modified':'valueModified' 'is_default':'isDefault'}<line_sep>self._name=<none><line_sep>self._instance_number=<none><line_sep>self._begin_value=<none><line_sep>self._end_value=<none><line_sep>self._is_changed=<none><line_sep>self._value_modified=<none><line_sep>self._is_default=<none><block_end>@property<def_stmt>name self<block_start>""" **[Required]** Gets the name of this AwrDbParameterSummary. The name of the parameter. :return: The name of this AwrDbParameterSummary. :rtype: str """<line_sep><return>self._name<block_end>@name.setter<def_stmt>name self name<block_start>""" Sets the name of this AwrDbParameterSummary. The name of the parameter. :param name: The name of this AwrDbParameterSummary. :type: str """<line_sep>self._name=name<block_end>@property<def_stmt>instance_number self<block_start>""" Gets the instance_number of this AwrDbParameterSummary. The database instance number. :return: The instance_number of this AwrDbParameterSummary. :rtype: int """<line_sep><return>self._instance_number<block_end>@instance_number.setter<def_stmt>instance_number self instance_number<block_start>""" Sets the instance_number of this AwrDbParameterSummary. The database instance number. :param instance_number: The instance_number of this AwrDbParameterSummary. :type: int """<line_sep>self._instance_number=instance_number<block_end>@property<def_stmt>begin_value self<block_start>""" Gets the begin_value of this AwrDbParameterSummary. 
The parameter value when the period began. :return: The begin_value of this AwrDbParameterSummary. :rtype: str """<line_sep><return>self._begin_value<block_end>@begin_value.setter<def_stmt>begin_value self begin_value<block_start>""" Sets the begin_value of this AwrDbParameterSummary. The parameter value when the period began. :param begin_value: The begin_value of this AwrDbParameterSummary. :type: str """<line_sep>self._begin_value=begin_value<block_end>@property<def_stmt>end_value self<block_start>""" Gets the end_value of this AwrDbParameterSummary. The parameter value when the period ended. :return: The end_value of this AwrDbParameterSummary. :rtype: str """<line_sep><return>self._end_value<block_end>@end_value.setter<def_stmt>end_value self end_value<block_start>""" Sets the end_value of this AwrDbParameterSummary. The parameter value when the period ended. :param end_value: The end_value of this AwrDbParameterSummary. :type: str """<line_sep>self._end_value=end_value<block_end>@property<def_stmt>is_changed self<block_start>""" Gets the is_changed of this AwrDbParameterSummary. Indicates whether the parameter value changed within the period. :return: The is_changed of this AwrDbParameterSummary. :rtype: bool """<line_sep><return>self._is_changed<block_end>@is_changed.setter<def_stmt>is_changed self is_changed<block_start>""" Sets the is_changed of this AwrDbParameterSummary. Indicates whether the parameter value changed within the period. :param is_changed: The is_changed of this AwrDbParameterSummary. :type: bool """<line_sep>self._is_changed=is_changed<block_end>@property<def_stmt>value_modified self<block_start>""" Gets the value_modified of this AwrDbParameterSummary. Indicates whether the parameter has been modified after instance startup: - MODIFIED - Parameter has been modified with ALTER SESSION - SYSTEM_MOD - Parameter has been modified with ALTER SYSTEM (which causes all the currently logged in sessions\u2019 values to be modified) - FALSE - Parameter has not been modified after instance startup :return: The value_modified of this AwrDbParameterSummary. :rtype: str """<line_sep><return>self._value_modified<block_end>@value_modified.setter<def_stmt>value_modified self value_modified<block_start>""" Sets the value_modified of this AwrDbParameterSummary. Indicates whether the parameter has been modified after instance startup: - MODIFIED - Parameter has been modified with ALTER SESSION - SYSTEM_MOD - Parameter has been modified with ALTER SYSTEM (which causes all the currently logged in sessions\u2019 values to be modified) - FALSE - Parameter has not been modified after instance startup :param value_modified: The value_modified of this AwrDbParameterSummary. :type: str """<line_sep>self._value_modified=value_modified<block_end>@property<def_stmt>is_default self<block_start>""" Gets the is_default of this AwrDbParameterSummary. Indicates whether the parameter value in the end snapshot is the default. :return: The is_default of this AwrDbParameterSummary. :rtype: bool """<line_sep><return>self._is_default<block_end>@is_default.setter<def_stmt>is_default self is_default<block_start>""" Sets the is_default of this AwrDbParameterSummary. Indicates whether the parameter value in the end snapshot is the default. :param is_default: The is_default of this AwrDbParameterSummary. 
:type: bool """<line_sep>self._is_default=is_default<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
<import_stmt>subprocess<import_from_stmt>..utils ThreadedSegment decode<class_stmt>Segment(ThreadedSegment)<block_start><def_stmt>run self<block_start>self.version=<none><try_stmt><block_start>output=decode(subprocess.check_output(['php' '-r' 'echo PHP_VERSION;'] stderr=subprocess.STDOUT))<line_sep>self.version=output.split('-')[0]<if>'-'<in>output<else>output<block_end><except_stmt>OSError<block_start>self.version=<none><block_end><block_end><def_stmt>add_to_powerline self<block_start>self.join()<if_stmt><not>self.version<block_start><return><block_end># FIXME no hard-coded colors self.powerline.append(" "+self.version+" " 15 4)<block_end><block_end>
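# --- Hedged example, not part of the original segment ---
# The split above strips a distro-style suffix from PHP_VERSION, keeping only
# the bare version number. The sample strings are assumptions for illustration.
output = "7.4.3-4ubuntu2"
version = output.split('-')[0] if '-' in output else output  # -> "7.4.3"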
# -*- coding: utf-8 -*- # Copyright 2019 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implementation of HMAC key management command for GCS. NOTE: Any modification to this file or corresponding HMAC logic should be submitted in its own PR and release to avoid concurrency issues in testing. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>gslib.command Command<import_from_stmt>gslib.command_argument CommandArgument<import_from_stmt>gslib.cs_api_map ApiSelector<import_from_stmt>gslib.exception CommandException<import_from_stmt>gslib.help_provider CreateHelpText<import_from_stmt>gslib.metrics LogCommandParams<import_from_stmt>gslib.project_id PopulateProjectId<import_from_stmt>gslib.utils.cloud_api_helper GetCloudApiInstance<import_from_stmt>gslib.utils.text_util InsistAscii<line_sep>_CREATE_SYNOPSIS=""" gsutil hmac create [-p <project>] <service_account_email> """<line_sep>_DELETE_SYNOPSIS=""" gsutil hmac delete [-p <project>] <access_id> """<line_sep>_GET_SYNOPSIS=""" gsutil hmac get [-p <project>] <access_id> """<line_sep>_LIST_SYNOPSIS=""" gsutil hmac list [-a] [-l] [-p <project>] [-u <service_account_email>] """<line_sep>_UPDATE_SYNOPSIS=""" gsutil hmac update -s (ACTIVE|INACTIVE) [-e <etag>] [-p <project>] <access_id> """<line_sep>_CREATE_DESCRIPTION=""" <B>CREATE</B> The ``hmac create`` command creates an HMAC key for the specified service account: gsutil hmac create <EMAIL> The secret key material is only available upon creation, so be sure to store the returned secret along with the access_id. <B>CREATE OPTIONS</B> The ``create`` sub-command has the following option -p <project> Specify the ID or number of the project in which to create a key. """<line_sep>_DELETE_DESCRIPTION=""" <B>DELETE</B> The ``hmac delete`` command permanently deletes the specified HMAC key: gsutil hmac delete GOOG56JBMFZX6PMPTQ62VD2 Note that keys must be updated to be in the ``INACTIVE`` state before they can be deleted. <B>DELETE OPTIONS</B> The ``delete`` sub-command has the following option -p <project> Specify the ID or number of the project from which to delete a key. """<line_sep>_GET_DESCRIPTION=""" <B>GET</B> The ``hmac get`` command retrieves the specified HMAC key's metadata: gsutil hmac get GOOG56JBMFZX6PMPTQ62VD2 Note that there is no option to retrieve a key's secret material after it has been created. <B>GET OPTIONS</B> The ``get`` sub-command has the following option -p <project> Specify the ID or number of the project from which to get a key. """<line_sep>_LIST_DESCRIPTION=""" <B>LIST</B> The ``hmac list`` command lists the HMAC key metadata for keys in the specified project. If no project is specified in the command, the default project is used. <B>LIST OPTIONS</B> The ``list`` sub-command has the following options -a Show all keys, including recently deleted keys. -l Use long listing format. 
Shows each key's full metadata excluding the secret. -p <project> Specify the ID or number of the project from which to list keys. -u <service_account_email> Filter keys for a single service account. """<line_sep>_UPDATE_DESCRIPTION=""" <B>UPDATE</B> The ``hmac update`` command sets the state of the specified key: gsutil hmac update -s INACTIVE -e M42da= GOOG56JBMFZX6PMPTQ62VD2 Valid state arguments are ``ACTIVE`` and ``INACTIVE``. To set a key to state ``DELETED``, use the ``hmac delete`` command on an ``INACTIVE`` key. If an etag is set in the command, it will only succeed if the provided etag matches the etag of the stored key. <B>UPDATE OPTIONS</B> The ``update`` sub-command has the following options -s <ACTIVE|INACTIVE> Sets the state of the specified key to either ``ACTIVE`` or ``INACTIVE``. -e <etag> If provided, the update will only be performed if the specified etag matches the etag of the stored key. -p <project> Specify the ID or number of the project in which to update a key. """<line_sep>_SYNOPSIS=(_CREATE_SYNOPSIS+_DELETE_SYNOPSIS.lstrip('\n')+_GET_SYNOPSIS.lstrip('\n')+_LIST_SYNOPSIS.lstrip('\n')+_UPDATE_SYNOPSIS.lstrip('\n')+'\n\n')<line_sep>_DESCRIPTION=""" You can use the ``hmac`` command to interact with service account `HMAC keys <https://cloud.google.com/storage/docs/authentication/hmackeys>`_. The ``hmac`` command has five sub-commands: """+'\n'.join([_CREATE_DESCRIPTION _DELETE_DESCRIPTION _GET_DESCRIPTION _LIST_DESCRIPTION _UPDATE_DESCRIPTION ])<line_sep>_DETAILED_HELP_TEXT=CreateHelpText(_SYNOPSIS _DESCRIPTION)<line_sep>_VALID_UPDATE_STATES=['INACTIVE' 'ACTIVE']<line_sep>_TIME_FORMAT='%a, %d %b %Y %H:%M:%S GMT'<line_sep>_create_help_text=CreateHelpText(_CREATE_SYNOPSIS _CREATE_DESCRIPTION)<line_sep>_delete_help_text=CreateHelpText(_DELETE_SYNOPSIS _DELETE_DESCRIPTION)<line_sep>_get_help_text=CreateHelpText(_GET_SYNOPSIS _GET_DESCRIPTION)<line_sep>_list_help_text=CreateHelpText(_LIST_SYNOPSIS _LIST_DESCRIPTION)<line_sep>_update_help_text=CreateHelpText(_UPDATE_SYNOPSIS _UPDATE_DESCRIPTION)<def_stmt>_AccessIdException command_name subcommand synopsis<block_start><return>CommandException('%s %s requires an Access ID to be specified as the last argument.\n%s'%(command_name subcommand synopsis))<block_end><def_stmt>_KeyMetadataOutput metadata<block_start>"""Format the key metadata for printing to the console."""<def_stmt>FormatInfo name value new_line=<true><block_start>"""Format the metadata name-value pair into two aligned columns."""<line_sep>width=22<line_sep>info_str='\t%-*s %s'%(width name+':' value)<if_stmt>new_line<block_start>info_str<augadd>'\n'<block_end><return>info_str<block_end>message='Access ID %s:\n'%metadata.accessId<line_sep>message<augadd>FormatInfo('State' metadata.state)<line_sep>message<augadd>FormatInfo('Service Account' metadata.serviceAccountEmail)<line_sep>message<augadd>FormatInfo('Project' metadata.projectId)<line_sep>message<augadd>FormatInfo('Time Created' metadata.timeCreated.strftime(_TIME_FORMAT))<line_sep>message<augadd>FormatInfo('Time Last Updated' metadata.updated.strftime(_TIME_FORMAT))<line_sep>message<augadd>FormatInfo('Etag' metadata.etag new_line=<false>)<line_sep><return>message<block_end><class_stmt>HmacCommand(Command)<block_start>"""Implementation of gsutil hmac command."""<line_sep>command_spec=Command.CreateCommandSpec('hmac' min_args=1 max_args=8 supported_sub_args='ae:lp:s:u:' file_url_ok=<true> urls_start_arg=1 gs_api_support=[ApiSelector.JSON] gs_default_api=ApiSelector.JSON usage_synopsis=_SYNOPSIS 
argparse_arguments={'create':[CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()] 'delete':[CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()] 'get':[CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()] 'list':[CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()] 'update':[CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()] } )<line_sep>help_spec=Command.HelpSpec(help_name='hmac' help_name_aliases=[] help_type='command_help' help_one_line_summary=('CRUD operations on service account HMAC keys.') help_text=_DETAILED_HELP_TEXT subcommand_help_text={'create':_create_help_text 'delete':_delete_help_text 'get':_get_help_text 'list':_list_help_text 'update':_update_help_text })<def_stmt>_CreateHmacKey self thread_state=<none><block_start>"""Creates HMAC key for a service account."""<if_stmt>self.args<block_start>self.service_account_email=self.args[0]<block_end><else_stmt><block_start>err_msg=('%s %s requires a service account to be specified as the '<concat>'last argument.\n%s')<line_sep><raise>CommandException(err_msg%(self.command_name self.action_subcommand _CREATE_SYNOPSIS))<block_end>gsutil_api=GetCloudApiInstance(self thread_state=thread_state)<line_sep>response=gsutil_api.CreateHmacKey(self.project_id self.service_account_email provider='gs')<line_sep>print('%-12s %s'%('Access ID:' response.metadata.accessId))<line_sep>print('%-12s %s'%('Secret:' response.secret))<block_end><def_stmt>_DeleteHmacKey self thread_state=<none><block_start>"""Deletes an HMAC key."""<if_stmt>self.args<block_start>access_id=self.args[0]<block_end><else_stmt><block_start><raise>_AccessIdException(self.command_name self.action_subcommand _DELETE_SYNOPSIS)<block_end>gsutil_api=GetCloudApiInstance(self thread_state=thread_state)<line_sep>gsutil_api.DeleteHmacKey(self.project_id access_id provider='gs')<block_end><def_stmt>_GetHmacKey self thread_state=<none><block_start>"""Gets HMAC key from its Access Id."""<if_stmt>self.args<block_start>access_id=self.args[0]<block_end><else_stmt><block_start><raise>_AccessIdException(self.command_name self.action_subcommand _GET_SYNOPSIS)<block_end>gsutil_api=GetCloudApiInstance(self thread_state=thread_state)<line_sep>response=gsutil_api.GetHmacKey(self.project_id access_id provider='gs')<line_sep>print(_KeyMetadataOutput(response))<block_end><def_stmt>_ListHmacKeys self thread_state=<none><block_start>"""Lists HMAC keys for a project or service account."""<if_stmt>self.args<block_start><raise>CommandException('%s %s received unexpected arguments.\n%s'%(self.command_name self.action_subcommand _LIST_SYNOPSIS))<block_end>gsutil_api=GetCloudApiInstance(self thread_state=thread_state)<line_sep>response=gsutil_api.ListHmacKeys(self.project_id self.service_account_email self.show_all provider='gs')<line_sep>short_list_format='%s\t%-12s %s'<if_stmt>self.long_list<block_start><for_stmt>item response<block_start>print(_KeyMetadataOutput(item))<line_sep>print()<block_end><block_end><else_stmt><block_start><for_stmt>item response<block_start>print(short_list_format%(item.accessId item.state item.serviceAccountEmail))<block_end><block_end><block_end><def_stmt>_UpdateHmacKey self thread_state=<none><block_start>"""Update an HMAC key's state."""<if_stmt><not>self.state<block_start><raise>CommandException('A state flag must be supplied for %s %s\n%s'%(self.command_name self.action_subcommand _UPDATE_SYNOPSIS))<block_end><elif_stmt>self.state<not><in>_VALID_UPDATE_STATES<block_start><raise>CommandException('The state flag value must be one of %s'%', 
'.join(_VALID_UPDATE_STATES))<block_end><if_stmt>self.args<block_start>access_id=self.args[0]<block_end><else_stmt><block_start><raise>_AccessIdException(self.command_name self.action_subcommand _UPDATE_SYNOPSIS)<block_end>gsutil_api=GetCloudApiInstance(self thread_state=thread_state)<line_sep>response=gsutil_api.UpdateHmacKey(self.project_id access_id self.state self.etag provider='gs')<line_sep>print(_KeyMetadataOutput(response))<block_end><def_stmt>RunCommand self<block_start>"""Command entry point for the hmac command."""<if_stmt>self.gsutil_api.GetApiSelector(provider='gs')<ne>ApiSelector.JSON<block_start><raise>CommandException('The "hmac" command can only be used with the GCS JSON API')<block_end>self.action_subcommand=self.args.pop(0)<line_sep>self.ParseSubOpts(check_args=<true>)<line_sep># Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. LogCommandParams(sub_opts=self.sub_opts)<line_sep>self.service_account_email=<none><line_sep>self.state=<none><line_sep>self.show_all=<false><line_sep>self.long_list=<false><line_sep>self.etag=<none><if_stmt>self.sub_opts<block_start><for_stmt>o,a self.sub_opts<block_start><if_stmt>o<eq>'-u'<block_start>self.service_account_email=a<block_end><elif_stmt>o<eq>'-p'# Project IDs are sent as header values when using gs and s3 XML APIs. <block_start>InsistAscii(a 'Invalid non-ASCII character found in project ID')<line_sep>self.project_id=a<block_end><elif_stmt>o<eq>'-s'<block_start>self.state=a<block_end><elif_stmt>o<eq>'-a'<block_start>self.show_all=<true><block_end><elif_stmt>o<eq>'-l'<block_start>self.long_list=<true><block_end><elif_stmt>o<eq>'-e'<block_start>self.etag=a<block_end><block_end><block_end><if_stmt><not>self.project_id<block_start>self.project_id=PopulateProjectId(<none>)<block_end>method_for_arg={'create':self._CreateHmacKey 'delete':self._DeleteHmacKey 'get':self._GetHmacKey 'list':self._ListHmacKeys 'update':self._UpdateHmacKey }<if_stmt>self.action_subcommand<not><in>method_for_arg<block_start><raise>CommandException('Invalid subcommand "%s" for the %s command.\n'<concat>'See "gsutil help hmac".'%(self.action_subcommand self.command_name))<block_end>LogCommandParams(subcommands=[self.action_subcommand])<line_sep>method_for_arg[self.action_subcommand]()<line_sep><return>0<block_end><block_end>
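# --- Hedged illustration, not part of the original command ---
# With the default short listing, _ListHmacKeys() prints one line per key via
# '%s\t%-12s %s': access ID, state padded to 12 characters, service account.
# The values below are made up for illustration.
short_list_format = '%s\t%-12s %s'
print(short_list_format % ('GOOG1EXAMPLEACCESSID', 'ACTIVE',
                           '[email protected]'))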
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Send/Recv ops. The following _Send()/_Recv() are adapted from python op wrappers generated by python_op_gen_main. python_op_gen_main.cc's PrintAllPythonOps needs to be updated to export internal ops. """<import_from_stmt>lingvo compat<as>tf<line_sep># pylint: disable=g-direct-tensorflow-import <import_from_stmt>tensorflow.compiler.tf2xla.python xla<line_sep># pylint: enable=g-direct-tensorflow-import <def_stmt>_TpuCore device<block_start>"""Returns the TPU core represented by <device>, or -1 if not TPU."""<line_sep>prefix="device:TPU_REPLICATED_CORE:"<if_stmt>prefix<in>device<block_start><return>int(device[len(prefix):])<block_end><return>-1<block_end><class_stmt>Channel<block_start>"""A communication channel to transfer tensors in order."""<def_stmt>__init__ self dtype shape send_device recv_device name=<none><block_start>"""Construct a channel. Args: dtype: The dtype of tensors sent through the channel. shape: The shape of tensors sent through the channel. Must be a fully defined shape for TPUs. send_device: A fully-specified tensorflow device. recv_device: A fully-specified tensorflow device. name: A name for the channel (optional). """<line_sep>current_graph=tf.get_default_graph()<assert_stmt>current_graph "A channel is scoped within a tf.Graph"<line_sep>self._dtype=dtype<line_sep>self._send_device=send_device<line_sep>self._recv_device=recv_device<line_sep>self._name=current_graph.unique_name(name<if>name<else>"channel")<assert_stmt>shape<is><not><none><line_sep>shape=tf.TensorShape(shape)<line_sep>self._shape=shape<line_sep>self._send_tpu_core=_TpuCore(send_device)<line_sep>self._recv_tpu_core=_TpuCore(recv_device)<line_sep>self._send_called=<false><line_sep>self._recv_op=<none><assert_stmt>((self._send_tpu_core<eq>-1)<eq>(self._recv_tpu_core<eq>-1)) ("Mixing TPU and non-TPU: %s and %s"%(send_device recv_device))<if_stmt>self._send_tpu_core<ge>0<block_start><assert_stmt>self._shape.is_fully_defined() ("TPU channel must have fully defined shape. 
Name: %s, shape: %s"%(self._name self._shape))<assert_stmt>self._send_tpu_core<ne>self._recv_tpu_core ("TPU send/recv must be cross-core: %s and %s"%(send_device recv_device))<block_end><block_end><def_stmt>Send self tensor<block_start>"""Sends a tensor through the channel."""<assert_stmt>tensor.dtype<eq>self._dtype<assert_stmt><not>self._send_called ("Send called multiple times for %s"%self._name)<line_sep>self._send_called=<true><if_stmt>self._send_tpu_core<eq>-1<block_start><return>tf.raw_ops.Send(tensor=tensor tensor_name=self._name send_device=self._send_device send_device_incarnation=0 recv_device=self._recv_device)<block_end><else_stmt><block_start><with_stmt>tf.device(self._send_device)<block_start><return>xla.send(tensor tensor_name=self._name name="Send_"+self._name)<block_end><block_end><block_end><def_stmt>Recv self<block_start>"""Receives a tensor from the channel."""<if_stmt>self._send_tpu_core<eq>-1<block_start>received=tf.raw_ops.Recv(tensor_type=self._dtype tensor_name=self._name send_device=self._send_device send_device_incarnation=0 recv_device=self._recv_device)<line_sep>received.set_shape(self._shape)<line_sep><return>received<block_end><else_stmt><block_start><with_stmt>tf.device(self._recv_device)<block_start><return>xla.recv(self._dtype tensor_name=self._name shape=self._shape name="Recv_"+self._name)<block_end><block_end><block_end><block_end>
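# --- Hedged usage sketch, not part of the original module ---
# A Channel pairs exactly one Send with one Recv between two fully specified
# devices; the device strings, shape, and tensor below are placeholders only.
with tf.Graph().as_default():
    ch = Channel(tf.float32, shape=[2, 2],
                 send_device="/job:worker/replica:0/task:0/device:CPU:0",
                 recv_device="/job:worker/replica:0/task:1/device:CPU:0",
                 name="activations")
    send_op = ch.Send(tf.ones([2, 2], dtype=tf.float32))  # issued for the sending device
    received = ch.Recv()                                   # issued for the receiving device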
# -*- coding: utf8 -*- ######################################################################################## # This file is part of exhale. Copyright (c) 2017-2019, <NAME>. # # Full BSD 3-Clause license available here: # # # # https://github.com/svenevs/exhale/blob/master/LICENSE # ######################################################################################## ''' The ``configs`` module exists to contain the Sphinx Application configurations specific to this extension. Almost every ``global`` variable defined in this file can be modified using the ``exhale_args`` in ``conf.py``. The convention for this file is as follows: 1. Things that are **not** supposed to change, because their value is expected to be constant, are declared in ``ALL_CAPS``. See - :data:`~exhale.configs.SECTION_HEADING_CHAR` - :data:`~exhale.configs.SUB_SECTION_HEADING_CHAR` - :data:`~exhale.configs.SUB_SUB_SECTION_HEADING_CHAR` - :data:`~exhale.configs.DEFAULT_DOXYGEN_STDIN_BASE` 2. Internal / private variables that are **not** supposed to changed except for by this extension are declared as ``_lower_case_with_single_leading_underscore`` as is common in Python ;). 3. Every other variable is declared as ``camelCase``, indicating that it can be configured **indirectly** by using it as a key in the arguments to ``exhale_args`` present in your ``conf.py``. For example, one of the *required* arguments for this extension is :data:`~exhale.configs.containmentFolder`. This means that the key ``"containmentFolder"`` is *expected* to be present in ``exhale_args``. .. code-block:: py exhale_args = { "containmentFolder": "./api", # ... } Read the documentation for the various configs present to see what the various options are to modify the behavior of Exhale. '''<import_from_future_stmt> unicode_literals<import_stmt>os<import_stmt>six<import_stmt>textwrap<import_from_stmt>sphinx.errors ConfigError ExtensionError<import_from_stmt>sphinx.util logging<import_from_stmt>types FunctionType ModuleType<try_stmt># Python 2 StringIO <block_start><import_from_stmt>cStringIO StringIO<block_end><except_stmt>ImportError# Python 3 StringIO <block_start><import_from_stmt>io StringIO<block_end>logger=logging.getLogger(__name__)<line_sep>""" The |SphinxLoggerAdapter| for communicating with the sphinx build process. .. |SphinxLoggerAdapter| replace:: :class:`sphinx:sphinx.util.SphinxLoggerAdapter` """<line_sep>######################################################################################## ## # ## Required configurations, these get set indirectly via the dictionary argument # ## given to exhale in your conf.py. # ## # ######################################################################################## containmentFolder=<none><line_sep>''' **Required** The location where Exhale is going to generate all of the reStructuredText documents. **Value in** ``exhale_args`` (str) The value of key ``"containmentFolder"`` should be a string representing the (relative or absolute) path to the location where Exhale will be creating all of the files. **Relative paths are relative to the Sphinx application source directory**, which is almost always wherever the file ``conf.py`` is. .. note:: To better help you the user know what Exhale is generating (and therefore safe to delete), it is a **hard requirement** that ``containmentFolder`` is a **subdirectory** of the Sphinx Source Directory. AKA the path ``"."`` will be rejected, but the path ``"./api"`` will be accepted. 
The suggested value for ``"containmentFolder"`` is ``"./api"``, or ``"./source/api"`` if you have separate source and build directories with Sphinx. When the html is eventually generated, this will make for a more human friendly url being generated. .. warning:: The verbiage subdirectory means **direct** subdirectory. So the path ``"./library/api"`` will be rejected. This is because I make the assumption that ``containmentFolder`` is "owned" by Exhale / is safe to delete. '''<line_sep>rootFileName=<none><line_sep>''' **Required** The name of the file that **you** will be linking to from your reStructuredText documents. Do **not** include the ``containmentFolder`` path in this file name, Exhale will create the file ``"{contaimentFolder}/{rootFileName}"`` for you. **Value in** ``exhale_args`` (str) The value of key ``"rootFileName"`` should be a string representing the name of the file you will be including in your top-level ``toctree`` directive. In order for Sphinx to be happy, you should include a ``.rst`` suffix. All of the generated API uses reStructuredText, and that will not ever change. For example, if you specify - ``"containmentFolder" = "./api"``, and - ``"rootFileName" = "library_root.rst"`` Then exhale will generate the file ``./api/library_root.rst``. You would then include this file in a ``toctree`` directive (say in ``index.rst``) with: .. raw:: html <div class="highlight-rest"> <div class="highlight"> <pre> .. toctree:: :maxdepth: 2 about <b>api/library_root</b></pre> </div> </div> '''<line_sep>rootFileTitle=<none><line_sep>''' **Required** The title to be written at the top of ``rootFileName``, which will appear in your file including it in the ``toctree`` directive. **Value in** ``exhale_args`` (str) The value of the key ``"rootFileTitle"`` should be a string that has the title of the main library root document folder Exhale will be generating. The user is required to supply this value because its value directly affects the overall presentation of your documentation. For example, if you are including the Exhale generated library root file in your ``index.rst`` top-level ``toctree`` directive, the title you supply here will show up on both your main page, as well as in the navigation menus. An example value could be ``"Library API"``. '''<line_sep>doxygenStripFromPath=<none><line_sep>''' **Required** When building on Read the Docs, there seem to be issues regarding the Doxygen variable ``STRIP_FROM_PATH`` when built remotely. That is, it isn't stripped at all. This value enables Exhale to manually strip the path. **Value in** ``exhale_args`` (str) The value of the key ``"doxygenStripFromPath"`` should be a string representing the (relative or absolute) path to be stripped from the final documentation. As with :data:`~exhale.configs.containmentFolder`, relative paths are relative to the Sphinx source directory (where ``conf.py`` is). Consider the following directory structure:: my_project/ ├───docs/ │ conf.py │ └───include/ └───my_project/ common.hpp In this scenario, if you supplied ``"doxygenStripFromPath" = ".."``, then the file page for ``common.hpp`` would list its declaration as ``include/my_project/common.hpp``. If you instead set it to be ``"../include"``, then the file page for ``common.hpp`` would list its declaration as just ``my_project/common.hpp``. As a consequence, modification of this variable directly affects what shows up in the file view hierarchy. 
In the previous example, the difference would really just be whether or not all files are nestled underneath a global ``include`` folder or not. .. warning:: It is **your** responsibility to ensure that the value you provide for this configuration is valid. The file view hierarchy will almost certainly break if you give nonsense. .. note:: Depending on your project layout, some links may be broken in the above example if you use ``"../include"`` that work when you use ``".."``. To get your docs working, revert to ``".."``. If you're feeling nice, raise an issue on GitHub and let me know --- I haven't been able to track this one down yet :/ Particularly, this seems to happen with projects that have duplicate filenames in different folders, e.g.:: include/ └───my_project/ │ common.hpp │ └───viewing/ common.hpp '''<line_sep>######################################################################################## ## # ## Additional configurations available to further customize the output of exhale. # ## # ######################################################################################## # Build Process Logging, Colors, and Debugging # ######################################################################################## verboseBuild=<false><line_sep>''' **Optional** If you are having a hard time getting documentation to build, or say hierarchies are not appearing as they should be, set this to ``True``. **Value in** ``exhale_args`` (bool) Set the boolean value to be ``True`` to include colorized printing at various stages of the build process. .. warning:: There is only one level of verbosity: excessively verbose. **All logging is written to** ``sys.stderr``. See :data:`~exhale.configs.alwaysColorize`. .. tip:: Looking at the actual code of Exhale trying to figure out what is going on? All logging sections have a comment ``# << verboseBuild`` just before the logging section. So you can ``grep -r '# << verboseBuild' exhale/`` if you're working with the code locally. '''<line_sep>alwaysColorize=<true><line_sep>''' **Optional** Exhale prints various messages throughout the build process to both ``sys.stdout`` and ``sys.stderr``. The default behavior is to colorize output always, regardless of if the output is being directed to a file. This is because you can simply use ``cat`` or ``less -R``. By setting this to ``False``, when redirecting output to a file the color will not be included. **Value in** ``exhale_args`` (bool) The default is ``True`` because I find color to be something developers should embrace. Simply use ``less -R`` to view colorized output conveniently. While I have a love of all things color, I understand you may not. So just set this to ``False``. .. note:: There is not and will never be a way to remove the colorized logging from the console. This only controls when ``sys.stdout`` and ``sys.stderr`` are being redirected to a file. '''<line_sep>generateBreatheFileDirectives=<false><line_sep>''' **Optional** Append the ``.. doxygenfile::`` directive from Breathe for *every* file page generated in the API. **Value in** ``exhale_args`` (bool) If True, then the breathe directive (``doxygenfile``) will be incorporated at the bottom of the file. .. danger:: **This feature is not intended for production release of pages, only debugging.** This feature is "deprecated" in lieu of minimal parsing of the input Doxygen xml for a given documented file. 
This feature can be used to help determine if Exhale has made a mistake in parsing the file level documentation, but usage of this feature will create **many** duplicate id's and the Sphinx build process will be littered with complaints. **Usage of this feature will completely dismantle the links coordinated in all parts of Exhale**. Because duplicate id's are generated, Sphinx chooses where to link to. It seems to reliably choose the links generated by the Breathe File directive, meaning the majority of the navigational setup of Exhale is pretty much invalidated. '''<line_sep>######################################################################################## # Root API Document Customization and Treeview # ######################################################################################## afterTitleDescription=<none><line_sep>''' **Optional** Provide a description to appear just after :data:`~exhale.configs.rootFileTitle`. **Value in** ``exhale_args`` (str) If you want to provide a brief summary of say the layout of the API, or call attention to specific classes, functions, etc, use this. For example, if you had Python bindings but no explicit documentation for the Python side of the API, you could use something like .. code-block:: py exhale_args = { # ... other required arguments... "rootFileTitle": "Library API", "afterTitleDescription": textwrap.dedent(\'\'\' .. note:: The following documentation presents the C++ API. The Python API generally mirrors the C++ API, but some methods may not be available in Python or may perform different actions. \'\'\') } '''<line_sep>afterHierarchyDescription=<none><line_sep>''' **Optional** Provide a description that appears after the Class and File hierarchies, but before the full (and usually very long) API listing. **Value in** ``exhale_args`` (str) Similar to :data:`~exhale.configs.afterTitleDescription`, only it is included in the middle of the document. '''<line_sep>fullApiSubSectionTitle="Full API"<line_sep>''' **Optional** The title for the subsection that comes after the Class and File hierarchies, just before the enumeration of the full API. **Value in** ``exhale_args`` (str) The default value is simply ``"Full API"``. Change this to be something else if you so desire. '''<line_sep>afterBodySummary=<none><line_sep>''' **Optional** Provide a summary to be included at the bottom of the root library file. **Value in** ``exhale_args`` (str) Similar to :data:`~exhale.configs.afterTitleDescription`, only it is included at the bottom of the document. .. note:: The root library document generated can be quite long, depending on your framework. Important notes to developers should be included at the top of the file using :data:`~exhale.configs.afterTitleDescription`, or after the hierarchies using :data:`~exhale.configs.afterHierarchyDescription`. '''<line_sep>fullToctreeMaxDepth=5<line_sep>''' **Optional** The generated library root document performs ``.. include:: unabridged_api.rst`` at the bottom, after the Class and File hierarchies. Inside ``unabridged_api.rst``, every generated file is included using a ``toctree`` directive to prevent Sphinx from getting upset about documents not being included. This value controls the ``:maxdepth:`` for all of these ``toctree`` directives. **Value in** ``exhale_args`` (int) The default value is ``5``, but you may want to give a smaller value depending on the framework being documented. .. warning:: This value must be greater than or equal to ``1``. 
You are advised not to use a value greater than ``5``. '''<line_sep>listingExclude=[]<line_sep>''' **Optional** A list of regular expressions to exclude from both the class hierarchy and namespace page enumerations. This can be useful when you want to keep the listings for the hierarchy / namespace pages more concise, but **do** ultimately want the excluded items documented somewhere. Nodes whose ``name`` (fully qualified, e.g., ``namespace::ClassName``) matches any regular expression supplied here will: 1. Exclude this item from the class view hierarchy listing. 2. Exclude this item from the defining namespace's listing (where applicable). 3. The "excluded" item will still have it's own documentation **and** be linked in the "full API listing", as well as from the file page that defined the compound (if recovered). Otherwise Sphinx will explode with warnings about documents not being included in any ``toctree`` directives. This configuration variable is **one size fits all**. It was created as a band-aid fix for PIMPL frameworks. .. todo:: More fine-grained control will be available in the pickleable writer API sometime in Exhale 1.x. .. note:: If you want to skip documentation of a compound in your framework *entirely*, this configuration variable is **not** where you do it. See :ref:`Doxygen PREDEFINED <doxygen_predefined>` for information on excluding compounds entirely using the doxygen preprocessor. **Value in** ``exhale_args`` (list) The list can be of variable types, but each item will be compiled into an internal list using :func:`python:re.compile`. The arguments for ``re.compile(pattern, flags=0)`` should be specified in order, but for convenience if no ``flags`` are needed for your use case you can just specify a string. For example: .. code-block:: py exhale_args = { # These two patterns should be equitable for excluding PIMPL # objects in a framework that uses the ``XxxImpl`` naming scheme. "listingExclude": [r".*Impl$", (r".*impl$", re.IGNORECASE)] } Each item in ``listingExclude`` may either be a string (the regular expression pattern), or it may be a length two iterable ``(string pattern, int flags)``. '''<line_sep># Compiled regular expressions from listingExclude # TODO: moves into config object _compiled_listing_exclude=[]<line_sep>unabridgedOrphanKinds={"dir" "file"}<line_sep>""" **Optional** The list of node kinds to **exclude** from the unabridged API listing beneath the class and file hierarchies. **Value in** ``exhale_args`` (list or set of strings) The list of kinds (see :data:`~exhale.utils.AVAILABLE_KINDS`) that will **not** be included in the unabridged API listing. The default is to exclude directories and files (which are already in the file hierarchy). Note that if this variable is provided, it will overwrite the default ``{"dir", "file"}``, meaning if you want to exclude something in addition you need to include ``"dir"`` and ``"file"``: .. code-block:: py # In conf.py exhale_args = { # Case 1: _only_ exclude union "unabridgedOrphanKinds": {"union"} # Case 2: exclude union in addition to dir / file. "unabridgedOrphanKinds": {"dir", "file", "union"} } .. tip:: See :data:`~exhale.configs.fullToctreeMaxDepth`, users seeking to reduce the length of the unabridged API should set this value to ``1``. .. warning:: If **either** ``"class"`` **or** ``"struct"`` appear in ``unabridgedOrphanKinds`` then **both** will be excluded. The unabridged API will present classes and structs together. 
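As an illustrative sketch only (not a drop-in configuration), combining this option with :data:`~exhale.configs.fullToctreeMaxDepth` per the tip above keeps the unabridged listing short:

.. code-block:: py

    # in conf.py
    exhale_args = {
        # ... required arguments ...
        # keep the unabridged toctrees shallow (see fullToctreeMaxDepth docs)
        "fullToctreeMaxDepth": 1,
        # exclude unions in addition to the default dir / file
        "unabridgedOrphanKinds": {"dir", "file", "union"},
    }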
"""<line_sep>######################################################################################## # Clickable Hierarchies <3 # ######################################################################################## createTreeView=<false><line_sep>''' **Optional** When set to ``True``, clickable hierarchies for the Class and File views will be generated. **Set this variable to** ``True`` **if you are generating html** output for much more attractive websites! **Value in** ``exhale_args`` (bool) When set to ``False``, the Class and File hierarches are just reStructuredText bullet lists. This is rather unattractive, but the default of ``False`` is to hopefully enable non-html writers to still be able to use ``exhale``. .. tip:: Using ``html_theme = "bootstrap"`` (the `Sphinx Bootstrap Theme`__)? Make sure you set :data:`~exhale.configs.treeViewIsBootstrap` to ``True``! __ https://ryan-roemer.github.io/sphinx-bootstrap-theme/ '''<line_sep>minifyTreeView=<true><line_sep>''' **Optional** When set to ``True``, the generated html and/or json for the class and file hierarchy trees will be minified. **Value in** ``exhale_args`` (bool) The default value is ``True``, which should help page load times for larger APIs. Setting to ``False`` should only really be necessary if there is a problem -- the minified version will be hard to parse as a human. '''<line_sep>treeViewIsBootstrap=<false><line_sep>''' **Optional** If the generated html website is using ``bootstrap``, make sure to set this to ``True``. The `Bootstrap Treeview`__ library will be used. __ http://jonmiles.github.io/bootstrap-treeview/ **Value in** ``exhale_args`` (bool) When set to ``True``, the clickable hierarchies will be generated using a Bootstrap friendly library. '''<line_sep>treeViewBootstrapTextSpanClass="text-muted"<line_sep>''' **Optional** What **span** class to use for the *qualifying* text after the icon, but before the hyperlink to the actual documentation page. For example, ``Struct Foo`` in the hierarchy would have ``Struct`` as the *qualifying* text (controlled by this variable), and ``Foo`` will be a hyperlink to ``Foo``'s actual documentation. **Value in** ``exhale_args`` (str) A valid class to apply to a ``span``. The actual HTML being generated is something like: .. code-block:: html <span class="{span_cls}">{qualifier}</span> {hyperlink text} So if the value of this input was ``"text-muted"``, and it was the hierarchy element for ``Struct Foo``, it would be .. code-block:: html <span class="text-muted">Struct</span> Foo The ``Foo`` portion will receive the hyperlink styling elsewhere. .. tip:: Easy choices to consider are the `contextual classes`__ provided by your bootstrap theme. Alternatively, add your own custom stylesheet to Sphinx directly and create a class with the color you want there. __ https://getbootstrap.com/docs/3.3/css/#helper-classes-colors .. danger:: No validity checks are performed. If you supply a class that cannot be used, there is no telling what will happen. '''<line_sep>treeViewBootstrapIconMimicColor="text-muted"<line_sep>''' **Optional** The **paragraph** CSS class to *mimic* for the icon color in the tree view. **Value in** ``exhale_args`` (str) This value must be a valid CSS class for a **paragraph**. The way that it is used is in JavaScript, on page-load, a "fake paragraph" is inserted with the class specified by this variable. The color is extracted, and then a force-override is applied to the page's stylesheet. 
This was necessary to override some aspects of what the ``bootstrap-treeview`` library does. Its full usage looks like this: .. code-block:: js /* Inspired by very informative answer to get color of links: https://stackoverflow.com/a/2707837/3814202 */ /* vvvvvvvvvv what you give */ var $fake_p = $('<p class="icon_mimic"></p>').hide().appendTo("body"); /* ^^^^^^^^^^ */ var iconColor = $fake_p.css("color"); $fake_p.remove(); /* later on */ // Part 2: override the style of the glyphicons by injecting some CSS $('<style type="text/css" id="exhaleTreeviewOverride">' + ' .treeview span[class~=icon] { ' + ' color: ' + iconColor + ' ! important;' + ' }' + '</style>').appendTo('head'); .. tip:: Easy choices to consider are the `contextual classes`__ provided by your bootstrap theme. Alternatively, add your own custom stylesheet to Sphinx directly and create a class with the color you want there. __ https://getbootstrap.com/docs/3.3/css/#helper-classes-colors .. danger:: No validity checks are performed. If you supply a class that cannot be used, there is no telling what will happen. '''<line_sep>treeViewBootstrapOnhoverColor="#F5F5F5"<line_sep>''' **Optional** The hover color for elements in the hierarchy trees. Default color is a light-grey, as specified by the default value of ``bootstrap-treeview``'s `onhoverColor`_. **Value in** ``exhale_args`` (str) Any valid color. See `onhoverColor`_ for information. .. _onhoverColor: https://github.com/jonmiles/bootstrap-treeview#onhovercolor '''<line_sep>treeViewBootstrapUseBadgeTags=<true><line_sep>''' **Optional** When set to ``True`` (default), a Badge indicating the number of nested children will be included **when 1 or more children are present**. When enabled, each node in the json data generated has its `tags`_ set, and the global `showTags`_ option is set to ``true``. .. _tags: https://github.com/jonmiles/bootstrap-treeview#tags .. _showTags: https://github.com/jonmiles/bootstrap-treeview#showtags **Value in** ``exhale_args`` (bool) Set to ``False`` to exclude the badges. Search for ``Tags as Badges`` on the `example bootstrap treeview page`__, noting that if a given node does not have any children, no badge will be added. This is simply because a ``0`` badge is likely more confusing than helpful. __ http://jonmiles.github.io/bootstrap-treeview/ '''<line_sep>treeViewBootstrapExpandIcon="glyphicon glyphicon-plus"<line_sep>''' **Optional** Global setting for what the "expand" icon is for the bootstrap treeview. The default value here is the default of the ``bootstrap-treeview`` library. **Value in** ``exhale_args`` (str) See the `expandIcon`_ description of ``bootstrap-treeview`` for more information. .. _expandIcon: https://github.com/jonmiles/bootstrap-treeview#expandicon .. note:: Exhale handles wrapping this in quotes, you just need to specify the class (making sure that it has spaces where it should). Exhale does **not** perform any validity checks on the value of this variable. For example, you could use something like: .. code-block:: py exhale_args = { # ... required / other optional args ... # you can set one, both, or neither. 
just showing both in same example # set the icon to show it can be expanded "treeViewBootstrapExpandIcon": "glyphicon glyphicon-chevron-right", # set the icon to show it can be collapsed "treeViewBootstrapCollapseIcon": "glyphicon glyphicon-chevron-down" } '''<line_sep>treeViewBootstrapCollapseIcon="glyphicon glyphicon-minus"<line_sep>''' **Optional** Global setting for what the "collapse" icon is for the bootstrap treeview. The default value here is the default of the ``bootstrap-treeview`` library. **Value in** ``exhale_args`` (str) See the `collapseIcon`_ description of ``bootstrap-treeview`` for more information. See :data:`~exhale.configs.treeViewBootstrapExpandIcon` for how to specify this CSS class value. .. _collapseIcon: https://github.com/jonmiles/bootstrap-treeview#collapseicon '''<line_sep>treeViewBootstrapLevels=1<line_sep>''' **Optional** The default number of levels to expand on page load. Note that the ``bootstrap-treeview`` default `levels`_ value is ``2``. ``1`` seems like a safer default for Exhale since the value you choose here largely depends on how you have structured your code. .. _levels: https://github.com/jonmiles/bootstrap-treeview#levels **Value in** ``exhale_args`` (int) An integer representing the number of levels to expand for **both** the Class and File hierarchies. **This value should be greater than or equal to** ``1``, but **no validity checks are performed** on your input. Buyer beware. '''<line_sep>_class_hierarchy_id="class-treeView"<line_sep>''' The ``id`` attribute of the HTML element associated with the **Class** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True``. 1. When :data:`~exhale.configs.treeViewIsBootstrap` is ``False``, this ``id`` is attached to the outer-most ``ul``. 2. For bootstrap, an empty ``div`` is inserted with this ``id``, which will be the anchor point for the ``bootstrap-treeview`` library. '''<line_sep>_file_hierarchy_id="file-treeView"<line_sep>''' The ``id`` attribute of the HTML element associated with the **File** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True``. 1. When :data:`~exhale.configs.treeViewIsBootstrap` is ``False``, this ``id`` is attached to the outer-most ``ul``. 2. For bootstrap, an empty ``div`` is inserted with this ``id``, which will be the anchor point for the ``bootstrap-treeview`` library. '''<line_sep>_bstrap_class_hierarchy_fn_data_name="getClassHierarchyTree"<line_sep>''' The name of the JavaScript function that returns the ``json`` data associated with the **Class** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True`` **and** :data:`~exhale.configs.treeViewIsBootstrap` is ``True``. '''<line_sep>_bstrap_file_hierarchy_fn_data_name="getFileHierarchyTree"<line_sep>''' The name of the JavaScript function that returns the ``json`` data associated with the **File** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True`` **and** :data:`~exhale.configs.treeViewIsBootstrap` is ``True``. '''<line_sep>######################################################################################## # Page Level Customization # ######################################################################################## includeTemplateParamOrderList=<false><line_sep>''' **Optional** For Classes and Structs (only), Exhale can provide a numbered list enumeration displaying the template parameters in the order they should be specified. **Value in** ``exhale_args`` (bool) This feature can be useful when you have template classes that have **many** template parameters. 
The Breathe directives **will** include the parameters in the order they should be given. However, if you have a template class with more than say 5 parameters, it can become a little hard to read. .. note:: This configuration is all or nothing, and applies to every template Class / Struct. Additionally, **no** ``tparam`` documentation is displayed with this listing. Just the types / names they are declared as (and default values if provided). This feature really only exists as a historical accident. .. warning:: As a consequence of the (hacky) implementation, if you use this feature you commit to HTML output only. Where applicable, template parameters that generate links to other items being documented **only** work in HTML. '''<line_sep>pageLevelConfigMeta=<none><line_sep>''' **Optional** reStructuredText allows you to employ page-level configurations. These are included at the top of the page, before the title. **Value in** ``exhale_args`` (str) An example of one such feature would be ``":tocdepth: 5"``. To be honest, I'm not sure why you would need this feature. But it's easy to implement, you just need to make sure that you provide valid reStructuredText or *every* page will produce errors. See the `Field Lists`__ guide for more information. __ https://www.sphinx-doc.org/en/master/usage/restructuredtext/field-lists.html '''<line_sep>repoRedirectURL=<none><line_sep>''' .. todo:: **This feature is NOT implemented yet**! Hopefully soon. It definitely gets under my skin. It's mostly documented just to show up in the ``todolist`` for me ;) **Optional** When using the Sphinx RTD theme, there is a button placed in the top-right saying something like "Edit this on GitHub". Since the documents are all being generated dynamically (and not supposed to be tracked by ``git``), the links all go nowhere. Set this so Exhale can try and fix this. **Value in** ``exhale_args`` (str) The url of the repository your documentation is being generated from. .. warning:: Seriously this isn't implemented. I may not even need this from you. The harder part is figuring out how to map a given nodes "``def_in_file``" to the correct URL. I should be able to get the URL from ``git remote`` and construct the URL from that and ``git branch``. Probably just some path hacking with ``git rev-parse --show-toplevel`` and comparing that to :data:`~exhale.configs.doxygenStripFromPath`? Please feel free to `add your input here`__. __ https://github.com/svenevs/exhale/issues/2 '''<line_sep># Using Contents Directives ############################################################ contentsDirectives=<true><line_sep>''' **Optional** Include a ``.. contents::`` directive beneath the title on pages that have potential to link to a decent number of documents. **Value in** ``exhale_args`` (bool) By default, Exhale will include a ``.. contents::`` directive on the individual generated pages for the types specified by :data:`~exhale.configs.kindsWithContentsDirectives`. Set this to ``False`` to disable globally. See the :ref:`using_contents_directives` section for all pieces of the puzzle. '''<line_sep>contentsTitle="Contents"<line_sep>''' **Optional** The title of the ``.. contents::`` directive for an individual file page, when it's ``kind`` is in the list specified by :data:`~exhale.configs.kindsWithContentsDirectives` **and** :data:`~exhale.configs.contentsDirectives` is ``True``. **Value in** ``exhale_args`` (str) The default (for both Exhale and reStructuredText) is to label this as ``Contents``. 
You can choose whatever value you like. If you prefer to have **no title** for the ``.. contents::`` directives, **specify the empty string**. .. note:: Specifying the empty string only removes the title **when** ``":local:"`` **is present in** :data:`~exhale.configs.contentsSpecifiers`. See the :ref:`using_contents_directives` section for more information. '''<line_sep>contentsSpecifiers=[":local:" ":backlinks: none"]<line_sep>''' **Optional** The specifications to apply to ``.. contents::`` directives for the individual file pages when its ``kind`` is in the list specified by :data:`~exhale.configs.kindsWithContentsDirectives` **and** :data:`~exhale.configs.contentsDirectives` is ``True``. **Value in** ``exhale_args`` (list) A (one-dimensional) list of strings that will be applied to any ``.. contents::`` directives generated. Provide the **empty list** if you wish to have no specifiers added to these directives. See the :ref:`using_contents_directives` section for more information. '''<line_sep>kindsWithContentsDirectives=["file" "namespace"]<line_sep>''' **Optional** The kinds of compounds that will include a ``.. contents::`` directive on their individual library page. The default is to generate one for Files and Namespaces. Only takes meaning when :data:`~exhale.configs.contentsDirectives` is ``True``. **Value in** ``exhale_args`` (list) Provide a (one-dimensional) ``list`` or ``tuple`` of strings of the kinds of compounds that should include a ``.. contents::`` directive. Each kind given must be one of the entries in :data:`~exhale.utils.AVAILABLE_KINDS`. For example, if you wanted to enable Structs and Classes as well you would do something like: .. code-block:: py # in conf.py exhale_args = { # ... required / optional args ... "kindsWithContentsDirectives": ["file", "namespace", "class", "struct"] } .. note:: This is a "full override". So if you want to still keep the defaults of ``"file"`` and ``"namespace"``, **you** must include them yourself. '''<line_sep>######################################################################################## # Breathe Customization # ######################################################################################## customSpecificationsMapping=<none><line_sep>''' **Optional** See the :ref:`usage_customizing_breathe_output` section for how to use this. **Value in** ``exhale_args`` (dict) The dictionary produced by calling :func:`~exhale.utils.makeCustomSpecificationsMapping` with your custom function. '''<line_sep>_closure_map_sanity_check="blargh_BLARGH_blargh"<line_sep>''' See :func:`~exhale.utils.makeCustomSpecificationsMapping` implementation, this is inserted to help enforce that Exhale made the dictionary going into :data:`~exhale.configs.customSpecificationsMapping`. '''<line_sep>######################################################################################## # Doxygen Execution and Customization # ######################################################################################## _doxygen_xml_output_directory=<none><line_sep>''' The absolute path to the root level of the doxygen xml output. If the path to the ``index.xml`` file created by doxygen was ``./doxyoutput/xml/index.xml``, then this would simply be ``./doxyoutput/xml``. .. note:: This is the exact same path as ``breathe_projects[breathe_default_project]``, only it is an absolute path. '''<line_sep>exhaleExecutesDoxygen=<false><line_sep>''' **Optional** Have Exhale launch Doxygen when you execute ``make html``. 
**Value in** ``exhale_args`` (bool) Set to ``True`` to enable launching Doxygen. You must set either :data:`~exhale.configs.exhaleUseDoxyfile` or :data:`~exhale.configs.exhaleDoxygenStdin`. '''<line_sep>exhaleUseDoxyfile=<false><line_sep>''' **Optional** If :data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``, this tells Exhale to use your own ``Doxyfile``. The encouraged approach is to use :data:`~exhale.configs.exhaleDoxygenStdin`. **Value in** ``exhale_args`` (bool) Set to ``True`` to have Exhale use your ``Doxyfile``. .. note:: The ``Doxyfile`` must be in the **same** directory as ``conf.py``. Exhale will change directories to here before launching Doxygen when you have separate source and build directories for Sphinx configured. .. warning:: No sanity checks on the ``Doxyfile`` are performed. If you are using this option you need to verify two parameters in particular: 1. ``OUTPUT_DIRECTORY`` is configured so that ``breathe_projects[breathe_default_project]`` agrees. See the :ref:`Mapping of Project Names to Doxygen XML Output Paths <breathe_project>` section. 2. ``STRIP_FROM_PATH`` is configured to be identical to what is specified with :data:`~exhale.configs.doxygenStripFromPath`. I have no idea what happens when these conflict, but it likely will never result in valid documentation. '''<line_sep>exhaleDoxygenStdin=<none><line_sep>''' **Optional** If :data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``, this tells Exhale to use the (multiline string) value specified in this argument *in addition to* the :data:`~exhale.configs.DEFAULT_DOXYGEN_STDIN_BASE`. **Value in** ``exhale_args`` (str) This string describes your project's specific Doxygen configurations. At the very least, it must provide ``INPUT``. See the :ref:`usage_exhale_executes_doxygen` section for how to use this in conjunction with the default configurations, as well as how to override them. '''<line_sep>DEFAULT_DOXYGEN_STDIN_BASE=textwrap.dedent(r''' # If you need this to be YES, exhale will probably break. CREATE_SUBDIRS = NO # So that only Doxygen does not trim paths, which affects the File hierarchy FULL_PATH_NAMES = YES # Nested folders will be ignored without this. You may not need it. RECURSIVE = YES # Set to YES if you are debugging or want to compare. GENERATE_HTML = NO # Unless you want it... GENERATE_LATEX = NO # Both breathe and exhale need the xml. GENERATE_XML = YES # Set to NO if you do not want the Doxygen program listing included. XML_PROGRAMLISTING = YES # Allow for rst directives and advanced functions e.g. grid tables ALIASES = "rst=\verbatim embed:rst:leading-asterisk" ALIASES += "endrst=\endverbatim" # Enable preprocessing and related preprocessor necessities ENABLE_PREPROCESSING = YES MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = NO SKIP_FUNCTION_MACROS = NO # extra defs for to help with building the _right_ version of the docs PREDEFINED = DOXYGEN_DOCUMENTATION_BUILD PREDEFINED += DOXYGEN_SHOULD_SKIP_THIS ''')<line_sep>''' These are the default values sent to Doxygen along stdin when :data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``. This is sent to Doxygen immediately **before** the :data:`~exhale.configs.exhaleDoxygenStdin` provided to ``exhale_args`` in your ``conf.py``. In this way, you can override any of the specific defaults shown here. .. tip:: See the documentation for :data:`~exhale.configs.exhaleDoxygenStdin`, as well as :data:`~exhale.configs.exhaleUseDoxyfile`. Only **one** may be provided to the ``exhale_args`` in your ``conf.py``. .. 
include:: ../DEFAULT_DOXYGEN_STDIN_BASE_value.rst '''<line_sep>exhaleSilentDoxygen=<false><line_sep>''' **Optional** When set to ``True``, the Doxygen output is omitted from the build. **Value in** ``exhale_args`` (bool) Documentation generation can be quite verbose, especially when running both Sphinx and Doxygen in the same process. Use this to silence Doxygen. .. danger:: You are **heavily** discouraged from setting this to ``True``. Many problems that may arise through either Exhale or Breathe are because the Doxygen documentation itself has errors. It will be much more difficult to find these when you squelch the Doxygen output. The reason you would do this is for actual limitations on your specific ``stdout`` (e.g. you are getting a buffer maxed out). The likelihood of this being a problem for you is exceptionally small. '''<line_sep>######################################################################################## # Programlisting Customization # ######################################################################################## lexerMapping={}<line_sep>''' **Optional** When specified, and ``XML_PROGRAMLISTING`` is set to ``YES`` in Doxygen (either via your ``Doxyfile`` or :data:`exhaleDoxygenStdin <exhale.configs.exhaleDoxygenStdin>`), this mapping can be used to customize / correct the Pygments lexer used for the program listing page generated for files. Most projects will **not** need to use this setting. **Value in** ``exhale_args`` (dict) The keys and values are both strings. Each key is a regular expression that will be used to check with :func:`python:re.match`, noting that the primary difference between :func:`python:re.match` and :func:`python:re.search` that you should be aware of is that ``match`` searches from the **beginning** of the string. Each value should be a **valid** `Pygments lexer <http://pygments.org/docs/lexers/>`_. Example usage: .. code-block:: py exhale_args { # ... "lexerMapping": { r".*\.cuh": "cuda", r"path/to/exact_filename\.ext": "c" } } .. note:: The pattern is used to search the full path of a file, **as represented in Doxygen**. This is so that duplicate file names in separate folders can be distinguished if needed. The file path as represented in Doxygen is defined by the path to the file, with some prefix stripped out. The prefix stripped out depends entirely on what you provided to :data:`doxygenStripFromPath <exhale.configs.doxygenStripFromPath>`. .. tip:: This mapping is used in :func:`utils.doxygenLanguageToPygmentsLexer <exhale.utils.doxygenLanguageToPygmentsLexer>`, when provided it is queried first. If you are trying to get program listings for a file that is otherwise not supported directly by Doxygen, you typically want to tell Doxygen to interpret the file as a different language. Take the CUDA case. In my input to :data:`exhaleDoxygenStdin <exhale.configs.exhaleDoxygenStdin>`, I will want to set both ``FILE_PATTERNS`` and append to ``EXTENSION_MAPPING``: .. code-block:: make FILE_PATTERNS = *.hpp *.cuh EXTENSION_MAPPING += cuh=c++ By setting ``FILE_PATTERNS``, Doxygen will now try and process ``*.cuh`` files. By *appending* to ``EXTENSION_MAPPING``, it will treat ``*.cuh`` as C++ files. For CUDA, this is a reasonable choice because Doxygen is generally able to parse the file as C++ and get everything right in terms of member definitions, docstrings, etc. **However**, now the XML generated by doxygen looks like this: .. 
code-block:: xml <!-- >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> vvv --> <compounddef id="bilateral__filter_8cuh" kind="file" language="C++"> So Exhale would be default put the program listing in a ``.. code-block:: cpp``. By setting this variable in ``exhale_args``, you can bypass this and get the desired lexer of your choice. Some important notes for those not particularly comfortable or familiar with regular expressions in python: 1. Note that each key defines a *raw* string (prefix with ``r``): ``r"pattern"``. This is not entirely necessary for this case, but using raw strings makes it so that you do not have to escape as many things. It's a good practice to adopt, but for these purposes should not matter all that much. 2. Note the escaped ``.`` character. This means find the literal ``.``, rather than the regular expression wildcard for *any character*. Observe the difference with and without: .. code-block:: pycon >>> import re >>> if re.match(r".*.cuh", "some_filecuh.hpp"): print("Oops!") ... Oops! >>> if re.match(r".*\.cuh", "some_filecuh.hpp"): print("Oops!") ... >>> Without ``\.``, the ``.cuh`` matches ``ecuh`` since ``.`` is a wildcard for *any* character. You may also want to use ``$`` at the end of the expression if there are multiple file extensions involved: ``r".*\.cuh$"``. The ``$`` states "end-of-pattern", which in the usage of Exhale means end of line (the compiled regular expressions are not compiled with :data:`python:re.MULTILINE`). 3. Take special care at the beginning of your regular expression. The pattern ``r"*\.cuh"`` does **not** compile! You need to use ``r".*\.cuh"``, with the leading ``.`` being required. '''<line_sep>_compiled_lexer_mapping={}<line_sep>''' Internal mapping of compiled regular expression objects to Pygments lexer strings. This dictionary is created by compiling every key in :data:`lexerMapping <exhale.configs.lexerMapping>`. See implementation of :func:`utils.doxygenLanguageToPygmentsLexer <exhale.utils.doxygenLanguageToPygmentsLexer>` for usage. '''<line_sep>######################################################################################## ## # ## Utility variables. # ## # ######################################################################################## SECTION_HEADING_CHAR="="<line_sep>''' The restructured text H1 heading character used to underline sections. '''<line_sep>SUB_SECTION_HEADING_CHAR="-"<line_sep>''' The restructured text H2 heading character used to underline subsections. '''<line_sep>SUB_SUB_SECTION_HEADING_CHAR="*"<line_sep>''' The restructured text H3 heading character used to underline sub-subsections. '''<line_sep>MAXIMUM_FILENAME_LENGTH=255<line_sep>''' When a potential filename is longer than ``255``, a sha1 sum is used to shorten. Note that there is no ubiquitous and reliable way to query this information, as it depends on both the operating system, filesystem, **and** even the location (directory path) the file would be generated to (depending on the filesystem). As such, a conservative value of ``255`` should guarantee that the desired filename can always be created. '''<line_sep>MAXIMUM_WINDOWS_PATH_LENGTH=260<line_sep>r''' The file path length on Windows cannot be greater than or equal to ``260`` characters. Since Windows' pathetically antiquated filesystem cannot handle this, they have enabled a "magic" prefix they call an *extended-length path*. 
This is achieved by inserting the prefix ``\\?\`` which allows you to go up to a maximum path of ``32,767`` characters **but you may only do this for absolute paths**. See `Maximum Path Length Limitation`__ for more information. Dear Windows, did you know it is the 21st century? __ https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation '''<line_sep>_the_app=<none><line_sep>''' The Sphinx ``app`` object. Currently unused, saved for availability in future. '''<line_sep>_app_src_dir=<none><line_sep>''' **Do not modify**. The location of ``app.srcdir`` of the Sphinx application, once the build process has begun to execute. Saved to be able to run a few different sanity checks in different places. '''<line_sep>_on_rtd=os.environ.get('READTHEDOCS' <none>)<eq>'True'<line_sep>''' **Do not modify**. Signals whether or not the build is taking place on ReadTheDocs. If it is, then colorization of output is disabled, as well as the Doxygen output (where applicable) is directed to ``/dev/null`` as capturing it can cause the ``subprocess`` buffers to overflow. '''<line_sep>######################################################################################## ## # ## Secondary Sphinx Entry Point # ## Called from exhale/__init__.py:environment_ready during the sphinx build process. # ## # ######################################################################################## <def_stmt>apply_sphinx_configurations app<block_start>''' This method applies the various configurations users place in their ``conf.py``, in the dictionary ``exhale_args``. The error checking seems to be robust, and borderline obsessive, but there may very well be some glaring flaws. When the user requests for the ``treeView`` to be created, this method is also responsible for adding the various CSS / JavaScript to the Sphinx Application to support the hierarchical views. .. danger:: This method is **not** supposed to be called directly. See ``exhale/__init__.py`` for how this function is called indirectly via the Sphinx API. **Parameters** ``app`` (:class:`sphinx.application.Sphinx`) The Sphinx Application running the documentation build. '''<line_sep># Import local to function to prevent circular imports elsewhere in the framework. <import_from_stmt>. deploy<import_from_stmt>. utils<line_sep>#################################################################################### # Make sure they have the `breathe` configs setup in a way that we can use them. # #################################################################################### # Breathe allows users to have multiple projects to configure in one `conf.py` # A dictionary of keys := project names, values := path to Doxygen xml output dir breathe_projects=app.config.breathe_projects<if_stmt><not>breathe_projects<block_start><raise>ConfigError("You must set the `breathe_projects` in `conf.py`.")<block_end><elif_stmt>type(breathe_projects)<is><not>dict<block_start><raise>ConfigError("The type of `breathe_projects` in `conf.py` must be a dictionary.")<block_end># The breathe_default_project is required by `exhale` to determine where to look for # the doxygen xml. # # TODO: figure out how to allow multiple breathe projects? 
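# For reference, a conf.py that satisfies these checks typically contains something
# like the following (the project name and output path are illustrative only):
#
#     breathe_projects = {"ExampleProject": "./doxyoutput/xml"}
#     breathe_default_project = "ExampleProject"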
breathe_default_project=app.config.breathe_default_project<if_stmt><not>breathe_default_project<block_start><raise>ConfigError("You must set the `breathe_default_project` in `conf.py`.")<block_end><elif_stmt><not>isinstance(breathe_default_project six.string_types)<block_start><raise>ConfigError("The type of `breathe_default_project` must be a string.")<block_end><if_stmt>breathe_default_project<not><in>breathe_projects<block_start><raise>ConfigError("The given breathe_default_project='{0}' was not a valid key in `breathe_projects`:\n{1}".format(breathe_default_project breathe_projects))<block_end># Grab where the Doxygen xml output is supposed to go, make sure it is a string, # defer validation of existence until after potentially running Doxygen based on # the configs given to exhale doxy_xml_dir=breathe_projects[breathe_default_project]<if_stmt><not>isinstance(doxy_xml_dir six.string_types)<block_start><raise>ConfigError("The type of `breathe_projects[breathe_default_project]` from `conf.py` was not a string.")<block_end># Make doxy_xml_dir relative to confdir (where conf.py is) <if_stmt><not>os.path.isabs(doxy_xml_dir)<block_start>doxy_xml_dir=os.path.abspath(os.path.join(app.confdir doxy_xml_dir))<block_end>#################################################################################### # Initial sanity-check that we have the arguments needed. # #################################################################################### exhale_args=app.config.exhale_args<if_stmt><not>exhale_args<block_start><raise>ConfigError("You must set the `exhale_args` dictionary in `conf.py`.")<block_end><elif_stmt>type(exhale_args)<is><not>dict<block_start><raise>ConfigError("The type of `exhale_args` in `conf.py` must be a dictionary.")<block_end>#################################################################################### # In order to be able to loop through things below, we want to grab the globals # # dictionary (rather than needing to do `global containmentFolder` etc for every # # setting that is being changed). # #################################################################################### configs_globals=globals()<line_sep># Used for internal verification of available keys keys_available=[]<line_sep># At the end of input processing, fail out if unrecognized keys were found. keys_processed=[]<line_sep>#################################################################################### # Gather the mandatory input for exhale. # #################################################################################### key_error="Did not find required key `{key}` in `exhale_args`."<line_sep>val_error="The type of the value for key `{key}` must be `{exp}`, but was `{got}`."<line_sep>req_kv=[("containmentFolder" six.string_types <true>) ("rootFileName" six.string_types <false>) ("rootFileTitle" six.string_types <false>) ("doxygenStripFromPath" six.string_types <true>)]<for_stmt>key,expected_type,make_absolute req_kv# Used in error checking later <block_start>keys_available.append(key)<line_sep># Make sure we have the key <if_stmt>key<not><in>exhale_args<block_start><raise>ConfigError(key_error.format(key=key))<block_end># Make sure the value is at the very least the correct type val=exhale_args[key]<if_stmt><not>isinstance(val expected_type)<block_start>val_t=type(val)<line_sep><raise>ConfigError(val_error.format(key=key exp=expected_type got=val_t))<block_end># Make sure that a value was provided (e.g. 
no empty strings) <if_stmt><not>val<block_start><raise>ConfigError("Non-empty value for key [{0}] required.".format(key))<block_end># If the string represents a path, make it absolute <if_stmt>make_absolute# Directories are made absolute relative to app.confdir (where conf.py is) <block_start><if_stmt><not>os.path.isabs(val)<block_start>val=os.path.abspath(os.path.join(os.path.abspath(app.confdir) val))<block_end><block_end># Set the config for use later <try_stmt><block_start>configs_globals[key]=val<line_sep>keys_processed.append(key)<block_end><except_stmt>Exception<as>e<block_start><raise>ExtensionError("Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(key val e))<block_end><block_end>#################################################################################### # Validate what can be checked from the required arguments at this time. # #################################################################################### <global>_the_app<line_sep>_the_app=app<line_sep># Make sure they know this is a bad idea. The order of these checks is important. # This assumes the path given was not the empty string (3 will break if it is). # # 1. If containmentFolder and app.srcdir are the same, problem. # 2. If app.srcdir is not at the beginning of containmentFolder, problem. # 3. If the first two checks have not raised a problem, the final check is to make # sure that a subdirectory was actually used, as opposed to something that just # starts with the same path. # # Note for the third check lazy evaluation is the only thing that makes checking # _parts[1] acceptable ;) _one=containmentFolder<eq>app.srcdir<line_sep>_two=<not>containmentFolder.startswith(app.srcdir)<line_sep>_parts=containmentFolder.split(app.srcdir)<line_sep>_three=_parts[0]<ne>""<or>len(_parts[1].split(os.path.sep))<g>2<or>os.path.join(app.srcdir _parts[1].replace(os.path.sep "" 1))<ne>containmentFolder<line_sep># noqa # If they are equal, containmentFolder points somewhere entirely differently, or the # relative path (made absolute again) does not have the srcdir <if_stmt>_one<or>_two<or>_three<block_start><raise>ConfigError("The given `containmentFolder` [{0}] must be a *SUBDIRECTORY* of [{1}].".format(containmentFolder app.srcdir))<block_end><global>_app_src_dir<line_sep>_app_src_dir=os.path.abspath(app.srcdir)<line_sep># We *ONLY* generate reStructuredText, make sure Sphinx is expecting this as well as # the to-be-generated library root file is correctly suffixed. <if_stmt><not>rootFileName.endswith(".rst")<block_start><raise>ConfigError("The given `rootFileName` ({0}) did not end with '.rst'; Exhale is reStructuredText only.".format(rootFileName))<block_end><if_stmt>".rst"<not><in>app.config.source_suffix<block_start><raise>ConfigError("Exhale is reStructuredText only, but '.rst' was not found in `source_suffix` list of `conf.py`.")<block_end># Make sure the doxygen strip path is an exclude-able path <if_stmt><not>os.path.exists(doxygenStripFromPath)<block_start><raise>ConfigError("The path given as `doxygenStripFromPath` ({0}) does not exist!".format(doxygenStripFromPath))<block_end>#################################################################################### # Gather the optional input for exhale. # #################################################################################### # TODO: `list` -> `(list, tuple)`, update docs too. 
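# Illustrative only: if conf.py additionally contained, say,
#
#     exhale_args = {
#         # ... the required arguments handled above ...
#         "createTreeView": True,
#         "fullToctreeMaxDepth": 2,
#     }
#
# then the loop below copies each recognized optional key onto the matching
# module-level global of exhale.configs.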
opt_kv=[# Build Process Logging, Colors, and Debugging ("verboseBuild" bool) ("alwaysColorize" bool) ("generateBreatheFileDirectives" bool) # Root API Document Customization and Treeview ("afterTitleDescription" six.string_types) ("afterHierarchyDescription" six.string_types) ("fullApiSubSectionTitle" six.string_types) ("afterBodySummary" six.string_types) ("fullToctreeMaxDepth" int) ("listingExclude" list) ("unabridgedOrphanKinds" (list set)) # Clickable Hierarchies <3 ("createTreeView" bool) ("minifyTreeView" bool) ("treeViewIsBootstrap" bool) ("treeViewBootstrapTextSpanClass" six.string_types) ("treeViewBootstrapIconMimicColor" six.string_types) ("treeViewBootstrapOnhoverColor" six.string_types) ("treeViewBootstrapUseBadgeTags" bool) ("treeViewBootstrapExpandIcon" six.string_types) ("treeViewBootstrapCollapseIcon" six.string_types) ("treeViewBootstrapLevels" int) # Page Level Customization ("includeTemplateParamOrderList" bool) ("pageLevelConfigMeta" six.string_types) ("repoRedirectURL" six.string_types) ("contentsDirectives" bool) ("contentsTitle" six.string_types) ("contentsSpecifiers" list) ("kindsWithContentsDirectives" list) # Breathe Customization ("customSpecificationsMapping" dict) # Doxygen Execution and Customization ("exhaleExecutesDoxygen" bool) ("exhaleUseDoxyfile" bool) ("exhaleDoxygenStdin" six.string_types) ("exhaleSilentDoxygen" bool) # Programlisting Customization ("lexerMapping" dict)]<for_stmt>key,expected_type opt_kv# Used in error checking later <block_start>keys_available.append(key)<line_sep># Override the default settings if the key was provided <if_stmt>key<in>exhale_args# Make sure the value is at the very least the correct type <block_start>val=exhale_args[key]<if_stmt><not>isinstance(val expected_type)<block_start>val_t=type(val)<line_sep><raise>ConfigError(val_error.format(key=key exp=expected_type got=val_t))<block_end># Set the config for use later <try_stmt><block_start>configs_globals[key]=val<line_sep>keys_processed.append(key)<block_end><except_stmt>Exception<as>e<block_start><raise>ExtensionError("Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(key val e))<block_end><block_end><block_end># These two need to be lists of strings, check to make sure <def_stmt>_list_of_strings lst title<block_start><for_stmt>spec lst<block_start><if_stmt><not>isinstance(spec six.string_types)<block_start><raise>ConfigError("`{title}` must be a list of strings. `{spec}` was of type `{spec_t}`".format(title=title spec=spec spec_t=type(spec)))<block_end><block_end><block_end>_list_of_strings(contentsSpecifiers "contentsSpecifiers")<line_sep>_list_of_strings(kindsWithContentsDirectives "kindsWithContentsDirectives")<line_sep>_list_of_strings(unabridgedOrphanKinds "unabridgedOrphanKinds")<line_sep># Make sure the kinds they specified are valid unknown="Unknown kind `{kind}` given in `{config}`. See utils.AVAILABLE_KINDS."<for_stmt>kind kindsWithContentsDirectives<block_start><if_stmt>kind<not><in>utils.AVAILABLE_KINDS<block_start><raise>ConfigError(unknown.format(kind=kind config="kindsWithContentsDirectives"))<block_end><block_end><for_stmt>kind unabridgedOrphanKinds<block_start><if_stmt>kind<not><in>utils.AVAILABLE_KINDS<block_start><raise>ConfigError(unknown.format(kind=kind config="unabridgedOrphanKinds"))<block_end><block_end># Make sure the listingExlcude is usable <if_stmt>"listingExclude"<in>exhale_args<block_start><import_stmt>re<line_sep># TODO: remove this once config objects are in. Reset needed for testing suite. 
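# Illustrative only: given the PIMPL example from the listingExclude docstring,
#
#     "listingExclude": [r".*Impl$", (r".*impl$", re.IGNORECASE)]
#
# the loop below compiles one regular expression per entry and stores the
# results in `_compiled_listing_exclude`.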
configs_globals["_compiled_listing_exclude"]=[]<line_sep># used for error printing, tries to create string out of item otherwise # returns 'at index {idx}' <def_stmt>item_or_index item idx<block_start><try_stmt><block_start><return>"`{item}`".format(item=item)<block_end><except_stmt><block_start><return>"at index {idx}".format(idx=idx)<block_end><block_end>exclusions=exhale_args["listingExclude"]<for_stmt>idx range(len(exclusions))# Gather the `pattern` and `flags` parameters for `re.compile` <block_start>item=exclusions[idx]<if_stmt>isinstance(item six.string_types)<block_start>pattern=item<line_sep>flags=0<block_end><else_stmt><block_start><try_stmt><block_start>pattern,flags=item<block_end><except_stmt>Exception<as>e<block_start><raise>ConfigError("listingExclude item {0} cannot be unpacked as `pattern, flags = item`:\n{1}".format(item_or_index(item idx) e))<block_end><block_end># Compile the regular expression object. <try_stmt><block_start>regex=re.compile(pattern flags)<block_end><except_stmt>Exception<as>e<block_start><raise>ConfigError("Unable to compile specified listingExclude {0}:\n{1}".format(item_or_index(item idx) e))<block_end>configs_globals["_compiled_listing_exclude"].append(regex)<block_end><block_end># Make sure the lexerMapping is usable <if_stmt>"lexerMapping"<in>exhale_args<block_start><import_from_stmt>pygments lexers<import_stmt>re<line_sep># TODO: remove this once config objects are in. Reset needed for testing suite. configs_globals["_compiled_lexer_mapping"]={}<line_sep>lexer_mapping=exhale_args["lexerMapping"]<for_stmt>key lexer_mapping<block_start>val=lexer_mapping[key]<line_sep># Make sure both are strings <if_stmt><not>isinstance(key six.string_types)<or><not>isinstance(val six.string_types)<block_start><raise>ConfigError("All keys and values in `lexerMapping` must be strings.")<block_end># Make sure the key is a valid regular expression <try_stmt><block_start>regex=re.compile(key)<block_end><except_stmt>Exception<as>e<block_start><raise>ConfigError("The `lexerMapping` key [{0}] is not a valid regular expression: {1}".format(key e))<block_end># Make sure the provided lexer is available <try_stmt><block_start>lex=lexers.find_lexer_class_by_name(val)<block_end><except_stmt>Exception<as>e<block_start><raise>ConfigError("The `lexerMapping` value of [{0}] for key [{1}] is not a valid Pygments lexer.".format(val key))<block_end># Everything works, stash for later processing configs_globals["_compiled_lexer_mapping"][regex]=val<block_end><block_end>#################################################################################### # Internal consistency check to make sure available keys are accurate. # #################################################################################### # See naming conventions described at top of file for why this is ok! keys_expected=[]<for_stmt>key configs_globals.keys()<block_start>val=configs_globals[key]<line_sep># Ignore modules and functions <if_stmt><not>isinstance(val FunctionType)<and><not>isinstance(val ModuleType)<block_start><if_stmt>key<ne>"logger"# band-aid for logging api with Sphinx prior to config objects # Ignore specials like __name__ and internal variables like _the_app <block_start><if_stmt>"_"<not><in>key<and>len(key)<g>0# don't think there can be zero length ones... 
<block_start>first=key[0]<if_stmt>first.isalpha()<and>first.islower()<block_start>keys_expected.append(key)<block_end><block_end><block_end><block_end><block_end>keys_expected=set(keys_expected)<line_sep>keys_available=set(keys_available)<if_stmt>keys_expected<ne>keys_available<block_start>err=StringIO()<line_sep>err.write(textwrap.dedent(''' CRITICAL: Exhale encountered an internal error, please raise an Issue on GitHub: https://github.com/svenevs/exhale/issues Please paste the following in the issue report: Expected keys: '''))<for_stmt>key keys_expected<block_start>err.write("- {0}\n".format(key))<block_end>err.write(textwrap.dedent(''' Available keys: '''))<for_stmt>key keys_available<block_start>err.write("- {0}\n".format(key))<block_end>err.write(textwrap.dedent(''' The Mismatch(es): '''))<for_stmt>key (keys_available^keys_expected)<block_start>err.write("- {0}\n".format(key))<block_end>err_msg=err.getvalue()<line_sep>err.close()<line_sep><raise>ExtensionError(err_msg)<block_end>#################################################################################### # See if unexpected keys were presented. # #################################################################################### all_keys=set(exhale_args.keys())<line_sep>keys_processed=set(keys_processed)<if_stmt>all_keys<ne>keys_processed# Much love: https://stackoverflow.com/a/17388505/3814202 <block_start><import_from_stmt>difflib SequenceMatcher<def_stmt>similar a b<block_start><return>SequenceMatcher(<none> a b).ratio()<times>100.0<block_end># If there are keys left over after taking the differences of keys_processed # (which is all keys Exhale expects to see), inform the user of keys they might # have been trying to provide. # # Convert everything to lower case for better matching success potential_keys=keys_available-keys_processed<line_sep>potential_keys_lower={key.lower():key<for>key potential_keys}<line_sep>extras=all_keys-keys_processed<line_sep>extra_error=StringIO()<line_sep>extra_error.write("Exhale found unexpected keys in `exhale_args`:\n")<for_stmt>key extras<block_start>extra_error.write(" - Extra key: {0}\n".format(key))<line_sep>potentials=[]<for_stmt>mate potential_keys_lower<block_start>similarity=similar(key mate)<if_stmt>similarity<g>50.0# Output results with the non-lower version they should put in exhale_args <block_start>potentials.append((similarity potential_keys_lower[mate]))<block_end><block_end><if_stmt>potentials<block_start>potentials=reversed(sorted(potentials))<for_stmt>rank,mate potentials<block_start>extra_error.write(" - {0:2.2f}% match with: {1}\n".format(rank mate))<block_end><block_end><block_end>extra_error_str=extra_error.getvalue()<line_sep>extra_error.close()<line_sep><raise>ConfigError(extra_error_str)<block_end>#################################################################################### # Verify some potentially inconsistent or ignored settings. 
# #################################################################################### # treeViewIsBootstrap only takes meaning when createTreeView is True <if_stmt><not>createTreeView<and>treeViewIsBootstrap<block_start>logger.warning("Exhale: `treeViewIsBootstrap=True` ignored since `createTreeView=False`")<block_end># fullToctreeMaxDepth > 5 may produce other sphinx issues unrelated to exhale <if_stmt>fullToctreeMaxDepth<g>5<block_start>logger.warning("Exhale: `fullToctreeMaxDepth={0}` is greater than 5 and may build errors for non-html.".format(fullToctreeMaxDepth))<block_end># Make sure that we received a valid mapping created by utils.makeCustomSpecificationsMapping sanity=_closure_map_sanity_check<line_sep>insane="`customSpecificationsMapping` *MUST* be made using exhale.utils.makeCustomSpecificationsMapping"<if_stmt>customSpecificationsMapping# Sanity check to make sure exhale made this mapping <block_start><if_stmt>sanity<not><in>customSpecificationsMapping<block_start><raise>ConfigError(insane)<block_end><elif_stmt>customSpecificationsMapping[sanity]<ne>sanity# LOL <block_start><raise>ConfigError(insane)<block_end># Sanity check #2: enforce no new additions were made expected_keys=set([sanity])|set(utils.AVAILABLE_KINDS)<line_sep>provided_keys=set(customSpecificationsMapping.keys())<line_sep>diff=provided_keys-expected_keys<if_stmt>diff<block_start><raise>ConfigError("Found extra keys in `customSpecificationsMapping`: {0}".format(diff))<block_end># Sanity check #3: make sure the return values are all strings <for_stmt>key customSpecificationsMapping<block_start>val_t=type(customSpecificationsMapping[key])<if_stmt><not>isinstance(key six.string_types)<block_start><raise>ConfigError("`customSpecificationsMapping` key `{key}` gave value type `{val_t}` (need `str`).".format(key=key val_t=val_t))<block_end><block_end><block_end># Specify where the doxygen output should be going <global>_doxygen_xml_output_directory<line_sep>_doxygen_xml_output_directory=doxy_xml_dir<line_sep># If requested, the time is nigh for executing doxygen. The strategy: # 1. Execute doxygen if requested # 2. Verify that the expected doxy_xml_dir (specified to `breathe`) was created # 3. Assuming everything went to plan, let exhale take over and create all of the .rst docs <if_stmt>exhaleExecutesDoxygen# Cannot use both, only one or the other <block_start><if_stmt>exhaleUseDoxyfile<and>(exhaleDoxygenStdin<is><not><none>)<block_start><raise>ConfigError("You must choose one of `exhaleUseDoxyfile` or `exhaleDoxygenStdin`, not both.")<block_end># The Doxyfile *must* be at the same level as conf.py # This is done so that when separate source / build directories are being used, # we can guarantee where the Doxyfile is. 
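# Illustrative conf.py fragments for the two mutually exclusive modes (values are
# examples only, not defaults):
#
#     # Mode 1: reuse a Doxyfile that lives next to conf.py
#     "exhaleExecutesDoxygen": True, "exhaleUseDoxyfile": True
#
#     # Mode 2: drive doxygen over stdin; INPUT is the only required tag
#     "exhaleExecutesDoxygen": True, "exhaleDoxygenStdin": "INPUT = ../include"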
<if_stmt>exhaleUseDoxyfile<block_start>doxyfile_path=os.path.abspath(os.path.join(app.confdir "Doxyfile"))<if_stmt><not>os.path.exists(doxyfile_path)<block_start><raise>ConfigError("The file [{0}] does not exist".format(doxyfile_path))<block_end><block_end>here=os.path.abspath(os.curdir)<if_stmt>here<eq>app.confdir<block_start>returnPath=<none><block_end><else_stmt><block_start>returnPath=here<block_end># All necessary information ready, go to where the Doxyfile is, run Doxygen # and then return back (where applicable) so sphinx can continue start=utils.get_time()<if_stmt>returnPath<block_start>logger.info(utils.info("Exhale: changing directories to [{0}] to execute Doxygen.".format(app.confdir)))<line_sep>os.chdir(app.confdir)<block_end>logger.info(utils.info("Exhale: executing doxygen."))<line_sep>status=deploy.generateDoxygenXML()<line_sep># Being overly-careful to put sphinx back where it was before potentially erroring out <if_stmt>returnPath<block_start>logger.info(utils.info("Exhale: changing directories back to [{0}] after Doxygen.".format(returnPath)))<line_sep>os.chdir(returnPath)<block_end><if_stmt>status<block_start><raise>ExtensionError(status)<block_end><else_stmt><block_start>end=utils.get_time()<line_sep>logger.info(utils.progress("Exhale: doxygen ran successfully in {0}.".format(utils.time_string(start end))))<block_end><block_end><else_stmt><block_start><if_stmt>exhaleUseDoxyfile<block_start>logger.warning("Exhale: `exhaleUseDoxyfile` ignored since `exhaleExecutesDoxygen=False`")<block_end><if_stmt>exhaleDoxygenStdin<is><not><none><block_start>logger.warning("Exhale: `exhaleDoxygenStdin` ignored since `exhaleExecutesDoxygen=False`")<block_end><if_stmt>exhaleSilentDoxygen<block_start>logger.warning("Exhale: `exhaleSilentDoxygen=True` ignored since `exhaleExecutesDoxygen=False`")<block_end><block_end># Either Doxygen was run prior to this being called, or we just finished running it. # Make sure that the files we need are actually there. <if_stmt><not>os.path.isdir(doxy_xml_dir)<block_start><raise>ConfigError("Exhale: the specified folder [{0}] does not exist. Has Doxygen been run?".format(doxy_xml_dir))<block_end>index=os.path.join(doxy_xml_dir "index.xml")<if_stmt><not>os.path.isfile(index)<block_start><raise>ConfigError("Exhale: the file [{0}] does not exist. Has Doxygen been run?".format(index))<block_end># Legacy / debugging feature, warn of its purpose <if_stmt>generateBreatheFileDirectives<block_start>logger.warning("Exhale: `generateBreatheFileDirectives` is a debugging feature not intended for production.")<block_end>#################################################################################### # If using a fancy treeView, add the necessary frontend files. 
# #################################################################################### <if_stmt>createTreeView<block_start><if_stmt>treeViewIsBootstrap<block_start>tree_data_static_base="treeView-bootstrap"<line_sep>tree_data_css=[os.path.join("bootstrap-treeview" "bootstrap-treeview.min.css")]<line_sep>tree_data_js=[os.path.join("bootstrap-treeview" "bootstrap-treeview.min.js") # os.path.join("bootstrap-treeview", "apply-bootstrap-treview.js") ]<line_sep>tree_data_ext=[]<block_end><else_stmt><block_start>tree_data_static_base="treeView"<line_sep>tree_data_css=[os.path.join("collapsible-lists" "css" "tree_view.css")]<line_sep>tree_data_js=[os.path.join("collapsible-lists" "js" "CollapsibleLists.compressed.js") os.path.join("collapsible-lists" "js" "apply-collapsible-lists.js")]<line_sep># The tree_view.css file uses these tree_data_ext=[os.path.join("collapsible-lists" "css" "button-closed.png") os.path.join("collapsible-lists" "css" "button-open.png") os.path.join("collapsible-lists" "css" "button.png") os.path.join("collapsible-lists" "css" "list-item-contents.png") os.path.join("collapsible-lists" "css" "list-item-last-open.png") os.path.join("collapsible-lists" "css" "list-item-last.png") os.path.join("collapsible-lists" "css" "list-item-open.png") os.path.join("collapsible-lists" "css" "list-item.png") os.path.join("collapsible-lists" "css" "list-item-root.png") ]<block_end># Make sure we have everything we need collapse_data=os.path.join(os.path.abspath(os.path.dirname(__file__)) "data" tree_data_static_base)<if_stmt><not>os.path.isdir(collapse_data)<block_start><raise>ExtensionError("Exhale: the path to [{0}] was not found, possible installation error.".format(collapse_data))<block_end><else_stmt><block_start>all_files=tree_data_css+tree_data_js+tree_data_ext<line_sep>missing=[]<for_stmt>file all_files<block_start>path=os.path.join(collapse_data file)<if_stmt><not>os.path.isfile(path)<block_start>missing.append(path)<block_end><block_end><if_stmt>missing<block_start><raise>ExtensionError("Exhale: the path(s) {0} were not found, possible installation error.".format(missing))<block_end><block_end># We have all the files we need, the extra files will be copied automatically by # sphinx to the correct _static/ location, but stylesheets and javascript need # to be added explicitly logger.info(utils.info("Exhale: adding tree view css / javascript."))<line_sep>app.config.html_static_path.append(collapse_data)<line_sep># In Sphinx 1.8+ these have been renamed. # - app.add_stylesheet -> app.add_css_file # - app.add_javascript -> app.add_js_file # # RemovedInSphinx40Warning: # - The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead. # - The app.add_javascript() is deprecated. Please use app.add_js_file() instead. # # So we'll need to keep this funky `getattr` chain for a little while ;) # Or else pin min sphinx version to 1.8 or higher. Probably when 2.0 is out? add_css_file=getattr(app "add_css_file" getattr(app "add_stylesheet" <none>))<line_sep>add_js_file=getattr(app "add_js_file" getattr(app "add_javascript" <none>))<line_sep># Add the stylesheets <for_stmt>css tree_data_css<block_start>add_css_file(css)<block_end># Add the javascript <for_stmt>js tree_data_js<block_start>add_js_file(js)<block_end>logger.info(utils.progress("Exhale: added tree view css / javascript."))<block_end><block_end>
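# A minimal conf.py sketch of the ``exhale_args`` dictionary that the validation
# above operates on.  The key names follow exhale's documented quickstart; the
# folder names, project name, and Doxygen INPUT path are placeholder assumptions.

extensions = ["breathe", "exhale"]

breathe_projects = {"MyProject": "./_doxygen/xml"}   # assumed Doxygen XML output directory
breathe_default_project = "MyProject"

exhale_args = {
    # required arguments
    "containmentFolder":     "./api",
    "rootFileName":          "library_root.rst",
    "rootFileTitle":         "Library API",
    "doxygenStripFromPath":  "..",
    # optional arguments exercised by the checks above
    "createTreeView":        True,
    "exhaleExecutesDoxygen": True,
    "exhaleDoxygenStdin":    "INPUT = ../include",   # assumed header location
}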
<import_from_stmt>urllib request<import_from_stmt>PyQt5.QtCore QThread<class_stmt>Downloader(QThread)<block_start>"""Background thread that downloads an icon from a URL to a local path and stores the downloaded file name on the wrapped object's ``icon`` attribute."""<def_stmt>__init__ self wrapper icon path<block_start>QThread.__init__(self)<line_sep>self.wrapper=wrapper<line_sep>self.icon=icon<line_sep>self.path=path<block_end><def_stmt>run self<block_start><try_stmt><block_start>file_name,headers=request.urlretrieve(self.icon self.path)<line_sep>self.wrapper.icon=file_name<block_end><except_stmt>Exception# any download or filesystem error leaves wrapper.icon unchanged <block_start><pass><block_end><block_end><block_end>
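# A minimal usage sketch for the Downloader thread defined above.  The
# ``IconWrapper`` class, the icon URL, and the target path are assumptions made
# only for illustration; any object exposing an ``icon`` attribute would do.

import os
import tempfile


class IconWrapper:
    """Stand-in for the object whose ``icon`` attribute the thread updates."""
    icon = None


def download_icon_example():
    wrapper = IconWrapper()
    target = os.path.join(tempfile.gettempdir(), "app-icon.png")
    thread = Downloader(wrapper, "https://example.org/icon.png", target)
    thread.start()       # executes Downloader.run() in a background thread
    thread.wait()        # block until the download attempt finishes
    return wrapper.icon  # local file name on success, None if the download failed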
# -*- coding: ISO-8859-1 -*- <import_from_future_stmt> absolute_import<import_from_stmt>.RawInstreamFile RawInstreamFile<import_from_stmt>.MidiFileParser MidiFileParser<class_stmt>MidiInFile<block_start>""" Parses a midi file, and triggers the midi events on the outStream object. Get example data from a minimal midi file, generated with cubase. >>> test_file = 'minimal-cubase-type0.mid' Do parsing, and generate events with MidiToText, so we can see what a minimal midi file contains >>> from opendeep.utils.midi.MidiToText import MidiToText >>> midi_in = MidiInFile(MidiToText(), test_file) >>> midi_in.read() format: 0, nTracks: 1, division: 480 ---------------------------------- <BLANKLINE> Start - track #0 sequence_name: Type 0 tempo: 500000 time_signature: 4 2 24 8 note_on - ch:00, note:48, vel:64 time:0 note_off - ch:00, note:48, vel:40 time:480 End of track <BLANKLINE> End of file """<def_stmt>__init__ self outStream infile# these could also have been mixins, would that be better? Nah! <block_start>self.raw_in=RawInstreamFile(infile)<line_sep>self.parser=MidiFileParser(self.raw_in outStream)<block_end><def_stmt>read self<block_start>"Start parsing the file"<line_sep>p=self.parser<line_sep>p.parseMThdChunk()<line_sep>p.parseMTrkChunks()<block_end><def_stmt>setData self data=''<block_start>"Sets the data from a plain string"<line_sep>self.raw_in.setData(data)<block_end><block_end>
# -*- coding: utf-8 -*- # Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_stmt>os<import_from_stmt>..conftest model_path<import_stmt>openvino.runtime.opset8<as>ops<import_from_stmt>openvino.runtime ConstOutput Shape PartialShape Type Output RTMap OVAny Core <line_sep>is_myriad=os.environ.get("TEST_DEVICE")<eq>"MYRIAD"<line_sep>test_net_xml,test_net_bin=model_path(is_myriad)<def_stmt>test_const_output_type device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input(0)<assert_stmt>isinstance(node ConstOutput)<block_end><def_stmt>test_const_output_docs device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input(0)<line_sep>exptected_string="openvino.runtime.ConstOutput represents port/node output."<assert_stmt>node.__doc__<eq>exptected_string<block_end><def_stmt>test_const_output_get_index device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input("data")<assert_stmt>node.get_index()<eq>0<assert_stmt>node.index<eq>0<block_end><def_stmt>test_const_output_get_element_type device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input("data")<assert_stmt>node.get_element_type()<eq>Type.f32<assert_stmt>node.element_type<eq>Type.f32<block_end><def_stmt>test_const_output_get_shape device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input("data")<line_sep>expected_shape=Shape([1 3 32 32])<assert_stmt>str(node.get_shape())<eq>str(expected_shape)<assert_stmt>str(node.shape)<eq>str(expected_shape)<block_end><def_stmt>test_const_output_get_partial_shape device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>node=exec_net.input("data")<line_sep>expected_partial_shape=PartialShape([1 3 32 32])<assert_stmt>node.get_partial_shape()<eq>expected_partial_shape<assert_stmt>node.partial_shape<eq>expected_partial_shape<block_end><def_stmt>test_const_output_get_target_inputs device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>outputs=exec_net.outputs<for_stmt>node outputs<block_start><assert_stmt>isinstance(node.get_target_inputs() set)<assert_stmt>isinstance(node.target_inputs set)<block_end><block_end><def_stmt>test_const_output_get_names device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>input_name="data"<line_sep>node=exec_net.input(input_name)<line_sep>expected_names=set()<line_sep>expected_names.add(input_name)<assert_stmt>node.get_names()<eq>expected_names<assert_stmt>node.names<eq>expected_names<assert_stmt>node.get_any_name()<eq>input_name<assert_stmt>node.any_name<eq>input_name<block_end><def_stmt>test_const_get_rf_info device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml 
weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>output_node=exec_net.output(0)<line_sep>rt_info=output_node.get_rt_info()<assert_stmt>isinstance(rt_info RTMap)<block_end><def_stmt>test_const_output_runtime_info device<block_start>core=Core()<line_sep>func=core.read_model(model=test_net_xml weights=test_net_bin)<line_sep>exec_net=core.compile_model(func device)<line_sep>input_name="data"<line_sep>output_node=exec_net.input(input_name)<line_sep>rt_info=output_node.rt_info<assert_stmt>isinstance(rt_info RTMap)<block_end><def_stmt>test_update_rt_info device<block_start>relu=ops.relu(5)<line_sep>output_node=Output._from_node(relu)<line_sep>rt=output_node.get_rt_info()<line_sep>rt["test12345"]="test"<for_stmt>key,value output_node.get_rt_info().items()<block_start><assert_stmt>key<eq>"test12345"<assert_stmt>isinstance(value OVAny)<block_end><block_end><def_stmt>test_operations <block_start>data=ops.parameter([2])<line_sep>split=ops.split(data 0 2)<line_sep>outputs=split.outputs()<assert_stmt>outputs[0]<l>outputs[1]<assert_stmt>outputs[0]<eq>split.output(0)<assert_stmt>hash(outputs[0])<eq>hash(split.output(0))<assert_stmt>hash(outputs[0])<ne>hash(outputs[0].node)<block_end>
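# A compact sketch tying together the ConstOutput accessors exercised by the
# tests above for a single compiled model.  The model path and device name are
# assumptions; any IR model readable by openvino.runtime.Core would work.

from openvino.runtime import Core


def inspect_model_ports(xml_path="model.xml", device="CPU"):
    core = Core()
    model = core.read_model(model=xml_path)
    compiled = core.compile_model(model, device)

    data_input = compiled.input(0)                 # ConstOutput for the first input port
    print(data_input.any_name, data_input.shape, data_input.element_type)

    for out in compiled.outputs:                   # ConstOutput for every output port
        print(out.get_names(), out.get_partial_shape(), out.get_rt_info())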
""" Contains Batch classes for images """<import_stmt>os<import_stmt>warnings<import_from_stmt>numbers Number<import_stmt>numpy<as>np<import_stmt>PIL<import_stmt>PIL.ImageOps<import_stmt>PIL.ImageChops<import_stmt>PIL.ImageFilter<import_stmt>PIL.ImageEnhance<import_from_stmt>scipy.ndimage.filters gaussian_filter<import_from_stmt>scipy.ndimage.interpolation map_coordinates<import_from_stmt>.batch Batch<import_from_stmt>.decorators action apply_parallel inbatch_parallel<import_from_stmt>.dsindex FilesIndex<class_stmt>BaseImagesBatch(Batch)<block_start>""" Batch class for 2D images. Note, that if any class method is wrapped with `@apply_parallel` decorator than for inner calls (i.e. from other class methods) should be used version of desired method with underscores. (For example, if there is a decorated `method` than you need to call `_method_` from inside of `other_method`). Same is applicable for all child classes of :class:`batch.Batch`. """<line_sep>components="images" "labels" "masks"<line_sep># Class-specific defaults for :meth:`.Batch.apply_parallel` apply_defaults=dict(target='for' post='_assemble' src='images' dst='images' )<def_stmt>_make_path self ix src=<none><block_start>""" Compose path. Parameters ---------- ix : str element's index (filename) src : str Path to folder with images. Used if `self.index` is not `FilesIndex`. Returns ------- path : str Full path to an element. """<if_stmt>isinstance(src FilesIndex)<block_start>path=src.get_fullpath(ix)<block_end><elif_stmt>isinstance(self.index FilesIndex)<block_start>path=self.index.get_fullpath(ix)<block_end><else_stmt><block_start>path=os.path.join(src str(ix))<block_end><return>path<block_end><def_stmt>_load_image self ix src=<none> fmt=<none> dst="images"<block_start>""" Loads image. .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here. Parameters ---------- src : str, dataset.FilesIndex, None path to the folder with an image. If src is None then it is determined from the index. dst : str Component to write images to. fmt : str Format of the an image Raises ------ NotImplementedError If this method is not defined in a child class """<line_sep>_=self ix src dst fmt<line_sep><raise>NotImplementedError("Must be implemented in a child class")<block_end>@action<def_stmt>load self *args src=<none> fmt=<none> dst=<none> **kwargs<block_start>""" Load data. .. note:: if `fmt='images'` than ``components`` must be a single component (str). .. note:: All parameters must be named only. Parameters ---------- src : str, None Path to the folder with data. If src is None then path is determined from the index. fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'} Format of the file to download. dst : str, sequence components to download. """<if_stmt>fmt<eq>'image'<block_start><return>self._load_image(src fmt=fmt dst=dst)<block_end><return>super().load(src=src fmt=fmt dst=dst *args **kwargs)<block_end><def_stmt>_dump_image self ix src='images' dst=<none> fmt=<none><block_start>""" Saves image to dst. .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here. Parameters ---------- src : str Component to get images from. dst : str Folder where to dump. If dst is None then it is determined from index. 
Raises ------ NotImplementedError If this method is not defined in a child class """<line_sep>_=self ix src dst fmt<line_sep><raise>NotImplementedError("Must be implemented in a child class")<block_end>@action<def_stmt>dump self *args dst=<none> fmt=<none> components="images" **kwargs<block_start>""" Dump data. .. note:: If `fmt='images'` than ``dst`` must be a single component (str). .. note:: All parameters must be named only. Parameters ---------- dst : str, None Path to the folder where to dump. If dst is None then path is determined from the index. fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'} Format of the file to save. components : str, sequence Components to save. ext: str Format to save images to. Returns ------- self """<if_stmt>fmt<eq>'image'<block_start><return>self._dump_image(components dst fmt=kwargs.pop('ext'))<block_end><return>super().dump(dst=dst fmt=fmt components=components *args **kwargs)<block_end><block_end><class_stmt>ImagesBatch(BaseImagesBatch)<block_start>""" Batch class for 2D images. Images are stored as numpy arrays of PIL.Image. PIL.Image has the following system of coordinates:: X 0 -------------- > | | | images's pixels | | Y v Pixel's position is defined as (x, y) Note, that if any class method is wrapped with `@apply_parallel` decorator than for inner calls (i.e. from other class methods) should be used version of desired method with underscores. (For example, if there is a decorated `method` than you need to call `_method_` from inside of `other_method`). Same is applicable for all child classes of :class:`batch.Batch`. """<line_sep>@classmethod<def_stmt>_get_image_shape cls image<block_start><if_stmt>isinstance(image PIL.Image.Image)<block_start><return>image.size<block_end><return>image.shape[:2]<block_end>@property<def_stmt>image_shape self<block_start>""": tuple - shape of the image"""<line_sep>_,shapes_count=np.unique([image.size<for>image self.images] return_counts=<true> axis=0)<if_stmt>len(shapes_count)<eq>1<block_start><if_stmt>isinstance(self.images[0] PIL.Image.Image)<block_start><return>(*self.images[0].size len(self.images[0].getbands()))<block_end><return>self.images[0].shape<block_end><raise>RuntimeError('Images have different shapes')<block_end>@inbatch_parallel(init='indices' post='_assemble')<def_stmt>_load_image self ix src=<none> fmt=<none> dst="images"<block_start>""" Loads image .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here. Parameters ---------- src : str, dataset.FilesIndex, None Path to the folder with an image. If src is None then it is determined from the index. dst : str Component to write images to. fmt : str Format of an image. """<line_sep><return>PIL.Image.open(self._make_path(ix src))<block_end>@inbatch_parallel(init='indices')<def_stmt>_dump_image self ix src='images' dst=<none> fmt=<none><block_start>""" Saves image to dst. .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here. Parameters ---------- src : str Component to get images from. dst : str Folder where to dump. fmt : str Format of saved image. """<if_stmt>dst<is><none><block_start><raise>RuntimeError('You must specify `dst`')<block_end>image=self.get(ix src)<line_sep>ix=str(ix)+'.'+fmt<if>fmt<is><not><none><else>str(ix)<line_sep>image.save(os.path.join(dst ix))<block_end><def_stmt>_assemble_component self result *args component='images' **kwargs<block_start>""" Assemble one component after parallel execution. 
Parameters ---------- result : sequence, array_like Results after inbatch_parallel. component : str component to assemble """<line_sep>_=args kwargs<if_stmt>isinstance(result[0] PIL.Image.Image)<block_start>setattr(self component np.asarray(result dtype=object))<block_end><else_stmt><block_start><try_stmt><block_start>setattr(self component np.stack(result))<block_end><except_stmt>ValueError<block_start>array_result=np.empty(len(result) dtype=object)<line_sep>array_result[:]=result<line_sep>setattr(self component array_result)<block_end><block_end><block_end>@apply_parallel<def_stmt>to_pil self image mode=<none><block_start>"""converts images in Batch to PIL format Parameters ---------- src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. """<if_stmt>isinstance(image PIL.Image.Image)<block_start><return>image<block_end><if_stmt>mode<is><none><block_start><if_stmt>len(image.shape)<eq>2<block_start>mode='L'<block_end><elif_stmt>len(image.shape)<eq>3<block_start><if_stmt>image.shape[-1]<eq>3<block_start>mode='RGB'<block_end><elif_stmt>image.shape[-1]<eq>1<block_start>mode='L'<line_sep>image=image[: : 0]<block_end><elif_stmt>image.shape[-1]<eq>2<block_start>mode='LA'<block_end><elif_stmt>image.shape[-1]<eq>4<block_start>mode='RGBA'<block_end><block_end><else_stmt><block_start><raise>ValueError('Unknown image type as image has' image.shape[-1] 'channels')<block_end><block_end><elif_stmt>mode<eq>'L'<and>len(image.shape)<eq>3<block_start>image=image[<ellipsis> 0]<block_end><return>PIL.Image.fromarray(image mode)<block_end><def_stmt>_calc_origin self image_shape origin background_shape<block_start>""" Calculate coordinate of the input image with respect to the background. Parameters ---------- image_shape : sequence shape of the input image. origin : array_like, sequence, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'} Position of the input image with respect to the background. Can be one of: - 'center' - place the center of the input image on the center of the background and crop the input image accordingly. - 'top_left' - place the upper-left corner of the input image on the upper-left of the background and crop the input image accordingly. - 'top_right' - crop an image such that upper-right corners of an image and the cropping box coincide - 'bottom_left' - crop an image such that lower-left corners of an image and the cropping box coincide - 'bottom_right' - crop an image such that lower-right corners of an image and the cropping box coincide - 'random' - place the upper-left corner of the input image on the randomly sampled position in the background. Position is sampled uniformly such that there is no need for cropping. - other - sequence of ints or sequence of floats in [0, 1) interval; place the upper-left corner of the input image on the given position in the background. If `origin` is a sequence of floats in [0, 1), it defines a relative position of the origin in a valid region of image. background_shape : sequence shape of the background image. 
Returns ------- sequence : calculated origin in the form (column, row) """<if_stmt>isinstance(origin str)<block_start><if_stmt>origin<eq>'top_left'<block_start>origin=0 0<block_end><elif_stmt>origin<eq>'top_right'<block_start>origin=(background_shape[0]-image_shape[0]+1 0)<block_end><elif_stmt>origin<eq>'bottom_left'<block_start>origin=(0 background_shape[1]-image_shape[1]+1)<block_end><elif_stmt>origin<eq>'bottom_right'<block_start>origin=(background_shape[0]-image_shape[0]+1 background_shape[1]-image_shape[1]+1)<block_end><elif_stmt>origin<eq>'center'<block_start>origin=np.maximum(0 np.asarray(background_shape)-image_shape)<floordiv>2<block_end><elif_stmt>origin<eq>'random'<block_start>origin=(np.random.randint(background_shape[0]-image_shape[0]+1) np.random.randint(background_shape[1]-image_shape[1]+1))<block_end><else_stmt><block_start><raise>ValueError("If string, origin should be one of ['center', 'top_left', 'top_right', "<concat>"'bottom_left', 'bottom_right', 'random']. Got '{}'.".format(origin))<block_end><block_end><elif_stmt>all(0<le>elem<l>1<for>elem origin)<block_start>region=((background_shape[0]-image_shape[0]+1) (background_shape[1]-image_shape[1]+1))<line_sep>origin=np.asarray(origin)<times>region<block_end><elif_stmt><not>all(isinstance(elem int)<for>elem origin)<block_start><raise>ValueError('If not a string, origin should be either a sequence of ints or sequence of '<concat>'floats in [0, 1) interval. Got {}'.format(origin))<block_end><return>np.asarray(origin dtype=np.int)<block_end>@apply_parallel<def_stmt>scale self image factor preserve_shape=<false> origin='center' resample=0<block_start>""" Scale the content of each image in the batch. Resulting shape is obtained as original_shape * factor. Parameters ----------- factor : float, sequence resulting shape is obtained as original_shape * factor - float - scale all axes with the given factor - sequence (factor_1, factort_2, ...) - scale each axis with the given factor separately preserve_shape : bool whether to preserve the shape of the image after scaling origin : array-like, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'} Relevant only if `preserve_shape` is True. If `scale` < 1, defines position of the scaled image with respect to the original one's shape. If `scale` > 1, defines position of cropping box. Can be one of: - 'center' - place the center of the input image on the center of the background and crop the input image accordingly. - 'top_left' - place the upper-left corner of the input image on the upper-left of the background and crop the input image accordingly. - 'top_right' - crop an image such that upper-right corners of an image and the cropping box coincide - 'bottom_left' - crop an image such that lower-left corners of an image and the cropping box coincide - 'bottom_right' - crop an image such that lower-right corners of an image and the cropping box coincide - 'random' - place the upper-left corner of the input image on the randomly sampled position in the background. Position is sampled uniformly such that there is no need for cropping. - array_like - sequence of ints or sequence of floats in [0, 1) interval; place the upper-left corner of the input image on the given position in the background. If `origin` is a sequence of floats in [0, 1), it defines a relative position of the origin in a valid region of image. resample: int Parameter passed to PIL.Image.resize. Interpolation order src : str Component to get images from. Default is 'images'. 
dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. Notes ----- Using 'random' option for origin with `src` as list with multiple elements will not result in same crop for each element, as origin will be sampled independently for each `src` element. To randomly sample same origin for a number of components, use `R` named expression for `origin` argument. Returns ------- self """<line_sep>original_shape=self._get_image_shape(image)<line_sep>rescaled_shape=list(np.int32(np.ceil(np.asarray(original_shape)<times>factor)))<line_sep>rescaled_image=image.resize(rescaled_shape resample=resample)<if_stmt>preserve_shape<block_start>rescaled_image=self._preserve_shape(original_shape rescaled_image origin)<block_end><return>rescaled_image<block_end>@apply_parallel<def_stmt>crop self image origin shape crop_boundaries=<false><block_start>""" Crop an image. Extract image data from the window of the size given by `shape` and placed at `origin`. Parameters ---------- origin : sequence, str Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details. shape : sequence crop size in the form of (rows, columns) crop_boundaries : bool If `True` then crop is got only from image's area. Shape of the crop might diverge with the passed one src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. Notes ----- Using 'random' origin with `src` as list with multiple elements will not result in same crop for each element, as origin will be sampled independently for each `src` element. To randomly sample same origin for a number of components, use `R` named expression for `origin` argument. """<line_sep>origin=self._calc_origin(shape origin image.size)<line_sep>right_bottom=origin+shape<if_stmt>crop_boundaries<block_start>out_of_boundaries=origin<l>0<line_sep>origin[out_of_boundaries]=0<line_sep>image_shape=np.asarray(image.size)<line_sep>out_of_boundaries=right_bottom<g>image_shape<line_sep>right_bottom[out_of_boundaries]=image_shape[out_of_boundaries]<block_end><return>image.crop((*origin *right_bottom))<block_end>@apply_parallel<def_stmt>put_on_background self image background origin mask=<none><block_start>""" Put an image on a background at given origin Parameters ---------- background : PIL.Image, np.ndarray of np.uint8 Blank background to put image on. origin : sequence, str Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details. mask : None, PIL.Image, np.ndarray of np.uint8 mask passed to PIL.Image.paste Notes ----- Using 'random' origin with `src` as list with multiple elements will not result in same crop for each element, as origin will be sampled independently for each `src` element. To randomly sample same origin for a number of components, use `R` named expression for `origin` argument. 
"""<if_stmt><not>isinstance(background PIL.Image.Image)<block_start>background=PIL.Image.fromarray(background)<block_end><else_stmt><block_start>background=background.copy()<block_end><if_stmt><not>isinstance(mask PIL.Image.Image)<block_start>mask=PIL.Image.fromarray(mask)<if>mask<is><not><none><else><none><block_end>origin=list(self._calc_origin(self._get_image_shape(image) origin self._get_image_shape(background)))<line_sep>background.paste(image origin mask)<line_sep><return>background<block_end><def_stmt>_preserve_shape self original_shape transformed_image origin='center'<block_start>""" Change the transformed image's shape by cropping and adding empty pixels to fit the shape of original image. Parameters ---------- original_shape : sequence transformed_image : np.ndarray input_origin : array-like, {'center', 'top_left', 'random'} Position of the scaled image with respect to the original one's shape. - 'center' - place the center of the input image on the center of the background and crop the input image accordingly. - 'top_left' - place the upper-left corner of the input image on the upper-left of the background and crop the input image accordingly. - 'top_right' - crop an image such that upper-right corners of an image and the cropping box coincide - 'bottom_left' - crop an image such that lower-left corners of an image and the cropping box coincide - 'bottom_right' - crop an image such that lower-right corners of an image and the cropping box coincide - 'random' - place the upper-left corner of the input image on the randomly sampled position in the background. Position is sampled uniformly such that there is no need for cropping. - array_like - sequence of ints or sequence of floats in [0, 1) interval; place the upper-left corner of the input image on the given position in the background. If `origin` is a sequence of floats in [0, 1), it defines a relative position of the origin in a valid region of image. crop_origin: array-like, {'center', 'top_left', 'random'} Position of crop from transformed image. Has same values as `input_origin`. Returns ------- np.ndarray : image after described actions """<line_sep>transformed_shape=self._get_image_shape(transformed_image)<if_stmt>np.any(np.array(transformed_shape)<l>np.array(original_shape))<block_start>n_channels=len(transformed_image.getbands())<if_stmt>n_channels<eq>1<block_start>background=np.zeros(original_shape dtype=np.uint8)<block_end><else_stmt><block_start>background=np.zeros((*original_shape n_channels) dtype=np.uint8)<block_end><return>self._put_on_background_(transformed_image background origin)<block_end><return>self._crop_(transformed_image origin original_shape <true>)<block_end>@apply_parallel<def_stmt>filter self image mode *args **kwargs<block_start>""" Filters an image. Calls ``image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))``. For more details see `ImageFilter <http://pillow.readthedocs.io/en/stable/reference/ImageFilter.html>_`. Parameters ---------- mode : str Name of the filter. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep><return>image.filter(getattr(PIL.ImageFilter mode)(*args **kwargs))<block_end>@apply_parallel<def_stmt>transform self image *args **kwargs<block_start>""" Calls ``image.transform(*args, **kwargs)``. For more information see `<http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform>_`. 
Parameters ---------- src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>size=kwargs.pop('size' self._get_image_shape(image))<line_sep><return>image.transform(*args size=size **kwargs)<block_end>@apply_parallel<def_stmt>resize self image size *args **kwargs<block_start>""" Calls ``image.resize(*args, **kwargs)``. For more details see `<https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize>_`. Parameters ---------- size : tuple the resulting size of the image. If one of the components of tuple is None, corresponding dimension will be proportionally resized. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<if_stmt>size[0]<is><none><and>size[1]<is><none><block_start><raise>ValueError('At least one component of the parameter "size" must be a number.')<block_end><if_stmt>size[0]<is><none><block_start>new_size=(int(image.size[0]<times>size[1]/image.size[1]) size[1])<block_end><elif_stmt>size[1]<is><none><block_start>new_size=(size[0] int(image.size[1]<times>size[0]/image.size[0]))<block_end><else_stmt><block_start>new_size=size<block_end><return>image.resize(new_size *args **kwargs)<block_end>@apply_parallel<def_stmt>shift self image offset mode='const'<block_start>""" Shifts an image. Parameters ---------- offset : (Number, Number) mode : {'const', 'wrap'} How to fill borders src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<if_stmt>mode<eq>'const'<block_start>image=image.transform(size=image.size method=PIL.Image.AFFINE data=(1 0 -offset[0] 0 1 -offset[1]))<block_end><elif_stmt>mode<eq>'wrap'<block_start>image=PIL.ImageChops.offset(image *offset)<block_end><else_stmt><block_start><raise>ValueError("mode must be one of ['const', 'wrap']")<block_end><return>image<block_end>@apply_parallel<def_stmt>pad self image *args **kwargs<block_start>""" Calls ``PIL.ImageOps.expand``. For more details see `<http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.expand>`_. Parameters ---------- offset : sequence Size of the borders in pixels. The order is (left, top, right, bottom). mode : {'const', 'wrap'} Filling mode src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep><return>PIL.ImageOps.expand(image *args **kwargs)<block_end>@apply_parallel<def_stmt>rotate self image *args **kwargs<block_start>""" Rotates an image. kwargs are passed to PIL.Image.rotate Parameters ---------- angle: Number In degrees counter clockwise. resample: int Interpolation order expand: bool Whether to expand the output to hold the whole image. Default is False. center: (Number, Number) Center of rotation. Default is the center of the image. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep><return>image.rotate(*args **kwargs)<block_end>@apply_parallel<def_stmt>flip self image mode='lr'<block_start>""" Flips image. 
Parameters ---------- mode : {'lr', 'ud'} - 'lr' - apply the left/right flip - 'ud' - apply the upside/down flip src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<if_stmt>mode<eq>'lr'<block_start><return>PIL.ImageOps.mirror(image)<block_end><return>PIL.ImageOps.flip(image)<block_end>@apply_parallel<def_stmt>invert self image channels='all'<block_start>""" Invert givn channels. Parameters ---------- channels : int, sequence Indices of the channels to invert. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<if_stmt>channels<eq>'all'<block_start>image=PIL.ImageChops.invert(image)<block_end><else_stmt><block_start>bands=list(image.split())<line_sep>channels=(channels )<if>isinstance(channels Number)<else>channels<for_stmt>channel channels<block_start>bands[channel]=PIL.ImageChops.invert(bands[channel])<block_end>image=PIL.Image.merge('RGB' bands)<block_end><return>image<block_end>@apply_parallel<def_stmt>salt self image p_noise=.015 color=255 size=(1 1)<block_start>""" Set random pixel on image to givan value. Every pixel will be set to ``color`` value with probability ``p_noise``. Parameters ---------- p_noise : float Probability of salting a pixel. color : float, int, sequence, callable Color's value. - int, float, sequence -- value of color - callable -- color is sampled for every chosen pixel (rules are the same as for int, float and sequence) size : int, sequence of int, callable Size of salt - int -- square salt with side ``size`` - sequence -- recangular salt in the form (row, columns) - callable -- size is sampled for every chosen pixel (rules are the same as for int and sequence) src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>mask_size=np.asarray(self._get_image_shape(image))<line_sep>mask_salt=np.random.binomial(1 p_noise size=mask_size).astype(bool)<line_sep>image=np.array(image)<if_stmt>isinstance(size (tuple int))<and>size<in>[1 (1 1)]<and><not>callable(color)<block_start>image[mask_salt]=color<block_end><else_stmt><block_start>size_lambda=size<if>callable(size)<else><lambda>:size<line_sep>color_lambda=color<if>callable(color)<else><lambda>:color<line_sep>mask_salt=np.where(mask_salt)<for_stmt>i range(len(mask_salt[0]))<block_start>current_size=size_lambda()<line_sep>current_size=(current_size current_size)<if>isinstance(current_size Number)<else>current_size<line_sep>left_top=np.asarray((mask_salt[0][i] mask_salt[1][i]))<line_sep>right_bottom=np.minimum(left_top+current_size self._get_image_shape(image))<line_sep>image[left_top[0]:right_bottom[0] left_top[1]:right_bottom[1]]=color_lambda()<block_end><block_end><return>PIL.Image.fromarray(image)<block_end>@apply_parallel<def_stmt>clip self image low=0 high=255<block_start>""" Truncate image's pixels. Parameters ---------- low : int, float, sequence Actual pixel's value is equal max(value, low). If sequence is given, then its length must coincide with the number of channels in an image and each channel is thresholded separately high : int, float, sequence Actual pixel's value is equal min(value, high). 
If sequence is given, then its length must coincide with the number of channels in an image and each channel is thresholded separately src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<if_stmt>isinstance(low Number)<block_start>low=tuple([low]<times>3)<block_end><if_stmt>isinstance(high Number)<block_start>high=tuple([high]<times>3)<block_end>high=PIL.Image.new('RGB' image.size high)<line_sep>low=PIL.Image.new('RGB' image.size low)<line_sep><return>PIL.ImageChops.lighter(PIL.ImageChops.darker(image high) low)<block_end>@apply_parallel<def_stmt>enhance self image layout='hcbs' factor=(1 1 1 1)<block_start>""" Apply enhancements from PIL.ImageEnhance to the image. Parameters ---------- layout : str defines layout of operations, default is `hcbs`: h - color c - contrast b - brightness s - sharpness factor : float or tuple of float factor of enhancement for each operation listed in `layout`. """<line_sep>enhancements={'h':'Color' 'c':'Contrast' 'b':'Brightness' 's':'Sharpness'}<if_stmt>isinstance(factor float)<block_start>factor=(factor )<times>len(layout)<block_end><if_stmt>len(layout)<ne>len(factor)<block_start><raise>ValueError("'layout' and 'factor' should be of same length!")<block_end><for_stmt>alias,multiplier zip(layout factor)<block_start>enhancement=enhancements.get(alias)<if_stmt>enhancement<is><none><block_start><raise>ValueError('Unknown enhancement alias: ' alias)<block_end>image=getattr(PIL.ImageEnhance enhancement)(image).enhance(multiplier)<block_end><return>image<block_end>@apply_parallel<def_stmt>multiply self image multiplier=1. clip=<false> preserve_type=<false><block_start>""" Multiply each pixel by the given multiplier. Parameters ---------- multiplier : float, sequence clip : bool whether to force image's pixels to be in [0, 255] or [0, 1.] preserve_type : bool Whether to preserve ``dtype`` of transformed images. If ``False`` is given then the resulting type will be ``np.float``. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>multiplier=np.float32(multiplier)<if_stmt>isinstance(image PIL.Image.Image)<block_start><if_stmt>preserve_type<is><false><block_start>warnings.warn("Note that some info might be lost during `multiply` transformation since PIL.image "<concat>"stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or "<concat>"consider using `to_array` action before multiplication.")<block_end><return>PIL.Image.fromarray(np.clip(multiplier<times>np.asarray(image) 0 255).astype(np.uint8))<block_end>dtype=image.dtype<if>preserve_type<else>np.float<if_stmt>clip<block_start>image=np.clip(multiplier<times>image 0 255<if>dtype<eq>np.uint8<else>1.)<block_end><else_stmt><block_start>image=multiplier<times>image<block_end><return>image.astype(dtype)<block_end>@apply_parallel<def_stmt>add self image term=1. clip=<false> preserve_type=<false><block_start>""" Add term to each pixel. Parameters ---------- term : float, sequence clip : bool whether to force image's pixels to be in [0, 255] or [0, 1.] preserve_type : bool Whether to preserve ``dtype`` of transformed images. If ``False`` is given then the resulting type will be ``np.float``. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. 
p : float Probability of applying the transform. Default is 1. """<line_sep>term=np.float32(term)<if_stmt>isinstance(image PIL.Image.Image)<block_start><return>PIL.Image.fromarray(np.clip(term+np.asarray(image) 0 255).astype(np.uint8))<block_end>dtype=image.dtype<if>preserve_type<else>np.float<if_stmt>clip<block_start>image=np.clip(term+image 0 255<if>dtype<eq>np.uint8<else>1.)<block_end><else_stmt><block_start>image=term+image<block_end><return>image.astype(dtype)<block_end>@apply_parallel<def_stmt>pil_convert self image mode="L"<block_start>""" Convert image. Actually calls ``image.convert(mode)``. Parameters ---------- mode : str Pass 'L' to convert to grayscale src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep><return>image.convert(mode)<block_end>@apply_parallel<def_stmt>posterize self image bits=4<block_start>""" Posterizes image. More concretely, it quantizes pixels' values so that they have``2^bits`` colors Parameters ---------- bits : int Number of bits used to store a color's component. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep><return>PIL.ImageOps.posterize(image bits)<block_end>@apply_parallel<def_stmt>cutout self image origin shape color<block_start>""" Fills given areas with color .. note:: It is assumed that ``origins``, ``shapes`` and ``colors`` have the same length. Parameters ---------- origin : sequence, str Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details. shape : sequence, int Shape of a filled box. Can be one of: - sequence - crop size in the form of (rows, columns) - int - shape has squared form color : sequence, number Color of a filled box. Can be one of: - sequence - (r,g,b) form - number - grayscale src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. Notes ----- Using 'random' origin with `src` as list with multiple elements will not result in same crop for each element, as origin will be sampled independently for each `src` element. To randomly sample same origin for a number of components, use `R` named expression for `origin` argument. """<line_sep>image=image.copy()<line_sep>shape=(shape shape)<if>isinstance(shape Number)<else>shape<line_sep>origin=self._calc_origin(shape origin self._get_image_shape(image))<line_sep>color=(color color color)<if>isinstance(color Number)<else>color<line_sep>image.paste(PIL.Image.new('RGB' tuple(shape) tuple(color)) tuple(origin))<line_sep><return>image<block_end><def_stmt>_assemble_patches self patches *args dst **kwargs<block_start>""" Assembles patches after parallel execution. Parameters ---------- patches : sequence Patches to gather. pathces.shape must be like (batch.size, patches_i, patch_height, patch_width, n_channels) dst : str Component to put patches in. """<line_sep>_=args kwargs<line_sep>new_items=np.concatenate(patches)<line_sep>setattr(self dst new_items)<line_sep><return>self<block_end>@action@inbatch_parallel(init='indices' post='_assemble_patches')<def_stmt>split_to_patches self ix patch_shape stride=1 drop_last=<false> src='images' dst=<none><block_start>""" Splits image to patches. 
Small images with the same shape (``patch_shape``) are cropped from the original one with stride ``stride``. Parameters ---------- patch_shape : int, sequence Patch's shape in the from (rows, columns). If int is given then patches have square shape. stride : int, square Step of the moving window from which patches are cropped. If int is given then the window has square shape. drop_last : bool Whether to drop patches whose window covers area out of the image. If False is passed then these patches are cropped from the edge of an image. See more in tutorials. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>_=dst<line_sep>image=self.get(ix src)<line_sep>image_shape=self._get_image_shape(image)<line_sep>image=np.array(image)<line_sep>stride=(stride stride)<if>isinstance(stride Number)<else>stride<line_sep>patch_shape=(patch_shape patch_shape)<if>isinstance(patch_shape Number)<else>patch_shape<line_sep>patches=[]<def_stmt>_iterate_columns row_from row_to<block_start>column=0<while_stmt>column<l>image_shape[1]-patch_shape[1]+1<block_start>patches.append(PIL.Image.fromarray(image[column:column+patch_shape[1] row_from:row_to]))<line_sep>column<augadd>stride[1]<block_end><if_stmt><not>drop_last<and>column+patch_shape[1]<ne>image_shape[1]<block_start>patches.append(PIL.Image.fromarray(image[image_shape[1]-patch_shape[1]:image_shape[1] row_from:row_to]))<block_end><block_end>row=0<while_stmt>row<l>image_shape[0]-patch_shape[0]+1<block_start>_iterate_columns(row row+patch_shape[0])<line_sep>row<augadd>stride[0]<block_end><if_stmt><not>drop_last<and>row+patch_shape[0]<ne>image_shape[0]<block_start>_iterate_columns(image_shape[0]-patch_shape[0] image_shape[0])<block_end>array=np.empty(len(patches) dtype=object)<for_stmt>i,patch enumerate(patches)<block_start>array[i]=patch<block_end><return>array<block_end>@apply_parallel<def_stmt>additive_noise self image noise clip=<false> preserve_type=<false><block_start>""" Add additive noise to an image. Parameters ---------- noise : callable Distribution. Must have ``size`` parameter. clip : bool whether to force image's pixels to be in [0, 255] or [0, 1.] preserve_type : bool Whether to preserve ``dtype`` of transformed images. If ``False`` is given then the resulting type will be ``np.float``. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>noise=noise(size=(*image.size len(image.getbands()))<if>isinstance(image PIL.Image.Image)<else>image.shape)<line_sep><return>self._add_(image noise clip preserve_type)<block_end>@apply_parallel<def_stmt>multiplicative_noise self image noise clip=<false> preserve_type=<false><block_start>""" Add multiplicative noise to an image. Parameters ---------- noise : callable Distribution. Must have ``size`` parameter. clip : bool whether to force image's pixels to be in [0, 255] or [0, 1.] preserve_type : bool Whether to preserve ``dtype`` of transformed images. If ``False`` is given then the resulting type will be ``np.float``. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. 
"""<line_sep>noise=noise(size=(*image.size len(image.getbands()))<if>isinstance(image PIL.Image.Image)<else>image.shape)<line_sep><return>self._multiply_(image noise clip preserve_type)<block_end>@apply_parallel<def_stmt>elastic_transform self image alpha sigma **kwargs<block_start>""" Deformation of images as described by Simard, Steinkraus and Platt, `Best Practices for Convolutional Neural Networks applied to Visual Document Analysis <http://cognitivemedium.com/assets/rmnist/Simard.pdf>_`. Code slightly differs from `<https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`_. Parameters ---------- alpha : number maximum of vectors' norms. sigma : number Smooth factor. src : str Component to get images from. Default is 'images'. dst : str Component to write images to. Default is 'images'. p : float Probability of applying the transform. Default is 1. """<line_sep>image=np.array(image)<line_sep># full shape is needed shape=image.shape<if_stmt>len(shape)<eq>2<block_start>image=image[<ellipsis> <none>]<line_sep>shape=image.shape<block_end>kwargs.setdefault('mode' 'constant')<line_sep>kwargs.setdefault('cval' 0)<line_sep>column_shift=gaussian_filter(np.random.uniform(-1 1 size=shape) sigma **kwargs)<times>alpha<line_sep>row_shift=gaussian_filter(np.random.uniform(-1 1 size=shape) sigma **kwargs)<times>alpha<line_sep>row,column,channel=np.meshgrid(range(shape[0]) range(shape[1]) range(shape[2]))<line_sep>indices=(column+column_shift row+row_shift channel)<line_sep>distored_image=map_coordinates(image indices order=1 mode='reflect')<if_stmt>shape[-1]<eq>1<block_start><return>PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape))[<ellipsis> 0])<block_end><return>PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape)))<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("TEST")<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.add_(cms.Service("ResourceEnforcer" maxVSize=cms.untracked.double(1.0) maxRSS=cms.untracked.double(1.0) maxTime=cms.untracked.double(1.0)))<line_sep>process.thing=cms.EDProducer("ThingProducer")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(10))<line_sep>process.p=cms.Path(process.thing)<line_sep>
<import_from_stmt>itertools count<import_stmt>numpy<as>np<import_from_stmt>numpy zeros arange dot cross searchsorted array eye ones<line_sep>#from pyNastran.bdf.field_writer_8 import print_card_8 <import_from_stmt>pyNastran.bdf.bdf_interface.assign_type integer<import_from_stmt>pyNastran.dev.bdf_vectorized.cards.elements.solid.solid_element SolidElement<def_stmt>volume4 xyz1 xyz2 xyz3 xyz4<block_start>r""" Gets the volume, :math:`V`, of the tetrahedron. .. math:: V = \frac{(a-d) \cdot \left( (b-d) \times (c-d) \right) }{6} """<line_sep>V=-dot((xyz1-xyz4) cross(xyz2-xyz4 xyz3-xyz4))/6.<line_sep>#V = 1/6. * np.det( #np.hstack( #[1., 1., 1., 1.], #np.vstack(n1, n2, n3, n4).T, #), #) <return>V<block_end><class_stmt>CTETRA4(SolidElement)<block_start>type='CTETRA4'<line_sep>nnodes=4<def_stmt>__init__ self model<block_start>""" Defines the CTETRA object. Parameters ---------- model : BDF the BDF object """<line_sep>SolidElement.__init__(self model)<block_end><def_stmt>add_card self card comment=''<block_start>i=self.i<line_sep>eid=integer(card 1 'element_id')<if_stmt>comment<block_start>self.set_comment(eid comment)<block_end>#: Element ID self.element_id[i]=eid<line_sep>#: Property ID self.property_id[i]=integer(card 2 'property_id')<line_sep>#: Node IDs nids=array([integer(card 3 'node_id_1') integer(card 4 'node_id_2') integer(card 5 'node_id_3') integer(card 6 'node_id_4') ] dtype='int32')<assert_stmt>0<not><in>nids '%s\n%s'%(nids card)<line_sep>self.node_ids[i :]=nids<assert_stmt>len(card)<eq>7 'len(CTETRA4 card) = %i\ncard=%s'%(len(card) card)<line_sep>self.i<augadd>1<block_end><def_stmt>update self maps<block_start>""" maps = { 'node_id' : nid_map, 'property' : pid_map, } """<if_stmt>self.n<block_start>eid_map=maps['element']<line_sep>nid_map=maps['node']<line_sep>pid_map=maps['property']<for_stmt>i,(eid pid nids) enumerate(zip(self.element_id self.property_id self.node_ids))<block_start>print(self.print_card(i))<line_sep>self.element_id[i]=eid_map[eid]<line_sep>self.property_id[i]=pid_map[pid]<line_sep>self.node_ids[i 0]=nid_map[nids[0]]<line_sep>self.node_ids[i 1]=nid_map[nids[1]]<line_sep>self.node_ids[i 2]=nid_map[nids[2]]<line_sep>self.node_ids[i 3]=nid_map[nids[3]]<block_end><block_end><block_end><def_stmt>get_mass_matrix self i model positions index0s<block_start>r""" A mass matrix is a discrete representation of a continuous mass distribution. To compute our mass matrix for a tetrahedral element with linear shape functions we need the formula (pp. 266 in Cook) a!b!c!d! \int_V N_1^a N_2^b N_3^c N_4^d dV = 6V -------------------------- (**) (3 + a + b +c + d)! A consistent element mass matrix (pp. 376 Cook) is defined as m = \int_V \rho N^T N dV (***) This equation can be derived from work balance, the details of which is unimportant here (look Cook pp. 375-376 for details). 
Assumping \rho is constant over each tetrahedral element and using the linear shape functions the above definition (***) results in |N_1| m = \rho \int_V |N_2| |N_1 N_2 N_3 N_4| dV |N_3| |N_4| |(N_1 N_1) (N_1 N_2) (N_1 N_3) (N_1 N_4)| m = \rho \int_V |(N_2 N_1) (N_2 N_2) (N_2 N_3) (N_2 N_4)| dV |(N_3 N_1) (N_3 N_2) (N_3 N_3) (N_3 N_4)| |(N_4 N_1) (N_4 N_2) (N_4 N_3) (N_4 N_4)| by (**) | 2 1 1 1| m = \rho V/20 | 1 2 1 1| (****) | 1 1 2 1| | 1 1 1 2| V m_ij = \rho --- (1+delta_ij) 20 in 3D this means that for the tetrahedral element | 2 2 2 1 1 1 1 1 1 1 1 1 | | 2 2 2 1 1 1 1 1 1 1 1 1 | | 2 2 2 1 1 1 1 1 1 1 1 1 | | | | 1 1 1 2 2 2 1 1 1 1 1 1 | | 1 1 1 2 2 2 1 1 1 1 1 1 | V | 1 1 1 2 2 2 1 1 1 1 1 1 | Me = \rho --- | | 20 | 1 1 1 1 1 1 2 2 2 1 1 1 | | 1 1 1 1 1 1 2 2 2 1 1 1 | | 1 1 1 1 1 1 2 2 2 1 1 1 | | | | 1 1 1 1 1 1 1 1 1 2 2 2 | | 1 1 1 1 1 1 1 1 1 2 2 2 | | 1 1 1 1 1 1 1 1 1 2 2 2 | Notice that in order to obtain the global/system mass matrix an assembly similar to the stiffness matrix assembly must be carried out. Further, the global M matrix will have the same sub-block pattern as the global K matrix. A consistent mass matrix is often not used in computer graphics. Instead and ad-hoc approach named ``lumped'' mass matrix is applied. The lumped mass matrix is obtained by placing particle masses at the nodes. This corresponds to shifting all the masses in the rows of (****) onto the diagonal. In 3D this yields the element mass matrix | 1 0 0 0 0 0 0 0 0 0 0 0 | | 0 1 0 0 0 0 0 0 0 0 0 0 | | 0 0 1 0 0 0 0 0 0 0 0 0 | | | | 0 0 0 1 0 0 0 0 0 0 0 0 | | 0 0 0 0 1 0 0 0 0 0 0 0 | V | 0 0 0 0 0 1 0 0 0 0 0 0 | Me = \rho --- | | 4 | 0 0 0 0 0 0 1 0 0 0 0 0 | | 0 0 0 0 0 0 0 1 0 0 0 0 | | 0 0 0 0 0 0 0 0 1 0 0 0 | | | | 0 0 0 0 0 0 0 0 0 1 0 0 | | 0 0 0 0 0 0 0 0 0 0 1 0 | | 0 0 0 0 0 0 0 0 0 0 0 1 | Thus a lumped mass matrix is diagonal whereas a consistent mass matrix is not. Observe that the global mass matrix would also diagonal and the assembly is simplified to an iteration over all tetrahedra, while incementing the nodal mass by one fourth of the tetrahedral mass. for each node n mass(n) = 0 next n for each tetrahedron e mass(n_i) += \rho_e Ve / 4 mass(n_j) += \rho_e Ve / 4 mass(n_k) += \rho_e Ve / 4 mass(n_m) += \rho_e Ve / 4 next e where n_i,n_j,n_k and n_m are the four nodes of the e'th tetrahedron. The advantage of lumping is less storage and higher performance. On the downside lumping introduces a discontinouty in the displacement field. Obrien.shen state that the errors in lumping is negligeble for small-size course meshes used in computer graphics. However, for finer meshes the errors becomes noticeable. There do exist other approaches for computing mass matrices, even methods which combine other methods. We refer the interested reader to Cook for more details. Here we have limited our selfes to the two most common methods. It is worthwhile to notice that under the reasonable assumptions that V and \rho are positive for all elements both the element mass matrices and the global mass matrices are symmetric positive definite matrices. 
http://image.diku.dk/svn/OpenTissue/archieve/silcowitz/OpenTissue/dynamics/fem/fem_compute_mass.h """<line_sep>is_lumped=<true><line_sep>is_consistent=<false><line_sep>nnodes=4<line_sep>ndof=3<times>nnodes<line_sep>pid=self.property_id[i]<line_sep>rho=self.model.elements.properties_solid.psolid.get_density_by_property_id(pid)[0]<line_sep>n0,n1,n2,n3=self.node_ids[i :]<line_sep>V=volume4(positions[self.node_ids[i 0]] positions[self.node_ids[i 1]] positions[self.node_ids[i 2]] positions[self.node_ids[i 3]])<line_sep>mass=rho<times>V<if_stmt>is_lumped<block_start>mi=mass/4.<line_sep>nnodes=4<line_sep>M=eye(ndof dtype='float32')<block_end><else_stmt><block_start>mi=mass/20.<line_sep>M=ones((ndof ndof) dtype='float32')<for_stmt>i range(nnodes)<block_start>j=i<times>3<line_sep>M[j:j+3 j:j+3]=2.<block_end><block_end>M<augmul>mi<line_sep>dofs,nijv=self.get_dofs_nijv(index0s n0 n1 n2 n3)<line_sep><return>M dofs nijv<block_end><def_stmt>get_stiffness_matrices self model positions index0s<block_start>out=[]<line_sep># volume coordinates # FEM: Volume I (Zienkiewicz) p.186 volume6=volume<times>6<line_sep>L1=(a1+b1<times>x+c1<times>y+d1<times>z)/volume6<line_sep>L2=(a2+b2<times>x+c2<times>y+d2<times>z)/volume6<line_sep>L3=(a3+b3<times>x+c3<times>y+d3<times>z)/volume6<line_sep># FEM: Volume I (Zienkiewicz) p.186 #x = L1*x1 + L2*x2 + L3*x3 + L4*x4 #y = L1*y1 + L2*y2 + L3*y3 + L4*y4 #z = L1*z1 + L2*z2 + L3*z3 + L4*z4 #1 = L1 + L2 + L3 + L4 <for_stmt>i range(self.n)<block_start>K,dofs,nijv=self.get_stiffness_matrix(i model self.positions index0s)<line_sep>out.append(K dofs nijv)<block_end>self.add_stiffness(K dofs nijv)<block_end><def_stmt>get_stiffness_matrix self i model positions index0s<block_start>nnodes=4<line_sep>ndof=3<times>nnodes<line_sep>pid=self.property_id[i]<line_sep>prop=self.model.elements.properties_solid.psolid<line_sep>rho=prop.get_density_by_property_id(pid)[0]<line_sep>n0,n1,n2,n3=self.node_ids[i :]<line_sep>xyz1=positions[self.node_ids[i 0]]<line_sep>xyz2=positions[self.node_ids[i 1]]<line_sep>xyz3=positions[self.node_ids[i 2]]<line_sep>xyz4=positions[self.node_ids[i 3]]<line_sep>vol=volume4(xyz1 xyz2 xyz3 xyz4)<line_sep>stiffness=rho<times>vol<line_sep>ki=stiffness/4.<line_sep>nnodes=4<line_sep>K=eye(ndof dtype='float32')# not done... u=0.<line_sep>v=0.<line_sep>#wts = [-0.57735, 0.57735] #for u in wts: #for v in wts: Ji=array([[v-1.0 -v+1.0 v+1.0 -v-1.0] [u-1.0 -u-1.0 u+1.0 -u+1.0] ])/4.<line_sep>#J = Ji @ xy #Jinv = np.linalg.inv(J) #det_j = np.linalg.det(J) #darea = det_j #B1 = Jinv @ Ji #print('B1 =\n', B1) #N1x, N2x, N3x, N4x = B1[0, :] #N1y, N2y, N3y, N4y = B1[1, :] #print('Nix =', B1[0, :]) vol_matrix=np.hstack([1. 1. 1. 1.] np.vstack([xyz1 xyz2 xyz3 xyz4]).T )<line_sep>ivol_matrix=np.linalg.inv(vol_matrix)<line_sep>a1,b1,c1=ivol_matrix[0 1:]<line_sep>a2,b2,c2=ivol_matrix[1 1:]<line_sep>a3,b3,c3=ivol_matrix[2 1:]<line_sep>a4,b4,c4=ivol_matrix[3 1:]<line_sep>#N1x, N2x, N3x, N4x = v - 1.0, -v + 1.0, v + 1.0, -v - 1.0 #N1y, N2y, N3y, N4y = u - 1.0, -u - 1.0, u + 1.0, -u + 1.0 B=array([[a1 0. 0. a2 0. 0. a3 0. 0. a4 0. 0.] [0. b1 0. 0. b2 0. 0. b3 0. 0. b4 0.] [0. 0. c1 0. 0. c2 0. 0. c3 0. 0. c4] [b1 a1 0. b2 a2 0. b3 a3 0. b4 a4 0.] [0. c1 b1 0. c2 b2 0. c3 b3 0. c4 b4] [c1 0. a1 c2 0. a2 c3 0. a3 c4 0. 
a4] ])/(6<times>vol)<line_sep>#N = array([ #[N1, 0., 0., N2, 0., 0., N3, 0., N4, 0., 0.], #[0., N1, 0., 0., N2, 0., 0., N3, 0., N4, 0.], #[0., 0., N1, 0., 0., N2, 0., 0., N3, 0., N4], #]) #print('B =\n', B) #E = 1.0 #nu = 0.25 mid1=prop.material_id[0]<line_sep>mat=self.model.materials.get_solid_material(mid1)<line_sep>print(mat)<line_sep>E=mat.E[0]<line_sep>nu=mat.nu[0]<line_sep>G=mat.G[0]<line_sep># [sigma] = [C] * [epsilon] #denom = 1 - nu**2 #C = np.zeros((6, 6), dtype='float64') #outside = E / ((1 + nu) * (1 - 2 * nu)) #C[0, 0] = C[1, 1] = C[2, 2] = (1 - nu) * outside #C[3, 3] = C[4, 4] = C[5, 5] = (0.5 - nu) * outside <if_stmt>0## [stress] = [E] [strain] #emat = np.zeros((5, 5), dtype='float64') #emat[0, 0] = emat[1, 1] = E / denom #emat[1, 0] = emat[0, 1] = (E * nu) / denom #emat[2, 2] = emat[3, 3] = emat[4, 4] = G ## [M] = [D] * [bending] #dmat = np.zeros((5, 5), dtype='float64') #D = E * h**3 / (12 * denom) #dmat[0, 0] = dmat[1, 1] = D #dmat[1, 0] = dmat[0, 1] = D * nu #dmat[2, 2] = D * (1. - nu) / 2. #dmat[3, 3] = emat[4, 4] = G * h # FEM: Volume I (Zienkiewicz) p.132 <block_start>dmat2=np.array(6 6)<line_sep>dmat2[0 0]=dmat2[1 1]=dmat2[2 2]=1-nu<line_sep>dmat2[0 1]=dmat2[0 2]=dmat2[1 0]=dmat2[2 0]=nu<line_sep>dmat2[3 3]=dmat2[4 4]=dmat[5 5]=(1-2<times>nu)/2.<line_sep>dmat2<augmul>E/((1+nu)<times>(1-2<times>nu))<block_end>#print('C =\n', C) #print('thickness =', thickness) Ki=B.T@C@B<line_sep>#print('Ki(%s,%s) =%s\n' % (u, v, Ki)) #print('Ki(%s,%s) =\n%s\n' % (u, v, list_print(Ki, '%.4e'))) K<augadd>Ki<line_sep>#K *= ki dofs,nijv=self.get_dofs_nijv(index0s n0 n1 n2 n3)<line_sep><return>K dofs nijv<block_end><def_stmt>get_dofs_nijv self index0s n0 n1 n2 n3<block_start>i0=index0s[n0]<line_sep>i1=index0s[n1]<line_sep>i2=index0s[n2]<line_sep>i3=index0s[n3]<line_sep>dofs=array([i0 i0+1 i0+2 i1 i1+1 i1+2 i2 i2+1 i2+2 i3 i3+1 i3+2 ] 'int32')<line_sep>nijv=[# translation (n0 1) (n0 2) (n0 3) (n1 1) (n1 2) (n1 3) (n2 1) (n2 2) (n2 3) (n3 1) (n3 2) (n3 3) ]<line_sep><return>dofs nijv<block_end><def_stmt>_verify self xref=<true><block_start>eid=self.eid<line_sep>pid=self.Pid()<line_sep>nids=self.node_ids<assert_stmt>isinstance(eid int)<assert_stmt>isinstance(pid int)<for_stmt>i,nid enumerate(nids)<block_start><assert_stmt>isinstance(nid int) 'nid%i is not an integer; nid=%s'%(i nid)<block_end><if_stmt>xref<block_start>c=self.centroid()<line_sep>v=self.volume()<assert_stmt>isinstance(v float)<for_stmt>i range(3)<block_start><assert_stmt>isinstance(c[i] float)<block_end><block_end><block_end><def_stmt>get_node_indicies self i=<none><block_start><if_stmt>i<is><none><block_start>i1=self.model.grid.get_node_index_by_node_id(self.node_ids[: 0])<line_sep>i2=self.model.grid.get_node_index_by_node_id(self.node_ids[: 1])<line_sep>i3=self.model.grid.get_node_index_by_node_id(self.node_ids[: 2])<line_sep>i4=self.model.grid.get_node_index_by_node_id(self.node_ids[: 3])<block_end><else_stmt><block_start>i1=self.model.grid.get_node_index_by_node_id(self.node_ids[i 0])<line_sep>i2=self.model.grid.get_node_index_by_node_id(self.node_ids[i 1])<line_sep>i3=self.model.grid.get_node_index_by_node_id(self.node_ids[i 2])<line_sep>i4=self.model.grid.get_node_index_by_node_id(self.node_ids[i 3])<block_end><return>i1 i2 i3 i4<block_end><def_stmt>_get_node_locations_by_index self i xyz_cid0<block_start>""" :param i: None or an array of node IDs :param xyz_cid0: the node positions as a dictionary 
"""<line_sep>grid=self.model.grid<line_sep>get_node_index_by_node_id=self.model.grid.get_node_index_by_node_id<line_sep>node_ids=self.node_ids<line_sep>#msg = ', which is required by %s' % self.type i1,i2,i3,i4=self.get_node_indicies(i)<line_sep>n1=xyz_cid0[i1 :]<line_sep>n2=xyz_cid0[i2 :]<line_sep>n3=xyz_cid0[i3 :]<line_sep>n4=xyz_cid0[i4 :]<line_sep><return>n1 n2 n3 n4<block_end><def_stmt>get_volume_by_element_id self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the volume for one or more elements. Parameters ---------- element_id : (nelements, ) int ndarray; default=None -> all the elements to consider xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto) the positions of the GRIDs in CID=0 total : bool; default=False should the volume be summed """<line_sep>n1,n2,n3,n4=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>V=zeros(n1.shape[0] self.model.float_fmt)<for_stmt>i,n1i,n2i,n3i,n4i zip(count() n1 n2 n3 n4)<block_start>V[i]=volume4(n1i n2i n3i n4i)<line_sep>i<augadd>1<block_end><return>V<block_end><def_stmt>get_mass_by_element_id self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the mass for one or more CTETRA elements. Parameters ---------- element_id : (nelements, ) int ndarray; default=None -> all the elements to consider xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto) the positions of the GRIDs in CID=0 total : bool; default=False should the centroid be summed """<if_stmt>element_id<is><none><block_start>element_id=self.element_id<block_end><if_stmt>xyz_cid0<is><none><block_start>xyz_cid0=self.model.grid.get_position_by_node_index()<block_end>V=self.get_volume_by_element_id(element_id xyz_cid0)<line_sep>mid=self.model.properties_solid.get_material_id_by_property_id(self.property_id)<line_sep>rho=self.model.materials.get_density_by_material_id(mid)<line_sep>mass=V<times>rho<if_stmt>total<block_start>mass=mass.sum()<block_end><return>mass<block_end><def_stmt>get_centroid_volume self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the centroid and volume for one or more elements. Parameters ---------- element_id : (nelements, ) int ndarray; default=None -> all the elements to consider xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto) the positions of the GRIDs in CID=0 :param total: should the volume be summed; centroid be averaged (default=False) .. seealso:: CTETRA4.volume() and CTETRA4.centroid for more information. """<line_sep>n1,n2,n3,n4=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>n=len(element_id)<line_sep>volume=zeros(n self.model.float_fmt)<line_sep>i=0<for_stmt>n1i,n2i,n3i,n4i zip(n1 n2 n3 n4)<block_start>volume[i]=volume4(n1i n2i n3i n4i)<line_sep>i<augadd>1<block_end>centroid=(n1+n2+n3+n4)/4.0<if_stmt>total<block_start>centroid=centroid.mean()<line_sep>volume=abs(volume).sum()<block_end><else_stmt><block_start>volume=abs(volume)<block_end><assert_stmt>volume.min()<g>0.0 'volume.min() = %f'%volume.min()<line_sep><return>centroid volume<block_end><def_stmt>get_centroid_by_element_id self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the centroid for one or more elements. 
Parameters ---------- element_id : (nelements, ) int ndarray; default=None -> all the elements to consider xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto) the positions of the GRIDs in CID=0 total : bool; default=False should the centroid be averaged """<line_sep>n1,n2,n3,n4=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>centroid=(n1+n2+n3+n4)/4.0<if_stmt>total<block_start>centroid=centroid.mean(axis=0)<block_end><return>centroid<block_end>#def get_face_nodes(self, nid, nid_opposite): #raise NotImplementedError() #nids = self.node_ids[:4] #indx = nids.index(nid_opposite) #nids.pop(indx) #return nids <def_stmt>write_card self bdf_file size=8 element_id=<none><block_start><if_stmt>self.n<block_start><if_stmt>element_id<is><none><block_start>i=arange(self.n)<block_end><else_stmt><block_start>i=searchsorted(self.element_id element_id)<block_end><if_stmt>size<eq>16<or>max(self.element_id[i].max() self.property_id[i].max() self.node_ids[i :].max())<g>1000000000<block_start>msg=('CTETRA %16i%16i%16i%16i\n'<concat>' %16i%16i\n')<for_stmt>(eid pid n) zip(self.element_id[i] self.property_id[i] self.node_ids[i :])<block_start><if_stmt>eid<in>self._comments<block_start>bdf_file.write(self._comments[eid])<block_end>data=[eid pid]+list(n)<line_sep>bdf_file.write(msg)<block_end><block_end><else_stmt><block_start>msg='CTETRA %8i%8i%8i%8i%8i%8i\n'<for_stmt>(eid pid n) zip(self.element_id[i] self.property_id[i] self.node_ids[i :])<block_start><if_stmt>eid<in>self._comments<block_start>bdf_file.write(self._comments[eid])<block_end>data=[eid pid]+list(n)<line_sep>bdf_file.write(msg%tuple(data))<block_end><block_end><block_end><block_end><block_end>
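# ---------------------------------------------------------------------------
# Editor's note: the block below is an illustrative, standalone sketch of the
# mass handling described in the CTETRA4.get_mass_matrix docstring above
# (lumped: nodal mass += rho * V / 4 per tetrahedron; consistent: the (****)
# form rho*V/20 * (1 + delta_ij)).  It is not part of pyNastran; the function
# names and the xyz/tets input arrays are hypothetical, and the density is
# assumed constant per element.
# ---------------------------------------------------------------------------
import numpy as np


def tet_volume(xyz1, xyz2, xyz3, xyz4):
    """Unsigned volume of a tetrahedron from its four corner points."""
    return abs(np.dot(xyz1 - xyz4, np.cross(xyz2 - xyz4, xyz3 - xyz4))) / 6.


def lumped_nodal_mass(xyz, tets, rho):
    """Lumped mass per node: each tet adds rho*V/4 to each of its four nodes.

    xyz  : (nnodes, 3) float array of node locations
    tets : (ntets, 4) int array of node indices per tetrahedron
    rho  : density (assumed constant here)
    """
    mass = np.zeros(len(xyz))
    for n1, n2, n3, n4 in tets:
        V = tet_volume(xyz[n1], xyz[n2], xyz[n3], xyz[n4])
        mass[[n1, n2, n3, n4]] += rho * V / 4.
    return mass


def consistent_mass_4x4(V, rho):
    """4x4 consistent mass matrix m_ij = rho*V/20 * (1 + delta_ij) from (****)."""
    return rho * V / 20. * (np.ones((4, 4)) + np.eye(4))


if __name__ == "__main__":
    # tiny self-check on a unit right tetrahedron (V = 1/6)
    xyz = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    tets = np.array([[0, 1, 2, 3]])
    rho = 1.0
    m = lumped_nodal_mass(xyz, tets, rho)
    assert np.isclose(m.sum(), rho / 6.)          # total lumped mass = rho * V
    M = consistent_mass_4x4(1. / 6., rho)
    assert np.isclose(M.sum(), rho / 6.)          # row/column sums preserve mass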
<import_stmt>os<import_stmt>signal<class_stmt>Screen<block_start>@staticmethod<def_stmt>wr s# TODO: When Python is 3.5, update this to use only bytes <block_start><if_stmt>isinstance(s str)<block_start>s=bytes(s "utf-8")<block_end>os.write(1 s)<block_end>@staticmethod<def_stmt>wr_fixedw s width# Write string in a fixed-width field <block_start>s=s[:width]<line_sep>Screen.wr(s)<line_sep>Screen.wr(" "<times>(width-len(s)))<line_sep># Doesn't work here, as it doesn't advance cursor #Screen.clear_num_pos(width - len(s)) <block_end>@staticmethod<def_stmt>cls <block_start>Screen.wr(b"\x1b[2J")<block_end>@staticmethod<def_stmt>goto x y# TODO: When Python is 3.5, update this to use bytes <block_start>Screen.wr("\x1b[%d;%dH"%(y+1 x+1))<block_end>@staticmethod<def_stmt>clear_to_eol <block_start>Screen.wr(b"\x1b[0K")<block_end># Clear specified number of positions @staticmethod<def_stmt>clear_num_pos num<block_start><if_stmt>num<g>0<block_start>Screen.wr("\x1b[%dX"%num)<block_end><block_end>@staticmethod<def_stmt>attr_color fg bg=-1<block_start><if_stmt>bg<eq>-1<block_start>bg=fg<rshift>4<line_sep>fg<augand>0xf<block_end># TODO: Switch to b"%d" % foo when py3.5 is everywhere <if_stmt>bg<is><none><block_start><if_stmt>(fg<g>8)<block_start>Screen.wr("\x1b[%d;1m"%(fg+30-8))<block_end><else_stmt><block_start>Screen.wr("\x1b[%dm"%(fg+30))<block_end><block_end><else_stmt><block_start><assert_stmt>bg<le>8<if_stmt>(fg<g>8)<block_start>Screen.wr("\x1b[%d;%d;1m"%(fg+30-8 bg+40))<block_end><else_stmt><block_start>Screen.wr("\x1b[0;%d;%dm"%(fg+30 bg+40))<block_end><block_end><block_end>@staticmethod<def_stmt>attr_reset <block_start>Screen.wr(b"\x1b[0m")<block_end>@staticmethod<def_stmt>cursor onoff<block_start><if_stmt>onoff<block_start>Screen.wr(b"\x1b[?25h")<block_end><else_stmt><block_start>Screen.wr(b"\x1b[?25l")<block_end><block_end><def_stmt>draw_box self left top width height# Use http://www.utf8-chartable.de/unicode-utf8-table.pl # for utf-8 pseudographic reference <block_start>bottom=top+height-1<line_sep>self.goto(left top)<line_sep># "┌" self.wr(b"\xe2\x94\x8c")<line_sep># "─" hor=b"\xe2\x94\x80"<times>(width-2)<line_sep>self.wr(hor)<line_sep># "┐" self.wr(b"\xe2\x94\x90")<line_sep>self.goto(left bottom)<line_sep># "└" self.wr(b"\xe2\x94\x94")<line_sep>self.wr(hor)<line_sep># "┘" self.wr(b"\xe2\x94\x98")<line_sep>top<augadd>1<while_stmt>top<l>bottom# "│" <block_start>self.goto(left top)<line_sep>self.wr(b"\xe2\x94\x82")<line_sep>self.goto(left+width-1 top)<line_sep>self.wr(b"\xe2\x94\x82")<line_sep>top<augadd>1<block_end><block_end><def_stmt>clear_box self left top width height# doesn't work #self.wr("\x1b[%s;%s;%s;%s$z" % (top + 1, left + 1, top + height, left + width)) <block_start>s=b" "<times>width<line_sep>bottom=top+height<while_stmt>top<l>bottom<block_start>self.goto(left top)<line_sep>self.wr(s)<line_sep>top<augadd>1<block_end><block_end><def_stmt>dialog_box self left top width height title=""<block_start>self.clear_box(left+1 top+1 width-2 height-2)<line_sep>self.draw_box(left top width height)<if_stmt>title#pos = (width - len(title)) / 2 <block_start>pos=1<line_sep>self.goto(left+pos top)<line_sep>self.wr(title)<block_end><block_end>@classmethod<def_stmt>init_tty cls<block_start><import_stmt>tty termios<line_sep>cls.org_termios=termios.tcgetattr(0)<line_sep>tty.setraw(0)<block_end>@classmethod<def_stmt>deinit_tty cls<block_start><import_stmt>termios<line_sep>termios.tcsetattr(0 termios.TCSANOW cls.org_termios)<block_end>@classmethod<def_stmt>enable_mouse cls# Mouse reporting - X10 
compatibility mode <block_start>cls.wr(b"\x1b[?1000h")<block_end>@classmethod<def_stmt>disable_mouse cls# Mouse reporting - X10 compatibility mode <block_start>cls.wr(b"\x1b[?1000l")<block_end>@classmethod<def_stmt>screen_size cls<block_start><import_stmt>select<line_sep>cls.wr(b"\x1b[18t")<line_sep>res=select.select([0] [] [] 0.2)[0]<if_stmt><not>res<block_start><return>(80 24)<block_end>resp=os.read(0 32)<assert_stmt>resp.startswith(b"\x1b[8;")<and>resp[-1:]<eq>b"t"<line_sep>vals=resp[:-1].split(b";")<line_sep><return>(int(vals[2]) int(vals[1]))<block_end># Set function to redraw an entire (client) screen # This is called to restore original screen, as we don't save it. @classmethod<def_stmt>set_screen_redraw cls handler<block_start>cls.screen_redraw=handler<block_end>@classmethod<def_stmt>set_screen_resize cls handler<block_start>signal.signal(signal.SIGWINCH <lambda>sig stk:handler(cls))<block_end><block_end>
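# ---------------------------------------------------------------------------
# Editor's note: a minimal, hypothetical usage sketch for the Screen helper
# above.  It calls only methods defined in the class (init_tty, cls, cursor,
# dialog_box, goto, wr, attr_color, attr_reset, deinit_tty); the box size,
# colors, and text are arbitrary.  Run in a VT100/xterm-compatible terminal.
# ---------------------------------------------------------------------------
def demo_screen():
    scr = Screen()
    Screen.init_tty()
    try:
        Screen.cursor(False)
        Screen.cls()
        scr.dialog_box(5, 3, 40, 8, title=" Demo ")
        Screen.goto(7, 5)
        Screen.attr_color(11, 4)   # bright yellow text on a blue background
        Screen.wr("Hello from Screen.dialog_box()")
        Screen.attr_reset()
        Screen.goto(0, 12)         # park the cursor below the box
    finally:
        Screen.cursor(True)
        Screen.deinit_tty()


if __name__ == "__main__":
    demo_screen()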
expected_output={"clock_state":{"system_status":{"associations_address":"10.16.2.2" "associations_local_mode":"client" "clock_offset":27.027 "clock_refid":"127.127.1.1" "clock_state":"synchronized" "clock_stratum":3 "root_delay":5.61 }} "peer":{"10.16.2.2":{"local_mode":{"client":{"delay":5.61 "jitter":3.342 "mode":"synchronized" "offset":27.027 "poll":64 "reach":7 "receive_time":25 "refid":"127.127.1.1" "remote":"10.16.2.2" "stratum":3 "configured":<true> "local_mode":"client" }}} "10.36.3.3":{"local_mode":{"client":{"delay":0.0 "jitter":15937.0 "mode":"unsynchronized" "offset":0.0 "poll":512 "reach":0 "receive_time":"-" "refid":".STEP." "remote":"10.36.3.3" "stratum":16 "configured":<true> "local_mode":"client" }}} } }<line_sep>
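# ---------------------------------------------------------------------------
# Editor's note: a small, hypothetical illustration of how a golden
# ``expected_output`` structure like the one above is typically consumed:
# comparing a parser's nested dict against the fixture and spot-checking a
# single peer.  ``parsed_output`` is an assumed variable; only keys present
# in the fixture above are referenced.
# ---------------------------------------------------------------------------
def check_ntp_fixture(parsed_output, expected=expected_output):
    # full structural comparison, as a unit test would assert
    assert parsed_output == expected
    # drill into one synchronized peer
    client = expected["peer"]["10.16.2.2"]["local_mode"]["client"]
    assert client["mode"] == "synchronized"
    assert client["refid"] == "127.127.1.1"
    assert abs(client["offset"] - 27.027) < 1e-6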