Dataset schema (column, dtype, observed range):

    body_hash               string   64 to 64 chars
    body                    string   23 to 109k chars
    docstring               string   1 to 57k chars
    path                    string   4 to 198 chars
    name                    string   1 to 115 chars
    repository_name         string   7 to 111 chars
    repository_stars        float64  0 to 191k
    lang                    string   1 distinct value (python)
    body_without_docstring  string   14 to 108k chars
    unified                 string   45 to 133k chars
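Records follow, one per block, in the column order above. As a quick orientation, here is a minimal sketch of how a dump with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset id below is a placeholder, not the real identifier:

    # Hypothetical loading sketch; "user/python-docstring-corpus" is a placeholder id.
    from datasets import load_dataset

    ds = load_dataset("user/python-docstring-corpus", split="train")
    row = ds[0]

    # The columns match the schema listed above.
    print(row["repository_name"], row["path"], row["name"])
    print(row["docstring"])
    print(row["body_without_docstring"][:200])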
d246ab7eddaf6b10c72bd35b5dea63b0229fdb99f19bb48d0587d05cede79f63
def reset_states(self):
    'Resets all of the metric state variables.'
    K.batch_set_value([(v, np.zeros((self.num_classes, self.num_classes))) for v in self.variables])
Resets all of the metric state variables.
core/metrics.py
reset_states
JohnBoxAnn/TSGL-EEGNet
3
python
def reset_states(self): K.batch_set_value([(v, np.zeros((self.num_classes, self.num_classes))) for v in self.variables])
def reset_states(self): K.batch_set_value([(v, np.zeros((self.num_classes, self.num_classes))) for v in self.variables])<|docstring|>Resets all of the metric state variables.<|endoftext|>
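Judging from the records alone, the unified column is the stripped body with the docstring re-attached between special tokens. A small sketch of that observed construction (the helper name is ours):

    def build_unified(body_without_docstring: str, docstring: str) -> str:
        # Observed pattern: stripped body, then the docstring wrapped in
        # <|docstring|> ... <|endoftext|> special tokens, with no separator.
        return f"{body_without_docstring}<|docstring|>{docstring}<|endoftext|>"

    # Reproduces the unified field of the record above:
    unified = build_unified(
        "def reset_states(self): K.batch_set_value([(v, np.zeros((self.num_classes, self.num_classes))) for v in self.variables])",
        "Resets all of the metric state variables.",
    )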
fed7dec13073325e51bc79611a65b3b7550c39a5ca0516702c1ec124939d47a6
def get_examples(mode='train'):
    '''
    dataset[0][0] examples
    '''
    examples = {'train': {'id': 242,
                          'candidates': ['风云人物', '气势汹汹', '予取予求', '乘龙佳婿', '正中下怀', '天方夜谭', '心如刀割'],
                          'content': '据俄罗斯卫星通讯社3月15日报道,新八国联军#idiom#逼近附近海域,但是军舰却遭岸舰导弹锁定,英承认今非昔比。 最近一段时间,北约多个国家开始频繁进行军事演习,来对其他国家进行威慑。3月12日当天,英国出动了兰开斯特号、威斯敏斯特号...',
                          'answer': 1}}
    return examples[mode]
dataset[0][0] examples
tests/dataset/test_fewclue_chid.py
get_examples
zjjlivein/PaddleNLP
0
python
def get_examples(mode='train'): examples = {'train': {'id': 242, 'candidates': ['风云人物', '气势汹汹', '予取予求', '乘龙佳婿', '正中下怀', '天方夜谭', '心如刀割'], 'content': '据俄罗斯卫星通讯社3月15日报道,新八国联军#idiom#逼近附近海域,但是军舰却遭岸舰导弹锁定,英承认今非昔比。 最近一段时间,北约多个国家开始频繁进行军事演习,来对其他国家进行威慑。3月12日当天,英国出动了兰开斯特号、威斯敏斯特号...', 'answer': 1}} return examples[mode]
def get_examples(mode='train'): examples = {'train': {'id': 242, 'candidates': ['风云人物', '气势汹汹', '予取予求', '乘龙佳婿', '正中下怀', '天方夜谭', '心如刀割'], 'content': '据俄罗斯卫星通讯社3月15日报道,新八国联军#idiom#逼近附近海域,但是军舰却遭岸舰导弹锁定,英承认今非昔比。 最近一段时间,北约多个国家开始频繁进行军事演习,来对其他国家进行威慑。3月12日当天,英国出动了兰开斯特号、威斯敏斯特号...', 'answer': 1}} return examples[mode]<|docstring|>dataset[0][0] examples<|endoftext|>
e3f0712c8fac8b0575db41e360b9f0dcca5be9e3f93e0038060238d4eb55e5ef
def setUp(self):
    '''
    check input params & datasets all flies
    '''
    self.config['path_or_read_func'] = 'fewclue'
    self.config['name'] = 'chid'
    self.config['splits'] = ['train_0', 'train_1', 'train_2', 'train_3', 'train_4',
                             'train_few_all', 'dev_0', 'dev_1', 'dev_2', 'dev_3',
                             'dev_4', 'dev_few_all', 'unlabeled', 'test', 'test_public']
check input params & datasets all flies
tests/dataset/test_fewclue_chid.py
setUp
zjjlivein/PaddleNLP
0
python
def setUp(self): self.config['path_or_read_func'] = 'fewclue' self.config['name'] = 'chid' self.config['splits'] = ['train_0', 'train_1', 'train_2', 'train_3', 'train_4', 'train_few_all', 'dev_0', 'dev_1', 'dev_2', 'dev_3', 'dev_4', 'dev_few_all', 'unlabeled', 'test', 'test_public']
def setUp(self): self.config['path_or_read_func'] = 'fewclue' self.config['name'] = 'chid' self.config['splits'] = ['train_0', 'train_1', 'train_2', 'train_3', 'train_4', 'train_few_all', 'dev_0', 'dev_1', 'dev_2', 'dev_3', 'dev_4', 'dev_few_all', 'unlabeled', 'test', 'test_public']<|docstring|>check input params & datasets all flies<|endoftext|>
4ac22548afa7776ce8001276ce827d2c472b16f2ffb772cfba9fa76d84e6316b
def test_train_set(self):
    '''
    check train.json length, id, candidates, content, answer
    '''
    expected_ds_num = 15
    expected_len = 42
    expected_train = get_examples('train')
    ds = load_dataset(**self.config)
    self.check_output_equal(len(ds), expected_ds_num)
    self.check_output_equal(len(ds[0]), expected_len)
    self.check_output_equal(int(expected_train['answer']), ds[0][0]['answer'])
    self.check_output_equal(expected_train['candidates'], ds[0][0]['candidates'])
    self.check_output_equal(expected_train['content'], ds[0][0]['content'])
    self.check_output_equal(expected_train['id'], ds[0][0]['id'])
check train.json length, id, candidates, content, answer
tests/dataset/test_fewclue_chid.py
test_train_set
zjjlivein/PaddleNLP
0
python
def test_train_set(self): expected_ds_num = 15 expected_len = 42 expected_train = get_examples('train') ds = load_dataset(**self.config) self.check_output_equal(len(ds), expected_ds_num) self.check_output_equal(len(ds[0]), expected_len) self.check_output_equal(int(expected_train['answer']), ds[0][0]['answer']) self.check_output_equal(expected_train['candidates'], ds[0][0]['candidates']) self.check_output_equal(expected_train['content'], ds[0][0]['content']) self.check_output_equal(expected_train['id'], ds[0][0]['id'])
def test_train_set(self): expected_ds_num = 15 expected_len = 42 expected_train = get_examples('train') ds = load_dataset(**self.config) self.check_output_equal(len(ds), expected_ds_num) self.check_output_equal(len(ds[0]), expected_len) self.check_output_equal(int(expected_train['answer']), ds[0][0]['answer']) self.check_output_equal(expected_train['candidates'], ds[0][0]['candidates']) self.check_output_equal(expected_train['content'], ds[0][0]['content']) self.check_output_equal(expected_train['id'], ds[0][0]['id'])<|docstring|>check train.json length, id, candidates, content, answer<|endoftext|>
4f4e5d6c9fb51c614f14613eeed46a56d809dd00713a9673116cc10c5bd6ace6
def fetch_symbols_list(only_a=True):
    '获取上市公司代码及简称'
    url = 'http://www.cninfo.com.cn/cninfo-new/information/companylist'
    response = get_page_response(url)

    def _parse(response):
        soup = BeautifulSoup(response.text, 'lxml')
        tag_as = soup.find_all('a', href=re.compile('companyinfo_n.html'))
        res = [(x.text[:6].strip(), x.text[7:].lstrip()) for x in tag_as]
        df = pd.DataFrame(res, columns=['code', 'short_name'])
        if only_a:
            df = df[df.code.str.get(0).str.contains('[0,3,6]')]
        return df.set_index('code', drop=True)
    return _parse(response)
获取上市公司代码及简称
cnswd/websource/juchao.py
fetch_symbols_list
huangzhangfeng/cnswd
0
python
def fetch_symbols_list(only_a=True): url = 'http://www.cninfo.com.cn/cninfo-new/information/companylist' response = get_page_response(url) def _parse(response): soup = BeautifulSoup(response.text, 'lxml') tag_as = soup.find_all('a', href=re.compile('companyinfo_n.html')) res = [(x.text[:6].strip(), x.text[7:].lstrip()) for x in tag_as] df = pd.DataFrame(res, columns=['code', 'short_name']) if only_a: df = df[df.code.str.get(0).str.contains('[0,3,6]')] return df.set_index('code', drop=True) return _parse(response)
def fetch_symbols_list(only_a=True): url = 'http://www.cninfo.com.cn/cninfo-new/information/companylist' response = get_page_response(url) def _parse(response): soup = BeautifulSoup(response.text, 'lxml') tag_as = soup.find_all('a', href=re.compile('companyinfo_n.html')) res = [(x.text[:6].strip(), x.text[7:].lstrip()) for x in tag_as] df = pd.DataFrame(res, columns=['code', 'short_name']) if only_a: df = df[df.code.str.get(0).str.contains('[0,3,6]')] return df.set_index('code', drop=True) return _parse(response)<|docstring|>获取上市公司代码及简称<|endoftext|>
5f4f0e6c952bc8ba9d11d188124515ad57af438db6b63a78b07183363cf1ac32
def _mark_changed(old_df, new_df):
    '标记股票状态、名称字段的改变'
    new_df['changed'] = False
    added = old_df.index.difference(new_df.index)
    new_df = pd.concat([new_df, old_df.loc[added, :]])
    for s in new_df.status.unique():
        o_index = old_df.query('status == {}'.format(s)).index
        n_index = new_df.query('status == {}'.format(s)).index
        new_df.loc[o_index.symmetric_difference(n_index), 'changed'] = True
    i_index = new_df.index.intersection(old_df.index)
    for i in i_index:
        o_name = old_df.loc[i, 'name']
        n_name = new_df.loc[i, 'name']
        if o_name.strip() != n_name.strip():
            new_df.loc[i, 'changed'] = True
标记股票状态、名称字段的改变
cnswd/websource/juchao.py
_mark_changed
huangzhangfeng/cnswd
0
python
def _mark_changed(old_df, new_df): new_df['changed'] = False added = old_df.index.difference(new_df.index) new_df = pd.concat([new_df, old_df.loc[added, :]]) for s in new_df.status.unique(): o_index = old_df.query('status == {}'.format(s)).index n_index = new_df.query('status == {}'.format(s)).index new_df.loc[(o_index.symmetric_difference(n_index), 'changed')] = True i_index = new_df.index.intersection(old_df.index) for i in i_index: o_name = old_df.loc[(i, 'name')] n_name = new_df.loc[(i, 'name')] if (o_name.strip() != n_name.strip()): new_df.loc[(i, 'changed')] = True
def _mark_changed(old_df, new_df): new_df['changed'] = False added = old_df.index.difference(new_df.index) new_df = pd.concat([new_df, old_df.loc[added, :]]) for s in new_df.status.unique(): o_index = old_df.query('status == {}'.format(s)).index n_index = new_df.query('status == {}'.format(s)).index new_df.loc[(o_index.symmetric_difference(n_index), 'changed')] = True i_index = new_df.index.intersection(old_df.index) for i in i_index: o_name = old_df.loc[(i, 'name')] n_name = new_df.loc[(i, 'name')] if (o_name.strip() != n_name.strip()): new_df.loc[(i, 'changed')] = True<|docstring|>标记股票状态、名称字段的改变<|endoftext|>
e3bf714d14d52cd9dc5df44c6c3eff68c3475b60f76f699d4ae1cef07f600120
def get_stock_codes():
    '所有主板股票代码(含已经退市)'
    p1 = fetch_symbols_list()
    p1.rename(columns={'short_name': 'name'}, inplace=True)
    p1['status'] = '在市'
    p1.sort_index(inplace=True)
    p2 = fetch_suspend_stocks()[['seccode', 'secname']].rename(columns={'seccode': 'code', 'secname': 'name'})
    p2.set_index('code', drop=True, inplace=True)
    p2 = p2[p2.index.str.get(0).str.contains('[0,3,6]')]
    p1.loc[p2.index, 'status'] = '暂停'
    p3 = fetch_delisting_stocks()[['name']]
    p3 = p3[p3.index.str.get(0).str.contains('[0,3,6]')]
    p3['status'] = '退市'
    df = pd.concat([p1, p3])
    df.sort_index(inplace=True)
    return df
所有主板股票代码(含已经退市)
cnswd/websource/juchao.py
get_stock_codes
huangzhangfeng/cnswd
0
python
def get_stock_codes(): p1 = fetch_symbols_list() p1.rename(columns={'short_name': 'name'}, inplace=True) p1['status'] = '在市' p1.sort_index(inplace=True) p2 = fetch_suspend_stocks()[['seccode', 'secname']].rename(columns={'seccode': 'code', 'secname': 'name'}) p2.set_index('code', drop=True, inplace=True) p2 = p2[p2.index.str.get(0).str.contains('[0,3,6]')] p1.loc[(p2.index, 'status')] = '暂停' p3 = fetch_delisting_stocks()[['name']] p3 = p3[p3.index.str.get(0).str.contains('[0,3,6]')] p3['status'] = '退市' df = pd.concat([p1, p3]) df.sort_index(inplace=True) return df
def get_stock_codes(): p1 = fetch_symbols_list() p1.rename(columns={'short_name': 'name'}, inplace=True) p1['status'] = '在市' p1.sort_index(inplace=True) p2 = fetch_suspend_stocks()[['seccode', 'secname']].rename(columns={'seccode': 'code', 'secname': 'name'}) p2.set_index('code', drop=True, inplace=True) p2 = p2[p2.index.str.get(0).str.contains('[0,3,6]')] p1.loc[(p2.index, 'status')] = '暂停' p3 = fetch_delisting_stocks()[['name']] p3 = p3[p3.index.str.get(0).str.contains('[0,3,6]')] p3['status'] = '退市' df = pd.concat([p1, p3]) df.sort_index(inplace=True) return df<|docstring|>所有主板股票代码(含已经退市)<|endoftext|>
789a7ce57961ff00cac684fc50a9271fcfd5141c9a4521f9652f47b8a970cd51
def fetch_suspend_stocks():
    '获取暂停上市股票列表'
    url_fmt = 'http://www.cninfo.com.cn/cninfo-new/information/suspendlist-1?market={}'
    urls = [url_fmt.format(x) for x in ('sh', 'sz')]
    datas = [get_page_response(url, method='post').json() for url in urls]
    dfs = [pd.DataFrame(d) for d in datas]
    df = pd.concat(dfs).iloc[:, 1:]
    return df.reset_index(drop=True)
获取暂停上市股票列表
cnswd/websource/juchao.py
fetch_suspend_stocks
huangzhangfeng/cnswd
0
python
def fetch_suspend_stocks(): url_fmt = 'http://www.cninfo.com.cn/cninfo-new/information/suspendlist-1?market={}' urls = [url_fmt.format(x) for x in ('sh', 'sz')] datas = [get_page_response(url, method='post').json() for url in urls] dfs = [pd.DataFrame(d) for d in datas] df = pd.concat(dfs).iloc[:, 1:] return df.reset_index(drop=True)
def fetch_suspend_stocks(): url_fmt = 'http://www.cninfo.com.cn/cninfo-new/information/suspendlist-1?market={}' urls = [url_fmt.format(x) for x in ('sh', 'sz')] datas = [get_page_response(url, method='post').json() for url in urls] dfs = [pd.DataFrame(d) for d in datas] df = pd.concat(dfs).iloc[:, 1:] return df.reset_index(drop=True)<|docstring|>获取暂停上市股票列表<|endoftext|>
cda3a3a614a7c23a7b314b54cd7ef4b2e79eee392e383db352be8dbdbbb6ac02
def fetch_delisting_stocks():
    '获取终止上市股票清单'
    url_fmt = 'http://three.cninfo.com.cn/new/information/getDelistingList?market={}'
    urls = [url_fmt.format(x) for x in ('sh', 'sz')]
    datas = [get_page_response(url, method='post').json() for url in urls]
    dfs = [pd.DataFrame(d) for d in datas]
    df = pd.concat(dfs)
    df = df.rename(columns={'f007d_0007': '转板日期', 'f008d_0007': '终止上市日期',
                            'r_seccode_0007': '三板证券代码', 'r_secname_0007': '三板证券简称',
                            'y_seccode_0007': '股票代码', 'y_secname_0007': '股票简称'})
    df.set_index('股票代码', drop=True, inplace=True)
    return df.applymap(str.strip)
获取终止上市股票清单
cnswd/websource/juchao.py
fetch_delisting_stocks
huangzhangfeng/cnswd
0
python
def fetch_delisting_stocks(): url_fmt = 'http://three.cninfo.com.cn/new/information/getDelistingList?market={}' urls = [url_fmt.format(x) for x in ('sh', 'sz')] datas = [get_page_response(url, method='post').json() for url in urls] dfs = [pd.DataFrame(d) for d in datas] df = pd.concat(dfs) df = df.rename(columns={'f007d_0007': '转板日期', 'f008d_0007': '终止上市日期', 'r_seccode_0007': '三板证券代码', 'r_secname_0007': '三板证券简称', 'y_seccode_0007': '股票代码', 'y_secname_0007': '股票简称'}) df.set_index('股票代码', drop=True, inplace=True) return df.applymap(str.strip)
def fetch_delisting_stocks(): url_fmt = 'http://three.cninfo.com.cn/new/information/getDelistingList?market={}' urls = [url_fmt.format(x) for x in ('sh', 'sz')] datas = [get_page_response(url, method='post').json() for url in urls] dfs = [pd.DataFrame(d) for d in datas] df = pd.concat(dfs) df = df.rename(columns={'f007d_0007': '转板日期', 'f008d_0007': '终止上市日期', 'r_seccode_0007': '三板证券代码', 'r_secname_0007': '三板证券简称', 'y_seccode_0007': '股票代码', 'y_secname_0007': '股票简称'}) df.set_index('股票代码', drop=True, inplace=True) return df.applymap(str.strip)<|docstring|>获取终止上市股票清单<|endoftext|>
0571c882a8a6fa9d9dd0e0e035b3311c774e3a834a29d5a18dfb51ff5e7f718b
@friendly_download(30)
def fetch_company_brief_info(stock_code):
    '公司简要信息'
    url = _get_url(stock_code, 'brief')
    r = requests.get(url)
    r.encoding = 'gb18030'
    df = pd.read_html(r.text, flavor='lxml')[1]
    return df
公司简要信息
cnswd/websource/juchao.py
fetch_company_brief_info
huangzhangfeng/cnswd
0
python
@friendly_download(30) def fetch_company_brief_info(stock_code): url = _get_url(stock_code, 'brief') r = requests.get(url) r.encoding = 'gb18030' df = pd.read_html(r.text, flavor='lxml')[1] return df
@friendly_download(30) def fetch_company_brief_info(stock_code): url = _get_url(stock_code, 'brief') r = requests.get(url) r.encoding = 'gb18030' df = pd.read_html(r.text, flavor='lxml')[1] return df<|docstring|>公司简要信息<|endoftext|>
56298065b7fc70d907bd418eedefcd0947a70f89b4277b8bd4bc9adf21a93949
def fetch_issue_info(stock_code):
    '发行信息'
    url = _get_url(stock_code, 'issue')
    page_response = get_page_response(url)
    df = pd.read_html(StringIO(page_response.content))[1]
    return df
发行信息
cnswd/websource/juchao.py
fetch_issue_info
huangzhangfeng/cnswd
0
python
def fetch_issue_info(stock_code): url = _get_url(stock_code, 'issue') page_response = get_page_response(url) df = pd.read_html(StringIO(page_response.content))[1] return df
def fetch_issue_info(stock_code): url = _get_url(stock_code, 'issue') page_response = get_page_response(url) df = pd.read_html(StringIO(page_response.content))[1] return df<|docstring|>发行信息<|endoftext|>
714607eee9b090834270d7f4e081384fb8a850931c61266ef39a4b004e16a328
def fetch_index_info():
    '获取指数基本信息'
    xl = ['szxl', 'jcxl']
    zs = ['gmzs', 'hyzs', 'fgzs', 'ztzs', 'clzs', 'dzzs', 'zhzs', 'jjzs', 'zqzs', 'qyzs']
    prod_ = product(xl, zs)
    url_fmt = 'http://www.cnindex.com.cn/zstx/{}/{}'
    urls = [url_fmt.format(x[0], x[1]) for x in prod_]
    dfs = []

    @friendly_download(30, max_sleep=1)
    def _process(url):
        try:
            page_response = get_page_response(url)
            df = pd.read_html(BytesIO(page_response.content), header=0)[1]
            dfs.append(df)
        except ConnectFailed:
            pass

    for url in urls:
        _process(url)
    data = pd.concat(dfs)
    col_names = ['name', 'code', 'base_day', 'base_point', 'launch_day', 'constituents']
    data.columns = col_names

    def f(x):
        return pd.to_datetime(x, format='%Y-%m-%d', errors='coerce')

    data['base_day'] = data['base_day'].apply(f)
    data['launch_day'] = data['launch_day'].apply(f)
    data.set_index('code', drop=True, inplace=True)
    return data
获取指数基本信息
cnswd/websource/juchao.py
fetch_index_info
huangzhangfeng/cnswd
0
python
def fetch_index_info(): xl = ['szxl', 'jcxl'] zs = ['gmzs', 'hyzs', 'fgzs', 'ztzs', 'clzs', 'dzzs', 'zhzs', 'jjzs', 'zqzs', 'qyzs'] prod_ = product(xl, zs) url_fmt = 'http://www.cnindex.com.cn/zstx/{}/{}' urls = [url_fmt.format(x[0], x[1]) for x in prod_] dfs = [] @friendly_download(30, max_sleep=1) def _process(url): try: page_response = get_page_response(url) df = pd.read_html(BytesIO(page_response.content), header=0)[1] dfs.append(df) except ConnectFailed: pass for url in urls: _process(url) data = pd.concat(dfs) col_names = ['name', 'code', 'base_day', 'base_point', 'launch_day', 'constituents'] data.columns = col_names def f(x): return pd.to_datetime(x, format='%Y-%m-%d', errors='coerce') data['base_day'] = data['base_day'].apply(f) data['launch_day'] = data['launch_day'].apply(f) data.set_index('code', drop=True, inplace=True) return data
def fetch_index_info(): xl = ['szxl', 'jcxl'] zs = ['gmzs', 'hyzs', 'fgzs', 'ztzs', 'clzs', 'dzzs', 'zhzs', 'jjzs', 'zqzs', 'qyzs'] prod_ = product(xl, zs) url_fmt = 'http://www.cnindex.com.cn/zstx/{}/{}' urls = [url_fmt.format(x[0], x[1]) for x in prod_] dfs = [] @friendly_download(30, max_sleep=1) def _process(url): try: page_response = get_page_response(url) df = pd.read_html(BytesIO(page_response.content), header=0)[1] dfs.append(df) except ConnectFailed: pass for url in urls: _process(url) data = pd.concat(dfs) col_names = ['name', 'code', 'base_day', 'base_point', 'launch_day', 'constituents'] data.columns = col_names def f(x): return pd.to_datetime(x, format='%Y-%m-%d', errors='coerce') data['base_day'] = data['base_day'].apply(f) data['launch_day'] = data['launch_day'].apply(f) data.set_index('code', drop=True, inplace=True) return data<|docstring|>获取指数基本信息<|endoftext|>
bb5173b5ec41cba8d5ca5916983965da6413fc3cde32c8641314aa74056d10a5
def fetch_adjustment(stock_code):
    '''
    提取股票历史分配记录
    深圳交易所除权基准日与红股上市日一致;上海证券交易所红股上市日
    一般晚于除权基准日。

    注意:
        使用除权基准日作为支付日,红股上市日作为生效日;
    '''
    url = _get_url(stock_code, 'dividend')
    page_response = get_page_response(url)
    df = pd.read_html(BytesIO(page_response.content), match='分红年度', skiprows=[0])[0]
    df.dropna(how='all', inplace=True)
    if df.empty:
        return df
    df.columns = _ADJUSTMENT_FIELDS
    data = _parse_ratio_and_amount(df)
    data.set_index('effective_date', inplace=True)
    data.sort_index(inplace=True)
    return data
提取股票历史分配记录 深圳交易所除权基准日与红股上市日一致;上海证券交易所红股上市日 一般晚于除权基准日。 注意: 使用除权基准日作为支付日,红股上市日作为生效日;
cnswd/websource/juchao.py
fetch_adjustment
huangzhangfeng/cnswd
0
python
def fetch_adjustment(stock_code): url = _get_url(stock_code, 'dividend') page_response = get_page_response(url) df = pd.read_html(BytesIO(page_response.content), match='分红年度', skiprows=[0])[0] df.dropna(how='all', inplace=True) if df.empty: return df df.columns = _ADJUSTMENT_FIELDS data = _parse_ratio_and_amount(df) data.set_index('effective_date', inplace=True) data.sort_index(inplace=True) return data
def fetch_adjustment(stock_code): url = _get_url(stock_code, 'dividend') page_response = get_page_response(url) df = pd.read_html(BytesIO(page_response.content), match='分红年度', skiprows=[0])[0] df.dropna(how='all', inplace=True) if df.empty: return df df.columns = _ADJUSTMENT_FIELDS data = _parse_ratio_and_amount(df) data.set_index('effective_date', inplace=True) data.sort_index(inplace=True) return data<|docstring|>提取股票历史分配记录 深圳交易所除权基准日与红股上市日一致;上海证券交易所红股上市日 一般晚于除权基准日。 注意: 使用除权基准日作为支付日,红股上市日作为生效日;<|endoftext|>
f4d6123403e5a0024593e23a44840c08bb85f2c348592f34507eab93791e9595
def _parse_ratio_and_amount(df):
    '''
    解析分配比例及分红金额(每股)
    更改说明:
        简化处理解析。单纯计算,而不进行合并。
        待后续查询时,如一天内有二条记录,则合并计算
    '''
    base = df.scheme.str.extract(_BASE_PATTERN, expand=False)
    increase = df.scheme.str.extract(_INCREASE_PATTERN, expand=False)
    give = df.scheme.str.extract(_GIVE_PATTERN, expand=False)
    dividend = df.scheme.str.extract(_DIVIDEND_PATTERN, expand=False)
    increase.fillna(0, inplace=True)
    give.fillna(0, inplace=True)
    dividend.fillna(0, inplace=True)
    ratio = increase.astype(float).add(give.astype(float)) / base.astype(float)
    amount = dividend.astype(float) / base.astype(float)

    def f(x):
        return pd.to_datetime(x, format='%Y%m%d', errors='coerce')

    data = pd.DataFrame({'ratio': ratio.values,
                         'amount': amount.values,
                         'annual': df.annual,
                         'record_date': df.record_date.apply(f),
                         'pay_date': df.effective_date.apply(f),
                         'listing_date': df.listing_date.apply(f)})
    data.loc[data['pay_date'].isnull(), 'pay_date'] = data.loc[data['pay_date'].isnull(), 'listing_date']
    data.loc[data['pay_date'].isnull(), 'pay_date'] = data.loc[data['pay_date'].isnull(), 'record_date']
    data.loc[data['listing_date'].isnull(), 'listing_date'] = data.loc[data['listing_date'].isnull(), 'pay_date']
    data['effective_date'] = data['pay_date']
    return data
解析分配比例及分红金额(每股) 更改说明: 简化处理解析。单纯计算,而不进行合并。 待后续查询时,如一天内有二条记录,则合并计算
cnswd/websource/juchao.py
_parse_ratio_and_amount
huangzhangfeng/cnswd
0
python
def _parse_ratio_and_amount(df): base = df.scheme.str.extract(_BASE_PATTERN, expand=False) increase = df.scheme.str.extract(_INCREASE_PATTERN, expand=False) give = df.scheme.str.extract(_GIVE_PATTERN, expand=False) dividend = df.scheme.str.extract(_DIVIDEND_PATTERN, expand=False) increase.fillna(0, inplace=True) give.fillna(0, inplace=True) dividend.fillna(0, inplace=True) ratio = (increase.astype(float).add(give.astype(float)) / base.astype(float)) amount = (dividend.astype(float) / base.astype(float)) def f(x): return pd.to_datetime(x, format='%Y%m%d', errors='coerce') data = pd.DataFrame({'ratio': ratio.values, 'amount': amount.values, 'annual': df.annual, 'record_date': df.record_date.apply(f), 'pay_date': df.effective_date.apply(f), 'listing_date': df.listing_date.apply(f)}) data.loc[(data['pay_date'].isnull(), 'pay_date')] = data.loc[(data['pay_date'].isnull(), 'listing_date')] data.loc[(data['pay_date'].isnull(), 'pay_date')] = data.loc[(data['pay_date'].isnull(), 'record_date')] data.loc[(data['listing_date'].isnull(), 'listing_date')] = data.loc[(data['listing_date'].isnull(), 'pay_date')] data['effective_date'] = data['pay_date'] return data
def _parse_ratio_and_amount(df): base = df.scheme.str.extract(_BASE_PATTERN, expand=False) increase = df.scheme.str.extract(_INCREASE_PATTERN, expand=False) give = df.scheme.str.extract(_GIVE_PATTERN, expand=False) dividend = df.scheme.str.extract(_DIVIDEND_PATTERN, expand=False) increase.fillna(0, inplace=True) give.fillna(0, inplace=True) dividend.fillna(0, inplace=True) ratio = (increase.astype(float).add(give.astype(float)) / base.astype(float)) amount = (dividend.astype(float) / base.astype(float)) def f(x): return pd.to_datetime(x, format='%Y%m%d', errors='coerce') data = pd.DataFrame({'ratio': ratio.values, 'amount': amount.values, 'annual': df.annual, 'record_date': df.record_date.apply(f), 'pay_date': df.effective_date.apply(f), 'listing_date': df.listing_date.apply(f)}) data.loc[(data['pay_date'].isnull(), 'pay_date')] = data.loc[(data['pay_date'].isnull(), 'listing_date')] data.loc[(data['pay_date'].isnull(), 'pay_date')] = data.loc[(data['pay_date'].isnull(), 'record_date')] data.loc[(data['listing_date'].isnull(), 'listing_date')] = data.loc[(data['listing_date'].isnull(), 'pay_date')] data['effective_date'] = data['pay_date'] return data<|docstring|>解析分配比例及分红金额(每股) 更改说明: 简化处理解析。单纯计算,而不进行合并。 待后续查询时,如一天内有二条记录,则合并计算<|endoftext|>
cf38fd2f663fe529768ec3e10a1851d389eb60c497d3d0d54f5635a6eb5fd19e
def fetch_announcement_summary():
    '''获取最近一期公司公告摘要信息
    用途:
        1、限定需要更新公司名录;
        2、限定刷新公司财务报告名录;
        3、辅助分析
    '''
    cols = ['announcementTime', 'announcementTitle', 'announcementType',
            'announcementTypeName', 'secCode', 'secName']
    url_fmt = 'http://www.cninfo.com.cn/cninfo-new/disclosure/{}_summary/?pageNum={}'
    markets = ('sse', 'szse')
    dfs = []
    for m in markets:
        for i in range(1, 100):
            url = url_fmt.format(m, i)
            r = get_page_response(url, 'post')
            d = r.json()
            df = pd.DataFrame.from_dict(d['announcements'])[cols]
            dfs.append(df)
            if not d['hasMore']:
                break
    data = pd.concat(dfs)
    data.reset_index(inplace=True, drop=True)
    output = pd.DataFrame({'股票代码': data['secCode'].values,
                           '股票简称': data['secName'].values,
                           '公告时间': data['announcementTime'].apply(pd.Timestamp, unit='ms'),
                           '公告标题': data['announcementTitle'].values,
                           '类别': data['announcementTypeName'].values})
    return output
获取最近一期公司公告摘要信息 用途: 1、限定需要更新公司名录; 2、限定刷新公司财务报告名录; 3、辅助分析
cnswd/websource/juchao.py
fetch_announcement_summary
huangzhangfeng/cnswd
0
python
def fetch_announcement_summary(): cols = ['announcementTime', 'announcementTitle', 'announcementType', 'announcementTypeName', 'secCode', 'secName'] url_fmt = 'http://www.cninfo.com.cn/cninfo-new/disclosure/{}_summary/?pageNum={}' markets = ('sse', 'szse') dfs = [] for m in markets: for i in range(1, 100): url = url_fmt.format(m, i) r = get_page_response(url, 'post') d = r.json() df = pd.DataFrame.from_dict(d['announcements'])[cols] dfs.append(df) if (not d['hasMore']): break data = pd.concat(dfs) data.reset_index(inplace=True, drop=True) output = pd.DataFrame({'股票代码': data['secCode'].values, '股票简称': data['secName'].values, '公告时间': data['announcementTime'].apply(pd.Timestamp, unit='ms'), '公告标题': data['announcementTitle'].values, '类别': data['announcementTypeName'].values}) return output
def fetch_announcement_summary(): cols = ['announcementTime', 'announcementTitle', 'announcementType', 'announcementTypeName', 'secCode', 'secName'] url_fmt = 'http://www.cninfo.com.cn/cninfo-new/disclosure/{}_summary/?pageNum={}' markets = ('sse', 'szse') dfs = [] for m in markets: for i in range(1, 100): url = url_fmt.format(m, i) r = get_page_response(url, 'post') d = r.json() df = pd.DataFrame.from_dict(d['announcements'])[cols] dfs.append(df) if (not d['hasMore']): break data = pd.concat(dfs) data.reset_index(inplace=True, drop=True) output = pd.DataFrame({'股票代码': data['secCode'].values, '股票简称': data['secName'].values, '公告时间': data['announcementTime'].apply(pd.Timestamp, unit='ms'), '公告标题': data['announcementTitle'].values, '类别': data['announcementTypeName'].values}) return output<|docstring|>获取最近一期公司公告摘要信息 用途: 1、限定需要更新公司名录; 2、限定刷新公司财务报告名录; 3、辅助分析<|endoftext|>
9731de31dc50971dd6115ef6e048e795f57a6bfcf5c715c30f4f16eea87451b8
def fetch_industry(date_str, department):
    '''巨潮、证监会行业编码

    异常:
        如果date_为非交易日,触发值异常
    '''
    url_fmt = 'http://www.cnindex.com.cn/syl/{}/{}_hsls.html'
    url = url_fmt.format(date_str, department)
    try:
        df = pd.read_html(url)[1].loc[:, range(2)]
        df.columns = ['industry_id', 'name']
        return df
    except HTTPError:
        msg_fmt = "或者当前日期的数据尚未发布,或者日期'{}'并非交易日"
        raise ValueError(msg_fmt.format(date_str))
巨潮、证监会行业编码 异常: 如果date_为非交易日,触发值异常
cnswd/websource/juchao.py
fetch_industry
huangzhangfeng/cnswd
0
python
def fetch_industry(date_str, department): url_fmt = 'http://www.cnindex.com.cn/syl/{}/{}_hsls.html' url = url_fmt.format(date_str, department) try: df = pd.read_html(url)[1].loc[:, range(2)] df.columns = ['industry_id', 'name'] return df except HTTPError: msg_fmt = "或者当前日期的数据尚未发布,或者日期'{}'并非交易日" raise ValueError(msg_fmt.format(date_str))
def fetch_industry(date_str, department): url_fmt = 'http://www.cnindex.com.cn/syl/{}/{}_hsls.html' url = url_fmt.format(date_str, department) try: df = pd.read_html(url)[1].loc[:, range(2)] df.columns = ['industry_id', 'name'] return df except HTTPError: msg_fmt = "或者当前日期的数据尚未发布,或者日期'{}'并非交易日" raise ValueError(msg_fmt.format(date_str))<|docstring|>巨潮、证监会行业编码 异常: 如果date_为非交易日,触发值异常<|endoftext|>
2b2255fa7d0222f93294789073bec983122b9ddc1013434ac3af19e9582674e1
def fetch_industry_stocks(date_, department='cninfo'):
    '行业分类股票列表'
    logger = logbook.Logger('巨潮行业分类')
    msg = '"cninfo"代表国证行业分类,"csrc"代表证监会行业分类'
    assert department in ('cninfo', 'csrc'), msg
    a_cols = ['code', 'short_name', 'b_code', 'b_short_name']
    b_cols = ['group_code', 'group_name', 'industry_code', 'industry_name']
    c_cols = ['a_static_pe', 'a_roll_pe', 'b_static_pe', 'b_roll_pe', 'ab_static_pe', 'ab_roll_pe']
    date_str = pd.Timestamp(date_).strftime('%Y-%m-%d')
    industry = fetch_industry(date_str, department)
    if department == 'cninfo':
        b_cols = ['sector_code', 'sector_name'] + b_cols
        pat = '.\\d{2}$'
    else:
        pat = '.$'
    col_names = a_cols + b_cols + c_cols
    codes = industry.loc[industry.industry_id.str.match(pat), 'industry_id'].values
    dfs = []

    def _process(industry_id):
        df = _industry_stocks(industry_id, date_str)
        dfs.append(df)

    for code in codes:
        _process(code)
        logger.info('部门:{},日期:{},编码:{}'.format(department, date_, code))
    res = pd.concat(dfs, ignore_index=True)
    res.columns = col_names
    res = res.loc[~res.code.isnull(), :]
    res.code = res.code.map(lambda x: str(int(x)).zfill(6))
    return res
行业分类股票列表
cnswd/websource/juchao.py
fetch_industry_stocks
huangzhangfeng/cnswd
0
python
def fetch_industry_stocks(date_, department='cninfo'): logger = logbook.Logger('巨潮行业分类') msg = '"cninfo"代表国证行业分类,"csrc"代表证监会行业分类' assert (department in ('cninfo', 'csrc')), msg a_cols = ['code', 'short_name', 'b_code', 'b_short_name'] b_cols = ['group_code', 'group_name', 'industry_code', 'industry_name'] c_cols = ['a_static_pe', 'a_roll_pe', 'b_static_pe', 'b_roll_pe', 'ab_static_pe', 'ab_roll_pe'] date_str = pd.Timestamp(date_).strftime('%Y-%m-%d') industry = fetch_industry(date_str, department) if (department == 'cninfo'): b_cols = (['sector_code', 'sector_name'] + b_cols) pat = '.\\d{2}$' else: pat = '.$' col_names = ((a_cols + b_cols) + c_cols) codes = industry.loc[(industry.industry_id.str.match(pat), 'industry_id')].values dfs = [] def _process(industry_id): df = _industry_stocks(industry_id, date_str) dfs.append(df) for code in codes: _process(code) logger.info('部门:{},日期:{},编码:{}'.format(department, date_, code)) res = pd.concat(dfs, ignore_index=True) res.columns = col_names res = res.loc[(~ res.code.isnull()), :] res.code = res.code.map((lambda x: str(int(x)).zfill(6))) return res
def fetch_industry_stocks(date_, department='cninfo'): logger = logbook.Logger('巨潮行业分类') msg = '"cninfo"代表国证行业分类,"csrc"代表证监会行业分类' assert (department in ('cninfo', 'csrc')), msg a_cols = ['code', 'short_name', 'b_code', 'b_short_name'] b_cols = ['group_code', 'group_name', 'industry_code', 'industry_name'] c_cols = ['a_static_pe', 'a_roll_pe', 'b_static_pe', 'b_roll_pe', 'ab_static_pe', 'ab_roll_pe'] date_str = pd.Timestamp(date_).strftime('%Y-%m-%d') industry = fetch_industry(date_str, department) if (department == 'cninfo'): b_cols = (['sector_code', 'sector_name'] + b_cols) pat = '.\\d{2}$' else: pat = '.$' col_names = ((a_cols + b_cols) + c_cols) codes = industry.loc[(industry.industry_id.str.match(pat), 'industry_id')].values dfs = [] def _process(industry_id): df = _industry_stocks(industry_id, date_str) dfs.append(df) for code in codes: _process(code) logger.info('部门:{},日期:{},编码:{}'.format(department, date_, code)) res = pd.concat(dfs, ignore_index=True) res.columns = col_names res = res.loc[(~ res.code.isnull()), :] res.code = res.code.map((lambda x: str(int(x)).zfill(6))) return res<|docstring|>行业分类股票列表<|endoftext|>
daf42803d8d2a5fe7105cdd2a6144a0d8d7eb31c3e2564e5d478f5fa8c56f9e3
def ensure_report_date(date_):
    '''
    转换为报告日期

    逻辑
    ----
    1. 如输入日期为当天,则自动转换为前一个财务报告期;
    2. 如果为历史日期,输入日期必须为季度报告截止日期
    '''
    date_ = pd.Timestamp(date_)
    if date_.date() == pd.Timestamp('today').date():
        qe = pd.tseries.offsets.QuarterEnd(-1)
        return qe.apply(date_).date()
    else:
        if not date_.is_quarter_end:
            raise ValueError('输入日期无效,应为报告截止日期')
        return date_
转换为报告日期 逻辑 ---- 1. 如输入日期为当天,则自动转换为前一个财务报告期; 2. 如果为历史日期,输入日期必须为季度报告截止日期
cnswd/websource/juchao.py
ensure_report_date
huangzhangfeng/cnswd
0
python
def ensure_report_date(date_): date_ = pd.Timestamp(date_) if (date_.date() == pd.Timestamp('today').date()): qe = pd.tseries.offsets.QuarterEnd((- 1)) return qe.apply(date_).date() else: if (not date_.is_quarter_end): raise ValueError('输入日期无效,应为报告截止日期') return date_
def ensure_report_date(date_): date_ = pd.Timestamp(date_) if (date_.date() == pd.Timestamp('today').date()): qe = pd.tseries.offsets.QuarterEnd((- 1)) return qe.apply(date_).date() else: if (not date_.is_quarter_end): raise ValueError('输入日期无效,应为报告截止日期') return date_<|docstring|>转换为报告日期 逻辑 ---- 1. 如输入日期为当天,则自动转换为前一个财务报告期; 2. 如果为历史日期,输入日期必须为季度报告截止日期<|endoftext|>
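The quarter-end rollback in ensure_report_date is easy to miss; a standalone sketch of the same convention, assuming only pandas (the helper name is ours):

    import pandas as pd

    def previous_report_date(ts):
        # QuarterEnd(-1) rolls any date back to the most recent quarter end,
        # which is the report period a "today" input is mapped to above.
        return (pd.Timestamp(ts) + pd.tseries.offsets.QuarterEnd(-1)).date()

    print(previous_report_date('2018-05-04'))          # 2018-03-31
    print(pd.Timestamp('2017-12-31').is_quarter_end)   # True: accepted as a report date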
0ec5b1cab1375a97e54b578f74ce501a12e4de19e2d293bb7d17936bb35cefad
def fetch_prbookinfos(date_=pd.Timestamp('today')):
    '''
    股票财务报告期预约披露时间表

    参数
    ----
    date_ : 类似日期
        要抓取预约时间表的报告日期
        默认为当天,代表当前日期下,上一个应公布财务报告的报告日期
        除接受当日参数外,其余日期必须为标准财务报告截止日期
        如2018-03-31,2017-12-31

    备注
    ----
    连续抓页,须长时间休眠才能正确完成
    尤其是当出现异常页,再次尝试前,休眠至少3秒
    '''
    date_ = pd.Timestamp(date_)
    if date_ < EARLIEST_DATE:
        raise NoDataBefore('日期不得早于{}'.format(EARLIEST_DATE))
    url = 'http://three.cninfo.com.cn/new/information/getPrbookInfo'
    cols = ['报告期', '首次预约', '第一次变更', '第二次变更', '第三次变更',
            '实际披露', 'orgId', '股票代码', '股票简称']
    report_date = ensure_report_date(date_).strftime('%Y-%m-%d')
    markets = _get_markets(date_)
    try:
        dfs = _fetch_prbookinfos(report_date, url, markets)
    except TypeError:
        raise NoWebData('网页不存在报告截止日期为{}的预约时间表'.format(report_date))
    res = pd.concat(dfs, ignore_index=True)
    res.columns = cols
    return res
股票财务报告期预约披露时间表 参数 ---- date_ : 类似日期 要抓取预约时间表的报告日期 默认为当天,代表当前日期下,上一个应公布财务报告的报告日期 除接受当日参数外,其余日期必须为标准财务报告截止日期 如2018-03-31,2017-12-31 备注 ---- 连续抓页,须长时间休眠才能正确完成 尤其是当出现异常页,再次尝试前,休眠至少3秒
cnswd/websource/juchao.py
fetch_prbookinfos
huangzhangfeng/cnswd
0
python
def fetch_prbookinfos(date_=pd.Timestamp('today')): date_ = pd.Timestamp(date_) if (date_ < EARLIEST_DATE): raise NoDataBefore('日期不得早于{}'.format(EARLIEST_DATE)) url = 'http://three.cninfo.com.cn/new/information/getPrbookInfo' cols = ['报告期', '首次预约', '第一次变更', '第二次变更', '第三次变更', '实际披露', 'orgId', '股票代码', '股票简称'] report_date = ensure_report_date(date_).strftime('%Y-%m-%d') markets = _get_markets(date_) try: dfs = _fetch_prbookinfos(report_date, url, markets) except TypeError: raise NoWebData('网页不存在报告截止日期为{}的预约时间表'.format(report_date)) res = pd.concat(dfs, ignore_index=True) res.columns = cols return res
def fetch_prbookinfos(date_=pd.Timestamp('today')): date_ = pd.Timestamp(date_) if (date_ < EARLIEST_DATE): raise NoDataBefore('日期不得早于{}'.format(EARLIEST_DATE)) url = 'http://three.cninfo.com.cn/new/information/getPrbookInfo' cols = ['报告期', '首次预约', '第一次变更', '第二次变更', '第三次变更', '实际披露', 'orgId', '股票代码', '股票简称'] report_date = ensure_report_date(date_).strftime('%Y-%m-%d') markets = _get_markets(date_) try: dfs = _fetch_prbookinfos(report_date, url, markets) except TypeError: raise NoWebData('网页不存在报告截止日期为{}的预约时间表'.format(report_date)) res = pd.concat(dfs, ignore_index=True) res.columns = cols return res<|docstring|>股票财务报告期预约披露时间表 参数 ---- date_ : 类似日期 要抓取预约时间表的报告日期 默认为当天,代表当前日期下,上一个应公布财务报告的报告日期 除接受当日参数外,其余日期必须为标准财务报告截止日期 如2018-03-31,2017-12-31 备注 ---- 连续抓页,须长时间休眠才能正确完成 尤其是当出现异常页,再次尝试前,休眠至少3秒<|endoftext|>
3c447adfdc8059d4976ed7a21c596e511feca11c8a5701d7a9ba00694c663fe9
@ms_function
def default_parameter_f(x, y=3):
    ' default_parameter_f '
    z = x + y
    return z
default_parameter_f
tests/ut/python/pynative_mode/test_parse_method.py
default_parameter_f
limberc/mindspore
3,200
python
@ms_function def default_parameter_f(x, y=3): z = (x + y) return z
@ms_function def default_parameter_f(x, y=3): z = (x + y) return z<|docstring|>default_parameter_f<|endoftext|>
315de39142b12500d29a7147b04b7a6fa026144cdf215ba2d88c5d4d382ed8a6
def test_parse_defalut_parameter_case1():
    ' Test default parameter function call '
    log.debug('begin test_parse_defalut_parameter_case1')
    ret = default_parameter_f(2)
    log.debug('finished test_parse_defalut_parameter_case1, ret = %r', ret)
Test default parameter function call
tests/ut/python/pynative_mode/test_parse_method.py
test_parse_defalut_parameter_case1
limberc/mindspore
3,200
python
def test_parse_defalut_parameter_case1(): log.debug('begin test_parse_defalut_parameter_case1') ret = default_parameter_f(2) log.debug('finished test_parse_defalut_parameter_case1, ret = %r', ret)
def test_parse_defalut_parameter_case1(): log.debug('begin test_parse_defalut_parameter_case1') ret = default_parameter_f(2) log.debug('finished test_parse_defalut_parameter_case1, ret = %r', ret)<|docstring|>Test default parameter function call<|endoftext|>
8fe80656f2f776c033cc73a0c03f7801547756d4e7de75ca14a0a67df336fa02
def get_val_fn(x):
    ' get_val_fn '
    ret = x + 3
    return ret
get_val_fn
tests/ut/python/pynative_mode/test_parse_method.py
get_val_fn
limberc/mindspore
3,200
python
def get_val_fn(x): ret = (x + 3) return ret
def get_val_fn(x): ret = (x + 3) return ret<|docstring|>get_val_fn<|endoftext|>
c4206ba59be36827fdb25d862dc52f88c2e404207dd6a2c0f8ffa0c5b9a11f40
@ms_function
def bool_exp(x, y):
    ' bool_exp '
    return not (x > y)
bool_exp
tests/ut/python/pynative_mode/test_parse_method.py
bool_exp
limberc/mindspore
3,200
python
@ms_function def bool_exp(x, y): return (not (x > y))
@ms_function def bool_exp(x, y): return (not (x > y))<|docstring|>bool_exp<|endoftext|>
002eb9f21e8b07988ac824ba65f9b3415fc15690f8800a34ee56f8bf7d0343b7
def test_bool_exp():
    ' test_bool_exp '
    bool_exp(1, 2)
test_bool_exp
tests/ut/python/pynative_mode/test_parse_method.py
test_bool_exp
limberc/mindspore
3,200
python
def test_bool_exp(): bool_exp(1, 2)
def test_bool_exp(): bool_exp(1, 2)<|docstring|>test_bool_exp<|endoftext|>
b9748548992ac67309bd05b3b879bf54b31007d073d50eaac6dc803b80f9e53e
@ms_function
def var_parameter_f(x, *args):
    ' var_parameter_f '
    z = x + args[0] + args[1] + args[2]
    return z
var_parameter_f
tests/ut/python/pynative_mode/test_parse_method.py
var_parameter_f
limberc/mindspore
3,200
python
@ms_function def var_parameter_f(x, *args): z = (((x + args[0]) + args[1]) + args[2]) return z
@ms_function def var_parameter_f(x, *args): z = (((x + args[0]) + args[1]) + args[2]) return z<|docstring|>var_parameter_f<|endoftext|>
575f844dab9f36fbd2fd3bc4b3dee66a74ce802eb4fbcc408465f00823c98618
def test_var_parameter_case1():
    ' test_var_parameter_case1 '
    log.debug('start test_var_parameter_case1')
    var_parameter_f(1, 2, 3, 4, 5)
    log.debug('end test_var_parameter_case1')
test_var_parameter_case1
tests/ut/python/pynative_mode/test_parse_method.py
test_var_parameter_case1
limberc/mindspore
3,200
python
def test_var_parameter_case1(): log.debug('start test_var_parameter_case1') var_parameter_f(1, 2, 3, 4, 5) log.debug('end test_var_parameter_case1')
def test_var_parameter_case1(): log.debug('start test_var_parameter_case1') var_parameter_f(1, 2, 3, 4, 5) log.debug('end test_var_parameter_case1')<|docstring|>test_var_parameter_case1<|endoftext|>
fc338ecbb248be64c56eb92663403e9fc82df3ca09531694040a9dbbfeab12ad
@non_graph_engine
def test_call_method_on_construct():
    ' test_call_method_on_construct '
    log.debug('begin test_call_method_on_construct')
    x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32))
    y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32))
    z = np.array([[3, 5, 7], [2, 3, 5]]).astype(np.int32)
    net = Net(y)
    output = net.construct(x)
    result = output.asnumpy()
    print(result)
    assert np.all(result == z)
    log.debug('finished test_call_method_on_construct')
test_call_method_on_construct
tests/ut/python/pynative_mode/test_parse_method.py
test_call_method_on_construct
limberc/mindspore
3,200
python
@non_graph_engine def test_call_method_on_construct(): log.debug('begin test_call_method_on_construct') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) z = np.array([[3, 5, 7], [2, 3, 5]]).astype(np.int32) net = Net(y) output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_method_on_construct')
@non_graph_engine def test_call_method_on_construct(): log.debug('begin test_call_method_on_construct') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) z = np.array([[3, 5, 7], [2, 3, 5]]).astype(np.int32) net = Net(y) output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_method_on_construct')<|docstring|>test_call_method_on_construct<|endoftext|>
4dcf05b5c6e0504ec94bc0fe772f301098d2851b8192eb45bd853c459bb59d7c
@non_graph_engine
def test_call_other_object_method():
    ' test_call_other_object_method '
    log.debug('begin test_call_other_object_method')
    x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32))
    y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32))
    y1 = Tensor(np.array([[5, 4, 5], [1, 1, 2]]).astype(np.int32))
    z = np.array([[8, 9, 12], [3, 4, 7]]).astype(np.int32)
    net = Net1(y, y1)
    with pytest.raises(TypeError):
        output = net.construct(x)
        result = output.asnumpy()
        print(result)
        assert np.all(result == z)
    log.debug('finished test_call_other_object_method')
test_call_other_object_method
tests/ut/python/pynative_mode/test_parse_method.py
test_call_other_object_method
limberc/mindspore
3,200
python
@non_graph_engine def test_call_other_object_method(): log.debug('begin test_call_other_object_method') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) y1 = Tensor(np.array([[5, 4, 5], [1, 1, 2]]).astype(np.int32)) z = np.array([[8, 9, 12], [3, 4, 7]]).astype(np.int32) net = Net1(y, y1) with pytest.raises(TypeError): output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_other_object_method')
@non_graph_engine def test_call_other_object_method(): log.debug('begin test_call_other_object_method') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) y1 = Tensor(np.array([[5, 4, 5], [1, 1, 2]]).astype(np.int32)) z = np.array([[8, 9, 12], [3, 4, 7]]).astype(np.int32) net = Net1(y, y1) with pytest.raises(TypeError): output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_other_object_method')<|docstring|>test_call_other_object_method<|endoftext|>
cd4090e088820fbe9542a79c6ab7060ab58e9332c402aff5891de5341978054e
@non_graph_engine
def test_call_no_self_other_object_method():
    ' test_call_no_self_other_object_method '
    log.debug('begin test_call_other_object_method')
    x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32))
    y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32))
    z = np.array([[6, 9, 12], [3, 4, 7]]).astype(np.int32)
    net = Net2(y)
    with pytest.raises(TypeError):
        output = net.construct(x)
        result = output.asnumpy()
        print(result)
        assert np.all(result == z)
    log.debug('finished test_call_other_object_method')
test_call_no_self_other_object_method
tests/ut/python/pynative_mode/test_parse_method.py
test_call_no_self_other_object_method
limberc/mindspore
3,200
python
@non_graph_engine def test_call_no_self_other_object_method(): log.debug('begin test_call_other_object_method') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) z = np.array([[6, 9, 12], [3, 4, 7]]).astype(np.int32) net = Net2(y) with pytest.raises(TypeError): output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_other_object_method')
@non_graph_engine def test_call_no_self_other_object_method(): log.debug('begin test_call_other_object_method') x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)) y = Tensor(np.array([[2, 3, 4], [1, 1, 2]]).astype(np.int32)) z = np.array([[6, 9, 12], [3, 4, 7]]).astype(np.int32) net = Net2(y) with pytest.raises(TypeError): output = net.construct(x) result = output.asnumpy() print(result) assert np.all((result == z)) log.debug('finished test_call_other_object_method')<|docstring|>test_call_no_self_other_object_method<|endoftext|>
b476a0ccfe5c131f4d79137651c2f7bc47bfe12717b2a160b07e238663513ee6
def test_call_no_self_other_object_attr_value():
    ' test_call_no_self_other_object_attr_value '
    return
test_call_no_self_other_object_attr_value
tests/ut/python/pynative_mode/test_parse_method.py
test_call_no_self_other_object_attr_value
limberc/mindspore
3,200
python
def test_call_no_self_other_object_attr_value(): return
def test_call_no_self_other_object_attr_value(): return<|docstring|>test_call_no_self_other_object_attr_value<|endoftext|>
9e6745a0cfbb37a6a1517fad35674e24208b52cc189be310430cbb85d73c2300
def vararg1(x, y):
    ' vararg1 '
    z = x + y
    return z
vararg1
tests/ut/python/pynative_mode/test_parse_method.py
vararg1
limberc/mindspore
3,200
python
def vararg1(x, y): z = (x + y) return z
def vararg1(x, y): z = (x + y) return z<|docstring|>vararg1<|endoftext|>
42fc28190b3e7f74c82cd956890a956060bdac9910ce23ac51fc39c6509a81b0
def varargs_main(fn):
    ' varargs_main '
    @ms_function
    def t1(*args):
        return fn(*args)
    return t1
varargs_main
tests/ut/python/pynative_mode/test_parse_method.py
varargs_main
limberc/mindspore
3,200
python
def varargs_main(fn): @ms_function def t1(*args): return fn(*args) return t1
def varargs_main(fn): @ms_function def t1(*args): return fn(*args) return t1<|docstring|>varargs_main<|endoftext|>
8e0fe8707c707957b02f5f88c6b0a15cf5153cc4329279de8ae6133d60069736
def test_var_parameter_case3():
    ' test_var_parameter_case3 '
    log.debug('start test_var_parameter_case3')
    ret = varargs_main(vararg1)(1, 2)
    log.debug('ret = %r', ret)
    log.debug('end test_var_parameter_case3')
test_var_parameter_case3
tests/ut/python/pynative_mode/test_parse_method.py
test_var_parameter_case3
limberc/mindspore
3,200
python
def test_var_parameter_case3(): log.debug('start test_var_parameter_case3') ret = varargs_main(vararg1)(1, 2) log.debug('ret = %r', ret) log.debug('end test_var_parameter_case3')
def test_var_parameter_case3(): log.debug('start test_var_parameter_case3') ret = varargs_main(vararg1)(1, 2) log.debug('ret = %r', ret) log.debug('end test_var_parameter_case3')<|docstring|>test_var_parameter_case3<|endoftext|>
40a18b5e013c7cda2fec93465842955df6165d5b8932b9d076059409aadd4aef
@core(tg=True)
def set_flag(x):
    ' set_flag '
    return x + 1
set_flag
tests/ut/python/pynative_mode/test_parse_method.py
set_flag
limberc/mindspore
3,200
python
@core(tg=True) def set_flag(x): return (x + 1)
@core(tg=True) def set_flag(x): return (x + 1)<|docstring|>set_flag<|endoftext|>
c2c2222fa3ff45b08a3d502a16f8432bc075926064692428ad1ba9403d73f9a0
@ms_function
def set_test_flag_main(x, y):
    ' set_test_flag_main '
    z = set_flag(x)
    z = z + y
    return z
set_test_flag_main
tests/ut/python/pynative_mode/test_parse_method.py
set_test_flag_main
limberc/mindspore
3,200
python
@ms_function def set_test_flag_main(x, y): z = set_flag(x) z = (z + y) return z
@ms_function def set_test_flag_main(x, y): z = set_flag(x) z = (z + y) return z<|docstring|>set_test_flag_main<|endoftext|>
88766fd8f8cf43c5ebf563a0a4240fb2ddb57aeb03c8a16690da406ec073d0df
def test_set_flag():
    ' Test default parameter function call '
    log.debug('begin test_set_flag')
    ret = set_test_flag_main(2, 3)
    log.debug('finished test_set_flag, ret = %r', ret)
Test default parameter function call
tests/ut/python/pynative_mode/test_parse_method.py
test_set_flag
limberc/mindspore
3,200
python
def test_set_flag(): log.debug('begin test_set_flag') ret = set_test_flag_main(2, 3) log.debug('finished test_set_flag, ret = %r', ret)
def test_set_flag(): log.debug('begin test_set_flag') ret = set_test_flag_main(2, 3) log.debug('finished test_set_flag, ret = %r', ret)<|docstring|>Test default parameter function call<|endoftext|>
7f4632938e7fb1f78df3b3587466c4f2da66cf1f0baa70aa7dd2d74b45a2a6a8
@ms_function
def invoke_dataclass(x, y):
    ' invoke_dataclass '
    acs = Access(x, y)
    return acs.max()
invoke_dataclass
tests/ut/python/pynative_mode/test_parse_method.py
invoke_dataclass
limberc/mindspore
3,200
python
@ms_function def invoke_dataclass(x, y): acs = Access(x, y) return acs.max()
@ms_function def invoke_dataclass(x, y): acs = Access(x, y) return acs.max()<|docstring|>invoke_dataclass<|endoftext|>
af6f24db9eb32c7991b17db6b18d77feead4f8811464022269251144e3d8ed1f
def test_access():
    ' test_access '
    invoke_dataclass(1, 2)
test_access
tests/ut/python/pynative_mode/test_parse_method.py
test_access
limberc/mindspore
3,200
python
def test_access(): invoke_dataclass(1, 2)
def test_access(): invoke_dataclass(1, 2)<|docstring|>test_access<|endoftext|>
7c60da264701b949344a5f1d0d56ca893f4bfc71f4080945ac78aa564028af85
@ms_function
def invoke_dataclass2(x, y):
    ' invoke_dataclass '
    acs = Access2(x, y)
    return acs.max()
invoke_dataclass
tests/ut/python/pynative_mode/test_parse_method.py
invoke_dataclass2
limberc/mindspore
3,200
python
@ms_function def invoke_dataclass2(x, y): acs = Access2(x, y) return acs.max()
@ms_function def invoke_dataclass2(x, y): acs = Access2(x, y) return acs.max()<|docstring|>invoke_dataclass<|endoftext|>
0d53e3a73fbba7bcacafa449be7069104a8c23ff491654a4071b6faa1df0ca3b
def test_access_attr_error():
    ' test_access '
    with pytest.raises(AttributeError):
        invoke_dataclass2(2, 1)
test_access
tests/ut/python/pynative_mode/test_parse_method.py
test_access_attr_error
limberc/mindspore
3,200
python
def test_access_attr_error(): with pytest.raises(AttributeError): invoke_dataclass2(2, 1)
def test_access_attr_error(): with pytest.raises(AttributeError): invoke_dataclass2(2, 1)<|docstring|>test_access<|endoftext|>
6f212e1099c687eaa2f5210b92d15c3b9731b87c26dc65614b3cf7815de5f36d
def myfunc(x):
    ' myfunc '
    return x * x
myfunc
tests/ut/python/pynative_mode/test_parse_method.py
myfunc
limberc/mindspore
3,200
python
def myfunc(x): return (x * x)
def myfunc(x): return (x * x)<|docstring|>myfunc<|endoftext|>
f405a07d68d58eb1b38a1554fccc387b71f7c31b929bbd931cc2703b3338f752
@ms_function
def ms_infer_for():
    ' ms_infer_for '
    a = 0.0
    for x in [1.1, 2.3, 3.3]:
        a = a + x
    return a
ms_infer_for
tests/ut/python/pynative_mode/test_parse_method.py
ms_infer_for
limberc/mindspore
3,200
python
@ms_function def ms_infer_for(): a = 0.0 for x in [1.1, 2.3, 3.3]: a = (a + x) return a
@ms_function def ms_infer_for(): a = 0.0 for x in [1.1, 2.3, 3.3]: a = (a + x) return a<|docstring|>ms_infer_for<|endoftext|>
f698e52221cd63cfd144c72162a16ca046ec87f360eb45d16f9f77054e9ee7e6
def test_infer_for():
    ' test_infer_for '
    ms_infer_for()
test_infer_for
tests/ut/python/pynative_mode/test_parse_method.py
test_infer_for
limberc/mindspore
3,200
python
def test_infer_for(): ms_infer_for()
def test_infer_for(): ms_infer_for()<|docstring|>test_infer_for<|endoftext|>
f73185a4253ea936a29e0da99f53daf631c8073c5ee57a0dd26f121c3573b33b
@ms_function
def ms_infer_for_func(y):
    ' ms_infer_for_func '
    for x in [1.0, 2.0, 3.0]:
        y = myfunc(x) + y
    return y
ms_infer_for_func
tests/ut/python/pynative_mode/test_parse_method.py
ms_infer_for_func
limberc/mindspore
3,200
python
@ms_function def ms_infer_for_func(y): for x in [1.0, 2.0, 3.0]: y = (myfunc(x) + y) return y
@ms_function def ms_infer_for_func(y): for x in [1.0, 2.0, 3.0]: y = (myfunc(x) + y) return y<|docstring|>ms_infer_for_func<|endoftext|>
28466c9b65c66e5d765cc252d47dc05f135a5438bcd67128c24c75eb0fc47312
def test_ms_infer_for_func():
    ' test_ms_infer_for_func '
    ms_infer_for_func(1.0)
test_ms_infer_for_func
tests/ut/python/pynative_mode/test_parse_method.py
test_ms_infer_for_func
limberc/mindspore
3,200
python
def test_ms_infer_for_func(): ms_infer_for_func(1.0)
def test_ms_infer_for_func(): ms_infer_for_func(1.0)<|docstring|>test_ms_infer_for_func<|endoftext|>
b9ed3993765de513802da781366f99d448ffbce7122e2352818bbf69c1b2ad8e
@ms_function
def add(x, y):
    ' add '
    return x + y
add
tests/ut/python/pynative_mode/test_parse_method.py
add
limberc/mindspore
3,200
python
@ms_function def add(x, y): return (x + y)
@ms_function def add(x, y): return (x + y)<|docstring|>add<|endoftext|>
b9ce39a2d3581d339e9036fa402198758954f6499600fdd169ef9308ff4aa3f1
def test_add():
    ' test_add '
    res = add(1, 2.0)
    return res
test_add
tests/ut/python/pynative_mode/test_parse_method.py
test_add
limberc/mindspore
3,200
python
def test_add(): res = add(1, 2.0) return res
def test_add(): res = add(1, 2.0) return res<|docstring|>test_add<|endoftext|>
e04011b29ccc99b60ab25e96d2cca594e3b8fb601f905e722c12cce74d52fd14
@ms_function
def add_list():
    ' add_list '
    a = [1, 2, 3]
    b = a[1] + a[2]
    return b
add_list
tests/ut/python/pynative_mode/test_parse_method.py
add_list
limberc/mindspore
3,200
python
@ms_function def add_list(): a = [1, 2, 3] b = (a[1] + a[2]) return b
@ms_function def add_list(): a = [1, 2, 3] b = (a[1] + a[2]) return b<|docstring|>add_list<|endoftext|>
6c2d3e9a91b770ad1bbbceeff1c6054765931350b3fd1a5a8e94d3fa192ddf54
def test_list():
    ' test_list '
    return add_list()
test_list
tests/ut/python/pynative_mode/test_parse_method.py
test_list
limberc/mindspore
3,200
python
def test_list(): return add_list()
def test_list(): return add_list()<|docstring|>test_list<|endoftext|>
6e88b0a5d03ee7ca36d74baf0a4fbce3f1a07b294709c94b8d350b7b7d5d8f47
@ms_function
def compare_list_len():
    ' compare_list_len '
    a = [1, 2, 3]
    return ms_len(a)
compare_list_len
tests/ut/python/pynative_mode/test_parse_method.py
compare_list_len
limberc/mindspore
3,200
python
@ms_function def compare_list_len(): a = [1, 2, 3] return ms_len(a)
@ms_function def compare_list_len(): a = [1, 2, 3] return ms_len(a)<|docstring|>compare_list_len<|endoftext|>
5a9b4a4125383593db414dd31c2fd08fa1074adf35cd999aaeb3a3028a44aedf
def test_list_len():
    ' test_list_len '
    compare_list_len()
test_list_len
tests/ut/python/pynative_mode/test_parse_method.py
test_list_len
limberc/mindspore
3,200
python
def test_list_len(): compare_list_len()
def test_list_len(): compare_list_len()<|docstring|>test_list_len<|endoftext|>
e3298fda6a89d09fbd7e918ace88fd4a752106b4968aed7a97075f9a6c4022eb
@ms_function
def add_tuple():
    ' add_tuple '
    a = (1, 2, 3)
    b = a[1] + a[2]
    return b
add_tuple
tests/ut/python/pynative_mode/test_parse_method.py
add_tuple
limberc/mindspore
3,200
python
@ms_function def add_tuple(): a = (1, 2, 3) b = (a[1] + a[2]) return b
@ms_function def add_tuple(): a = (1, 2, 3) b = (a[1] + a[2]) return b<|docstring|>add_tuple<|endoftext|>
d8d4b3d93732bd867bc8069a4a7ac2023064d5367862e4903101578a5fb45fe3
def test_tuple():
    ' test_tuple '
    return add_tuple()
test_tuple
tests/ut/python/pynative_mode/test_parse_method.py
test_tuple
limberc/mindspore
3,200
python
def test_tuple(): return add_tuple()
def test_tuple(): return add_tuple()<|docstring|>test_tuple<|endoftext|>
f61d6f31786e4a280f793155b74794d8b70e3126a020939dd981775ba93d843a
def invoke_func(x):
    ' invoke_func '
    return x * x
invoke_func
tests/ut/python/pynative_mode/test_parse_method.py
invoke_func
limberc/mindspore
3,200
python
def invoke_func(x): return (x * x)
def invoke_func(x): return (x * x)<|docstring|>invoke_func<|endoftext|>
5fd5c54c3f27c4db9c475ff97faa20c6bcb05bacd028494acc3a615a1f79e052
@ms_function
def tuple_of_node(x, y):
    ' tuple_of_node '
    a = invoke_func(x)
    b = invoke_func(y)
    c = (a, b)
    d = c[1] * x
    return d
tuple_of_node
tests/ut/python/pynative_mode/test_parse_method.py
tuple_of_node
limberc/mindspore
3,200
python
@ms_function def tuple_of_node(x, y): a = invoke_func(x) b = invoke_func(y) c = (a, b) d = (c[1] * x) return d
@ms_function def tuple_of_node(x, y): a = invoke_func(x) b = invoke_func(y) c = (a, b) d = (c[1] * x) return d<|docstring|>tuple_of_node<|endoftext|>
88079f0229531247c88f4b2d40835c932732a64860b79d065c5c443517efbbc1
def test_tuple_node():
    ' test_tuple_node '
    res = tuple_of_node(1, 2)
    return res
test_tuple_node
tests/ut/python/pynative_mode/test_parse_method.py
test_tuple_node
limberc/mindspore
3,200
python
def test_tuple_node(): res = tuple_of_node(1, 2) return res
def test_tuple_node(): res = tuple_of_node(1, 2) return res<|docstring|>test_tuple_node<|endoftext|>
02fd7488471b1d3deebf3c2caf015b712c9b357eb34126cab86ecd2eae4eca26
@ms_function
def range_spec(x, y):
    ' range_spec '
    for _ in range(1, 10, 3):
        x = x + 1
    return x + y
range_spec
tests/ut/python/pynative_mode/test_parse_method.py
range_spec
limberc/mindspore
3,200
python
@ms_function def range_spec(x, y): for _ in range(1, 10, 3): x = (x + 1) return (x + y)
@ms_function def range_spec(x, y): for _ in range(1, 10, 3): x = (x + 1) return (x + y)<|docstring|>range_spec<|endoftext|>
adb64c43535fe64a78e93f7f82c8c16426eeacb156536c04d73c881211615611
def test_range():
    ' test_range '
    res = range_spec(10, 10)
    return res
test_range
tests/ut/python/pynative_mode/test_parse_method.py
test_range
limberc/mindspore
3,200
python
def test_range(): res = range_spec(10, 10) return res
def test_range(): res = range_spec(10, 10) return res<|docstring|>test_range<|endoftext|>
a343b86dc9bbb67ab66e40743f08cd306b913a731300431c560ce5b7f98c8089
def test_expr():
    ' test const expr '
    a = (1, 2)

    @constexpr
    def tuple_len(x):
        assert len(x) == 2
    tuple_len(a)
test const expr
tests/ut/python/pynative_mode/test_parse_method.py
test_expr
limberc/mindspore
3,200
python
def test_expr(): a = (1, 2) @constexpr def tuple_len(x): assert (len(x) == 2) tuple_len(a)
def test_expr(): a = (1, 2) @constexpr def tuple_len(x): assert (len(x) == 2) tuple_len(a)<|docstring|>test const expr<|endoftext|>
5cef5ccc43de65ed11a5f624757dd839a909692b54241939f1d6b194e4f4d7f7
def test_tuple_to_array(): ' test range tuple to array ' range_x = range(10) res = F.tuple_to_array(range_x) print(res)
test range tuple to array
tests/ut/python/pynative_mode/test_parse_method.py
test_tuple_to_array
limberc/mindspore
3,200
python
def test_tuple_to_array(): range_x = range(10) res = F.tuple_to_array(range_x) print(res)
def test_tuple_to_array(): range_x = range(10) res = F.tuple_to_array(range_x) print(res)<|docstring|>test range tuple to array<|endoftext|>
ee1d9c108a8ec3b62edf9268693067cc44b4a107731b284fbd695356a9b2ddc5
def candidate(self, residuals): "Given a set of residuals this will return the index of the candidate event and the 'significance' value" n = len(residuals) if (n == 0): return (0, 0) std = np.std(residuals) if (std == 0): return (0, 0) result = (np.cumsum(residuals) * (1.0 / (std * np.sqrt(n)))) ols_cusum = np.insert(result, 0, 0) t = np.linspace(0, 1, num=(n + 1)) shape = np.sqrt((t * (1 - t))) clambda = np.append(np.insert((ols_cusum[1:(- 1)] / shape[1:(- 1)]), 0, 0), 0) index = np.abs(clambda).argmax() significance = np.abs(clambda[index]) return (index, significance)
Given a set of residuals this will return the index of the candidate event and the 'significance' value
edinet_baseline_hourly_module/edinet_models/pyEMIS2/analysis/ols_cusum.py
candidate
BeeGroup-cimne/module_edinet
0
python
def candidate(self, residuals): n = len(residuals) if (n == 0): return (0, 0) std = np.std(residuals) if (std == 0): return (0, 0) result = (np.cumsum(residuals) * (1.0 / (std * np.sqrt(n)))) ols_cusum = np.insert(result, 0, 0) t = np.linspace(0, 1, num=(n + 1)) shape = np.sqrt((t * (1 - t))) clambda = np.append(np.insert((ols_cusum[1:(- 1)] / shape[1:(- 1)]), 0, 0), 0) index = np.abs(clambda).argmax() significance = np.abs(clambda[index]) return (index, significance)
def candidate(self, residuals): n = len(residuals) if (n == 0): return (0, 0) std = np.std(residuals) if (std == 0): return (0, 0) result = (np.cumsum(residuals) * (1.0 / (std * np.sqrt(n)))) ols_cusum = np.insert(result, 0, 0) t = np.linspace(0, 1, num=(n + 1)) shape = np.sqrt((t * (1 - t))) clambda = np.append(np.insert((ols_cusum[1:(- 1)] / shape[1:(- 1)]), 0, 0), 0) index = np.abs(clambda).argmax() significance = np.abs(clambda[index]) return (index, significance)<|docstring|>Given a set of residuals this will return the index of the candidate event and the 'significance' value<|endoftext|>
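The candidate method above implements an OLS-CUSUM change-point test: the cumulative sum of residuals is scaled by std * sqrt(n), divided by the Brownian-bridge envelope sqrt(t(1-t)), and the largest absolute value marks the most likely event. A minimal self-contained sketch of the same computation on synthetic residuals with a level shift, re-deriving the statistic inline rather than instantiating the analysis class:

import numpy as np

# Residuals with a mean shift at index 50; the scaled statistic
# clambda should peak close to the change point.
rng = np.random.default_rng(0)
residuals = np.concatenate([rng.normal(0.0, 1.0, 50), rng.normal(3.0, 1.0, 50)])

n = len(residuals)
ols_cusum = np.insert(np.cumsum(residuals) / (np.std(residuals) * np.sqrt(n)), 0, 0)
t = np.linspace(0, 1, num=n + 1)
shape = np.sqrt(t * (1 - t))
clambda = np.append(np.insert(ols_cusum[1:-1] / shape[1:-1], 0, 0), 0)
index = int(np.abs(clambda).argmax())
print(index, abs(clambda[index]))  # index lands near 50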
e614459246ec1a1b4a8a68e19d07aa0ed14e9c88dd6c0fe2f62b5885599a2a1a
def add_event(self, model): 'See if an event can be detected in the model. If so, add it.' candidates = [] for p in model.periods: cusum = self.period_CUSUM(p) if cusum.has_event(): candidates.append(cusum.event) self.logger.info(('%i candidate events' % len(candidates))) if (len(candidates) == 0): return False winner = sorted(candidates, key=(lambda x: x.significance))[0] self.logger.info(('winner: %s' % winner.date.strftime('%d/%m/%Y'))) model.add_event(winner) return True
See if an event can be detected in the model. If so, add it.
edinet_baseline_hourly_module/edinet_models/pyEMIS2/analysis/ols_cusum.py
add_event
BeeGroup-cimne/module_edinet
0
python
def add_event(self, model): candidates = [] for p in model.periods: cusum = self.period_CUSUM(p) if cusum.has_event(): candidates.append(cusum.event) self.logger.info(('%i candidate events' % len(candidates))) if (len(candidates) == 0): return False winner = sorted(candidates, key=(lambda x: x.significance))[0] self.logger.info(('winner: %s' % winner.date.strftime('%d/%m/%Y'))) model.add_event(winner) return True
def add_event(self, model): candidates = [] for p in model.periods: cusum = self.period_CUSUM(p) if cusum.has_event(): candidates.append(cusum.event) self.logger.info(('%i candidate events' % len(candidates))) if (len(candidates) == 0): return False winner = sorted(candidates, key=(lambda x: x.significance))[0] self.logger.info(('winner: %s' % winner.date.strftime('%d/%m/%Y'))) model.add_event(winner) return True<|docstring|>See if an event can be detected in the model. If so, add it.<|endoftext|>
b02037aa79040788ebdd78c1927fc94ed03b132fe28ece78a684e230964bf1e4
def event_CUSUM(self, model, event_index): 'Return an OLS_CUSUM covering the periods before and after an event' dates = model.event_dates() from_indices = (model.data['date'] > dates[event_index]) to_indices = (model.data['date'] <= dates[(event_index + 2)]) data = model.data[(from_indices & to_indices)] submodel = model.modelFactory(data) res = submodel.residuals(data) return OLS_CUSUM(data['date'], res, self.alpha)
Return an OLS_CUSUM covering the periods before and after an event
edinet_baseline_hourly_module/edinet_models/pyEMIS2/analysis/ols_cusum.py
event_CUSUM
BeeGroup-cimne/module_edinet
0
python
def event_CUSUM(self, model, event_index): dates = model.event_dates() from_indices = (model.data['date'] > dates[event_index]) to_indices = (model.data['date'] <= dates[(event_index + 2)]) data = model.data[(from_indices & to_indices)] submodel = model.modelFactory(data) res = submodel.residuals(data) return OLS_CUSUM(data['date'], res, self.alpha)
def event_CUSUM(self, model, event_index): dates = model.event_dates() from_indices = (model.data['date'] > dates[event_index]) to_indices = (model.data['date'] <= dates[(event_index + 2)]) data = model.data[(from_indices & to_indices)] submodel = model.modelFactory(data) res = submodel.residuals(data) return OLS_CUSUM(data['date'], res, self.alpha)<|docstring|>Return an OLS_CUSUM covering the periods before and after an event<|endoftext|>
a8bf6c7af4e9173e5b28a3419afa3b967966afbe52fdf83f1c8d9931b03100e3
def create_vmdk(service_instance, datacenter_mo, datastore_path): 'Create vmdk in specific datacenter' vdm = service_instance.content.virtualDiskManager task = vdm.CreateVirtualDisk(datastore_path, datacenter_mo, vim.VirtualDiskManager.SeSparseVirtualDiskSpec(diskType='seSparse', adapterType='lsiLogic', capacityKb=((1024 * 1024) * 4))) pyVim.task.WaitForTask(task) print("Created VMDK '{}' in Datacenter '{}'".format(datastore_path, datacenter_mo.name)) return task.info.result
Create vmdk in specific datacenter
samples/vsphere/common/vim/vmdk.py
create_vmdk
eoq/vsphere-automation-sdk-python
589
python
def create_vmdk(service_instance, datacenter_mo, datastore_path): vdm = service_instance.content.virtualDiskManager task = vdm.CreateVirtualDisk(datastore_path, datacenter_mo, vim.VirtualDiskManager.SeSparseVirtualDiskSpec(diskType='seSparse', adapterType='lsiLogic', capacityKb=((1024 * 1024) * 4))) pyVim.task.WaitForTask(task) print("Created VMDK '{}' in Datacenter '{}'".format(datastore_path, datacenter_mo.name)) return task.info.result
def create_vmdk(service_instance, datacenter_mo, datastore_path): vdm = service_instance.content.virtualDiskManager task = vdm.CreateVirtualDisk(datastore_path, datacenter_mo, vim.VirtualDiskManager.SeSparseVirtualDiskSpec(diskType='seSparse', adapterType='lsiLogic', capacityKb=((1024 * 1024) * 4))) pyVim.task.WaitForTask(task) print("Created VMDK '{}' in Datacenter '{}'".format(datastore_path, datacenter_mo.name)) return task.info.result<|docstring|>Create vmdk in specific datacenter<|endoftext|>
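A hedged end-to-end sketch of calling create_vmdk and delete_vmdk above with pyVmomi. The host, credentials, datastore path, and the naive first-datacenter lookup are all placeholders for a real environment:

import ssl
from pyVim.connect import SmartConnect, Disconnect

ctx = ssl._create_unverified_context()  # lab-only: skips certificate checks
si = SmartConnect(host='vcenter.example.com', user='administrator@vsphere.local',
                  pwd='secret', sslContext=ctx)
try:
    content = si.RetrieveContent()
    # Grab the first datacenter under the inventory root (assumption:
    # a flat inventory; real code should search by name).
    datacenter = content.rootFolder.childEntity[0]
    path = '[my-datastore] samples/sample.vmdk'  # hypothetical datastore path
    create_vmdk(si, datacenter, path)
    delete_vmdk(si, datacenter, path)
finally:
    Disconnect(si)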
4ceb8a125f3d5135866afb963a394f0efd03e4522c114a3d040142d89811464b
def delete_vmdk(service_instance, datacenter_mo, datastore_path): 'Delete vmdk from specific datastore' vdm = service_instance.content.virtualDiskManager task = vdm.DeleteVirtualDisk(datastore_path, datacenter_mo) pyVim.task.WaitForTask(task)
Delete vmdk from specific datastore
samples/vsphere/common/vim/vmdk.py
delete_vmdk
eoq/vsphere-automation-sdk-python
589
python
def delete_vmdk(service_instance, datacenter_mo, datastore_path): vdm = service_instance.content.virtualDiskManager task = vdm.DeleteVirtualDisk(datastore_path, datacenter_mo) pyVim.task.WaitForTask(task)
def delete_vmdk(service_instance, datacenter_mo, datastore_path): vdm = service_instance.content.virtualDiskManager task = vdm.DeleteVirtualDisk(datastore_path, datacenter_mo) pyVim.task.WaitForTask(task)<|docstring|>Delete vmdk from specific datastore<|endoftext|>
8a34fa095633851c45f2326645baf0e5acef46520499faf4101d0dbd59ac6852
def detect_vmdk(client, soap_stub, datacenter_name, datastore_name, datastore_path): 'Find vmdk in specific datastore' datastore_mo = get_datastore_mo(client, soap_stub, datacenter_name, datastore_name) if (not datastore_mo): return False dsfile = datastore_file.File(datastore_mo) if dsfile.exists(datastore_path): return True else: return False
Find vmdk in specific datastore
samples/vsphere/common/vim/vmdk.py
detect_vmdk
eoq/vsphere-automation-sdk-python
589
python
def detect_vmdk(client, soap_stub, datacenter_name, datastore_name, datastore_path): datastore_mo = get_datastore_mo(client, soap_stub, datacenter_name, datastore_name) if (not datastore_mo): return False dsfile = datastore_file.File(datastore_mo) if dsfile.exists(datastore_path): return True else: return False
def detect_vmdk(client, soap_stub, datacenter_name, datastore_name, datastore_path): datastore_mo = get_datastore_mo(client, soap_stub, datacenter_name, datastore_name) if (not datastore_mo): return False dsfile = datastore_file.File(datastore_mo) if dsfile.exists(datastore_path): return True else: return False<|docstring|>Find vmdk in specific datastore<|endoftext|>
689d8b6180c1328f72976c1cdbd20467c08571d0a1ddb719694adcdf21770bf5
async def async_setup_entry(hass, config_entry, async_add_entities): 'Set up from config entry.' coordinator: DataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id] name = config_entry.data[CONF_NAME] entities = [SyncThruOnlineSensor(coordinator, name), SyncThruProblemSensor(coordinator, name)] async_add_entities(entities)
Set up from config entry.
homeassistant/components/syncthru/binary_sensor.py
async_setup_entry
Tommatheussen/core
11
python
async def async_setup_entry(hass, config_entry, async_add_entities): coordinator: DataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id] name = config_entry.data[CONF_NAME] entities = [SyncThruOnlineSensor(coordinator, name), SyncThruProblemSensor(coordinator, name)] async_add_entities(entities)
async def async_setup_entry(hass, config_entry, async_add_entities): coordinator: DataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id] name = config_entry.data[CONF_NAME] entities = [SyncThruOnlineSensor(coordinator, name), SyncThruProblemSensor(coordinator, name)] async_add_entities(entities)<|docstring|>Set up from config entry.<|endoftext|>
a0ba4ef3d5de544de84f774fdc0514f02f272e4ac1fe9392a1390b2b11e3a31a
def __init__(self, coordinator, name): 'Initialize the sensor.' super().__init__(coordinator) self.syncthru: SyncThru = coordinator.data self._name = name self._id_suffix = ''
Initialize the sensor.
homeassistant/components/syncthru/binary_sensor.py
__init__
Tommatheussen/core
11
python
def __init__(self, coordinator, name): super().__init__(coordinator) self.syncthru: SyncThru = coordinator.data self._name = name self._id_suffix = ''
def __init__(self, coordinator, name): super().__init__(coordinator) self.syncthru: SyncThru = coordinator.data self._name = name self._id_suffix = ''<|docstring|>Initialize the sensor.<|endoftext|>
957abd3e8798671bb3b2cb8802e38436ac240fa56cfd6d92719d3f0021603681
@property def unique_id(self): 'Return unique ID for the sensor.' serial = self.syncthru.serial_number() return (f'{serial}{self._id_suffix}' if serial else None)
Return unique ID for the sensor.
homeassistant/components/syncthru/binary_sensor.py
unique_id
Tommatheussen/core
11
python
@property def unique_id(self): serial = self.syncthru.serial_number() return (f'{serial}{self._id_suffix}' if serial else None)
@property def unique_id(self): serial = self.syncthru.serial_number() return (f'{serial}{self._id_suffix}' if serial else None)<|docstring|>Return unique ID for the sensor.<|endoftext|>
c2acbec88b5ad13d0f458e2f3155e56fd2fabdb29665addbac450039553aa2e4
@property def name(self): 'Return the name of the sensor.' return self._name
Return the name of the sensor.
homeassistant/components/syncthru/binary_sensor.py
name
Tommatheussen/core
11
python
@property def name(self): return self._name
@property def name(self): return self._name<|docstring|>Return the name of the sensor.<|endoftext|>
632da38c928d1d72dc75af84bb155909101eb17884e272f3022822412f45336c
@property def device_info(self): 'Return device information.' return {'identifiers': device_identifiers(self.syncthru)}
Return device information.
homeassistant/components/syncthru/binary_sensor.py
device_info
Tommatheussen/core
11
python
@property def device_info(self): return {'identifiers': device_identifiers(self.syncthru)}
@property def device_info(self): return {'identifiers': device_identifiers(self.syncthru)}<|docstring|>Return device information.<|endoftext|>
575143fce557e6cf1ad1757beb6ef7140246fb56707a173b4bec3cab83976c6c
def __init__(self, syncthru, name): 'Initialize the sensor.' super().__init__(syncthru, name) self._id_suffix = '_online'
Initialize the sensor.
homeassistant/components/syncthru/binary_sensor.py
__init__
Tommatheussen/core
11
python
def __init__(self, syncthru, name): super().__init__(syncthru, name) self._id_suffix = '_online'
def __init__(self, syncthru, name): super().__init__(syncthru, name) self._id_suffix = '_online'<|docstring|>Initialize the sensor.<|endoftext|>
73e314fdf4c6a944f3f4bd6e7a98284a0fd81746914add2b9f0099e6466a3d7a
@property def is_on(self): 'Set the state to whether the printer is online.' return self.syncthru.is_online()
Set the state to whether the printer is online.
homeassistant/components/syncthru/binary_sensor.py
is_on
Tommatheussen/core
11
python
@property def is_on(self): return self.syncthru.is_online()
@property def is_on(self): return self.syncthru.is_online()<|docstring|>Set the state to whether the printer is online.<|endoftext|>
8c221d2b4b0ea5a6283c6861af4856a85a2ab98e810675314a4995c3bb5df7b9
def __init__(self, syncthru, name): 'Initialize the sensor.' super().__init__(syncthru, name) self._id_suffix = '_problem'
Initialize the sensor.
homeassistant/components/syncthru/binary_sensor.py
__init__
Tommatheussen/core
11
python
def __init__(self, syncthru, name): super().__init__(syncthru, name) self._id_suffix = '_problem'
def __init__(self, syncthru, name): super().__init__(syncthru, name) self._id_suffix = '_problem'<|docstring|>Initialize the sensor.<|endoftext|>
05765d8bb05fd6fc8fa1a550c50077a1c090b411f763e35efa364fc3854febd1
@property def is_on(self): 'Set the state to whether there is a problem with the printer.' return SYNCTHRU_STATE_PROBLEM[self.syncthru.device_status()]
Set the state to whether there is a problem with the printer.
homeassistant/components/syncthru/binary_sensor.py
is_on
Tommatheussen/core
11
python
@property def is_on(self): return SYNCTHRU_STATE_PROBLEM[self.syncthru.device_status()]
@property def is_on(self): return SYNCTHRU_STATE_PROBLEM[self.syncthru.device_status()]<|docstring|>Set the state to whether there is a problem with the printer.<|endoftext|>
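SyncThruProblemSensor.is_on above indexes SYNCTHRU_STATE_PROBLEM with the printer's device_status(). The real mapping lives in the integration's constants; the sketch below only illustrates its assumed shape, with hypothetical state keys mapping each status to a problem flag:

# Assumed shape of SYNCTHRU_STATE_PROBLEM -- keys and values are
# illustrative, not the integration's real constants.
SYNCTHRU_STATE_PROBLEM = {
    'unknown': True,   # cannot reach or parse the printer state
    'online': False,   # normal operation
    'warning': True,   # low toner, open tray, etc.
    'testing': False,
    'error': True,     # paper jam, hardware fault, ...
}

def is_problem(device_status):
    return SYNCTHRU_STATE_PROBLEM[device_status]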
d1df3c77cc5d1c1ef49350fa09206297ef255d42c27c1381d918335ffdd9da98
def __init__(__self__, resource_name, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None, __props__=None, __name__=None, __opts__=None): "\n Manages a Event Hubs Consumer Group as a nested resource within an Event Hub.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if (eventhub_name is None): raise TypeError("Missing required property 'eventhub_name'") __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name if (namespace_name is None): raise TypeError("Missing required property 'namespace_name'") __props__['namespace_name'] = namespace_name if (resource_group_name is None): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata super(EventHubConsumerGroup, __self__).__init__('azure:eventhub/eventHubConsumerGroup:EventHubConsumerGroup', resource_name, __props__, opts)
Manages a Event Hubs Consumer Group as a nested resource within an Event Hub. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created. :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created. :param pulumi.Input[str] user_metadata: Specifies the user metadata. > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.
sdk/python/pulumi_azure/eventhub/event_hub_consumer_group.py
__init__
vijayraavi/pulumi-azure
0
python
def __init__(__self__, resource_name, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None, __props__=None, __name__=None, __opts__=None): "\n Manages a Event Hubs Consumer Group as a nested resource within an Event Hub.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if (eventhub_name is None): raise TypeError("Missing required property 'eventhub_name'") __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name if (namespace_name is None): raise TypeError("Missing required property 'namespace_name'") __props__['namespace_name'] = namespace_name if (resource_group_name is None): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata super(EventHubConsumerGroup, __self__).__init__('azure:eventhub/eventHubConsumerGroup:EventHubConsumerGroup', resource_name, __props__, opts)
def __init__(__self__, resource_name, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None, __props__=None, __name__=None, __opts__=None): "\n Manages a Event Hubs Consumer Group as a nested resource within an Event Hub.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if (eventhub_name is None): raise TypeError("Missing required property 'eventhub_name'") __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name if (namespace_name is None): raise TypeError("Missing required property 'namespace_name'") __props__['namespace_name'] = namespace_name if (resource_group_name is None): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata super(EventHubConsumerGroup, __self__).__init__('azure:eventhub/eventHubConsumerGroup:EventHubConsumerGroup', resource_name, __props__, opts)<|docstring|>Manages a Event Hubs Consumer Group as a nested resource within an Event Hub. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created. :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created. :param pulumi.Input[str] user_metadata: Specifies the user metadata. 
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.<|endoftext|>
afa0524fe440efd4908b0775ec4a5b758ee5e12ad60be1594e8648701475c651
@staticmethod def get(resource_name, id, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None): "\n Get an existing EventHubConsumerGroup resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n \n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name __props__['namespace_name'] = namespace_name __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata return EventHubConsumerGroup(resource_name, opts=opts, __props__=__props__)
Get an existing EventHubConsumerGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created. :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created. :param pulumi.Input[str] user_metadata: Specifies the user metadata. > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.
sdk/python/pulumi_azure/eventhub/event_hub_consumer_group.py
get
vijayraavi/pulumi-azure
0
python
@staticmethod def get(resource_name, id, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None): "\n Get an existing EventHubConsumerGroup resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n \n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name __props__['namespace_name'] = namespace_name __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata return EventHubConsumerGroup(resource_name, opts=opts, __props__=__props__)
@staticmethod def get(resource_name, id, opts=None, eventhub_name=None, location=None, name=None, namespace_name=None, resource_group_name=None, user_metadata=None): "\n Get an existing EventHubConsumerGroup resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n \n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created.\n :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.\n :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.\n :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.\n :param pulumi.Input[str] user_metadata: Specifies the user metadata.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['eventhub_name'] = eventhub_name __props__['location'] = location __props__['name'] = name __props__['namespace_name'] = namespace_name __props__['resource_group_name'] = resource_group_name __props__['user_metadata'] = user_metadata return EventHubConsumerGroup(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing EventHubConsumerGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created. :param pulumi.Input[str] namespace_name: Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created. :param pulumi.Input[str] user_metadata: Specifies the user metadata. > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/eventhub_consumer_group_legacy.html.markdown.<|endoftext|>
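Taken together, the constructor and get method above follow the standard Pulumi resource pattern. A minimal usage sketch, assuming the event hub, namespace, and resource group named below already exist in Azure:

import pulumi
import pulumi_azure as azure

group = azure.eventhub.EventHubConsumerGroup(
    'example-consumer-group',
    eventhub_name='example-eventhub',        # placeholder names for
    namespace_name='example-namespace',      # pre-existing resources
    resource_group_name='example-resources',
    user_metadata='some-meta-data')

pulumi.export('consumer_group_id', group.id)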
b81dd19dda4c5b78409968ea80d92f00901cb25b7ad9393744d299658e664ae5
def Classify(self, *args): '\n :param P:\n :type P: gp_Pnt2d\n :param Tol:\n :type Tol: float\n :rtype: TopAbs_State\n\n ' return _IntStart.IntStart_SITopolTool_Classify(self, *args)
:param P: :type P: gp_Pnt2d :param Tol: :type Tol: float :rtype: TopAbs_State
Lib/site-packages/OCC/IntStart.py
Classify
JWerbrouck/RWTH_M1_Projekt
0
python
def Classify(self, *args): '\n :param P:\n :type P: gp_Pnt2d\n :param Tol:\n :type Tol: float\n :rtype: TopAbs_State\n\n ' return _IntStart.IntStart_SITopolTool_Classify(self, *args)
def Classify(self, *args): '\n :param P:\n :type P: gp_Pnt2d\n :param Tol:\n :type Tol: float\n :rtype: TopAbs_State\n\n ' return _IntStart.IntStart_SITopolTool_Classify(self, *args)<|docstring|>:param P: :type P: gp_Pnt2d :param Tol: :type Tol: float :rtype: TopAbs_State<|endoftext|>
80516eb6a922ab202a11180abf0e564c18d04125ff6bd27c8ec3391532529f7b
def _kill_pointed(self): '_kill_pointed(IntStart_SITopolTool self)' return _IntStart.IntStart_SITopolTool__kill_pointed(self)
_kill_pointed(IntStart_SITopolTool self)
Lib/site-packages/OCC/IntStart.py
_kill_pointed
JWerbrouck/RWTH_M1_Projekt
0
python
def _kill_pointed(self): return _IntStart.IntStart_SITopolTool__kill_pointed(self)
def _kill_pointed(self): return _IntStart.IntStart_SITopolTool__kill_pointed(self)<|docstring|>_kill_pointed(IntStart_SITopolTool self)<|endoftext|>
becae5d18bca64d9cd035e5284d5cbabf1a3a9a598cfa7cb5722fb7876499f33
def GetHandle(self): 'GetHandle(IntStart_SITopolTool self) -> Handle_IntStart_SITopolTool' return _IntStart.IntStart_SITopolTool_GetHandle(self)
GetHandle(IntStart_SITopolTool self) -> Handle_IntStart_SITopolTool
Lib/site-packages/OCC/IntStart.py
GetHandle
JWerbrouck/RWTH_M1_Projekt
0
python
def GetHandle(self): return _IntStart.IntStart_SITopolTool_GetHandle(self)
def GetHandle(self): return _IntStart.IntStart_SITopolTool_GetHandle(self)<|docstring|>GetHandle(IntStart_SITopolTool self) -> Handle_IntStart_SITopolTool<|endoftext|>
6767495c163b810279f09e88107019a4204561f8ada551195bdb81c3d539a434
def merge_templates(self, replacements, separator): '\n Duplicate template. Creates a copy of the template, does a merge, and separates them by a new paragraph, a new break or a new section break.\n separator must be :\n - page_break : Page Break. \n - column_break : Column Break. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - textWrapping_break : Line Break.\n - continuous_section : Continuous section break. Begins the section on the next paragraph.\n - evenPage_section : evenPage section break. section begins on the next even-numbered page, leaving the next odd page blank if necessary.\n - nextColumn_section : nextColumn section break. section begins on the following column on the page. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - nextPage_section : nextPage section break. section begins on the following page.\n - oddPage_section : oddPage section break. section begins on the next odd-numbered page, leaving the next even page blank if necessary.\n ' valid_separators = {'page_break', 'column_break', 'textWrapping_break', 'continuous_section', 'evenPage_section', 'nextColumn_section', 'nextPage_section', 'oddPage_section'} if (not (separator in valid_separators)): raise ValueError('Invalid separator argument') (type, sepClass) = separator.split('_') for part in self.parts.values(): root = part.getroot() tag = root.tag if ((tag == ('{%(w)s}ftr' % NAMESPACES)) or (tag == ('{%(w)s}hdr' % NAMESPACES))): continue if (sepClass == 'section'): firstSection = root.find('w:body/w:p/w:pPr/w:sectPr', namespaces=NAMESPACES) if (firstSection == None): firstSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) nextPageSec = deepcopy(firstSection) for child in nextPageSec: if (child.tag == ('{%(w)s}type' % NAMESPACES)): nextPageSec.remove(child) newType = etree.SubElement(nextPageSec, ('{%(w)s}type' % NAMESPACES)) newType.set(('{%(w)s}val' % NAMESPACES), type) secRoot = firstSection.getparent() secRoot.replace(firstSection, nextPageSec) lastSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) mainSection = deepcopy(lastSection) lsecRoot = lastSection.getparent() lsecRoot.remove(lastSection) childrenList = root.findall('w:body/*', namespaces=NAMESPACES) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.clear() lr = len(replacements) lc = len(childrenList) parts = [] for (i, repl) in enumerate(replacements): for (j, n) in enumerate(childrenList): element = deepcopy(n) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.append(element) parts.append(element) if ((j + 1) == lc): if ((i + 1) == lr): child.append(mainSection) parts.append(mainSection) elif (sepClass == 'section'): intSection = deepcopy(mainSection) p = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) pPr = etree.SubElement(p, ('{%(w)s}pPr' % NAMESPACES)) pPr.append(intSection) parts.append(p) elif (sepClass == 'break'): pb = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) r = etree.SubElement(pb, ('{%(w)s}r' % NAMESPACES)) nbreak = Element(('{%(w)s}br' % NAMESPACES)) nbreak.attrib[('{%(w)s}type' % NAMESPACES)] = type r.append(nbreak) self.merge(parts, **repl)
Duplicate template. Creates a copy of the template, does a merge, and separates them by a new paragraph, a new break or a new section break. separator must be : - page_break : Page Break. - column_break : Column Break. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS - textWrapping_break : Line Break. - continuous_section : Continuous section break. Begins the section on the next paragraph. - evenPage_section : evenPage section break. section begins on the next even-numbered page, leaving the next odd page blank if necessary. - nextColumn_section : nextColumn section break. section begins on the following column on the page. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS - nextPage_section : nextPage section break. section begins on the following page. - oddPage_section : oddPage section break. section begins on the next odd-numbered page, leaving the next even page blank if necessary.
mailmerge.py
merge_templates
danigoland/docx-mailmerge
2
python
def merge_templates(self, replacements, separator): '\n Duplicate template. Creates a copy of the template, does a merge, and separates them by a new paragraph, a new break or a new section break.\n separator must be :\n - page_break : Page Break. \n - column_break : Column Break. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - textWrapping_break : Line Break.\n - continuous_section : Continuous section break. Begins the section on the next paragraph.\n - evenPage_section : evenPage section break. section begins on the next even-numbered page, leaving the next odd page blank if necessary.\n - nextColumn_section : nextColumn section break. section begins on the following column on the page. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - nextPage_section : nextPage section break. section begins on the following page.\n - oddPage_section : oddPage section break. section begins on the next odd-numbered page, leaving the next even page blank if necessary.\n ' valid_separators = {'page_break', 'column_break', 'textWrapping_break', 'continuous_section', 'evenPage_section', 'nextColumn_section', 'nextPage_section', 'oddPage_section'} if (not (separator in valid_separators)): raise ValueError('Invalid separator argument') (type, sepClass) = separator.split('_') for part in self.parts.values(): root = part.getroot() tag = root.tag if ((tag == ('{%(w)s}ftr' % NAMESPACES)) or (tag == ('{%(w)s}hdr' % NAMESPACES))): continue if (sepClass == 'section'): firstSection = root.find('w:body/w:p/w:pPr/w:sectPr', namespaces=NAMESPACES) if (firstSection == None): firstSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) nextPageSec = deepcopy(firstSection) for child in nextPageSec: if (child.tag == ('{%(w)s}type' % NAMESPACES)): nextPageSec.remove(child) newType = etree.SubElement(nextPageSec, ('{%(w)s}type' % NAMESPACES)) newType.set(('{%(w)s}val' % NAMESPACES), type) secRoot = firstSection.getparent() secRoot.replace(firstSection, nextPageSec) lastSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) mainSection = deepcopy(lastSection) lsecRoot = lastSection.getparent() lsecRoot.remove(lastSection) childrenList = root.findall('w:body/*', namespaces=NAMESPACES) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.clear() lr = len(replacements) lc = len(childrenList) parts = [] for (i, repl) in enumerate(replacements): for (j, n) in enumerate(childrenList): element = deepcopy(n) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.append(element) parts.append(element) if ((j + 1) == lc): if ((i + 1) == lr): child.append(mainSection) parts.append(mainSection) elif (sepClass == 'section'): intSection = deepcopy(mainSection) p = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) pPr = etree.SubElement(p, ('{%(w)s}pPr' % NAMESPACES)) pPr.append(intSection) parts.append(p) elif (sepClass == 'break'): pb = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) r = etree.SubElement(pb, ('{%(w)s}r' % NAMESPACES)) nbreak = Element(('{%(w)s}br' % NAMESPACES)) nbreak.attrib[('{%(w)s}type' % NAMESPACES)] = type r.append(nbreak) self.merge(parts, **repl)
def merge_templates(self, replacements, separator): '\n Duplicate template. Creates a copy of the template, does a merge, and separates them by a new paragraph, a new break or a new section break.\n separator must be :\n - page_break : Page Break. \n - column_break : Column Break. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - textWrapping_break : Line Break.\n - continuous_section : Continuous section break. Begins the section on the next paragraph.\n - evenPage_section : evenPage section break. section begins on the next even-numbered page, leaving the next odd page blank if necessary.\n - nextColumn_section : nextColumn section break. section begins on the following column on the page. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS\n - nextPage_section : nextPage section break. section begins on the following page.\n - oddPage_section : oddPage section break. section begins on the next odd-numbered page, leaving the next even page blank if necessary.\n ' valid_separators = {'page_break', 'column_break', 'textWrapping_break', 'continuous_section', 'evenPage_section', 'nextColumn_section', 'nextPage_section', 'oddPage_section'} if (not (separator in valid_separators)): raise ValueError('Invalid separator argument') (type, sepClass) = separator.split('_') for part in self.parts.values(): root = part.getroot() tag = root.tag if ((tag == ('{%(w)s}ftr' % NAMESPACES)) or (tag == ('{%(w)s}hdr' % NAMESPACES))): continue if (sepClass == 'section'): firstSection = root.find('w:body/w:p/w:pPr/w:sectPr', namespaces=NAMESPACES) if (firstSection == None): firstSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) nextPageSec = deepcopy(firstSection) for child in nextPageSec: if (child.tag == ('{%(w)s}type' % NAMESPACES)): nextPageSec.remove(child) newType = etree.SubElement(nextPageSec, ('{%(w)s}type' % NAMESPACES)) newType.set(('{%(w)s}val' % NAMESPACES), type) secRoot = firstSection.getparent() secRoot.replace(firstSection, nextPageSec) lastSection = root.find('w:body/w:sectPr', namespaces=NAMESPACES) mainSection = deepcopy(lastSection) lsecRoot = lastSection.getparent() lsecRoot.remove(lastSection) childrenList = root.findall('w:body/*', namespaces=NAMESPACES) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.clear() lr = len(replacements) lc = len(childrenList) parts = [] for (i, repl) in enumerate(replacements): for (j, n) in enumerate(childrenList): element = deepcopy(n) for child in root: if (child.tag == ('{%(w)s}body' % NAMESPACES)): child.append(element) parts.append(element) if ((j + 1) == lc): if ((i + 1) == lr): child.append(mainSection) parts.append(mainSection) elif (sepClass == 'section'): intSection = deepcopy(mainSection) p = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) pPr = etree.SubElement(p, ('{%(w)s}pPr' % NAMESPACES)) pPr.append(intSection) parts.append(p) elif (sepClass == 'break'): pb = etree.SubElement(child, ('{%(w)s}p' % NAMESPACES)) r = etree.SubElement(pb, ('{%(w)s}r' % NAMESPACES)) nbreak = Element(('{%(w)s}br' % NAMESPACES)) nbreak.attrib[('{%(w)s}type' % NAMESPACES)] = type r.append(nbreak) self.merge(parts, **repl)<|docstring|>Duplicate template. Creates a copy of the template, does a merge, and separates them by a new paragraph, a new break or a new section break. separator must be : - page_break : Page Break. - column_break : Column Break. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS - textWrapping_break : Line Break. - continuous_section : Continuous section break. Begins the section on the next paragraph. 
- evenPage_section : evenPage section break. section begins on the next even-numbered page, leaving the next odd page blank if necessary. - nextColumn_section : nextColumn section break. section begins on the following column on the page. ONLY HAVE EFFECT IF DOCUMENT HAVE COLUMNS - nextPage_section : nextPage section break. section begins on the following page. - oddPage_section : oddPage section break. section begins on the next odd-numbered page, leaving the next even page blank if necessary.<|endoftext|>
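A short usage sketch for merge_templates above, assuming a hypothetical template.docx containing MERGEFIELDs named name and city; each dict in the list produces one filled copy of the template, separated here by a page break:

from mailmerge import MailMerge

document = MailMerge('template.docx')  # hypothetical template file
document.merge_templates(
    [{'name': 'Ada', 'city': 'London'},
     {'name': 'Grace', 'city': 'Arlington'}],
    separator='page_break')
document.write('output.docx')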
205d31190b7a479f688c7eab63a03508ab1c5b5d3a687e5198c55e5052fd059a
def merge_pages(self, replacements): '\n Deprecated method.\n ' warnings.warn('merge_pages has been deprecated in favour of merge_templates', category=DeprecationWarning, stacklevel=2) self.merge_templates(replacements, 'page_break')
Deprecated method.
mailmerge.py
merge_pages
danigoland/docx-mailmerge
2
python
def merge_pages(self, replacements): '\n \n ' warnings.warn('merge_pages has been deprecated in favour of merge_templates', category=DeprecationWarning, stacklevel=2) self.merge_templates(replacements, 'page_break')
def merge_pages(self, replacements): '\n \n ' warnings.warn('merge_pages has been deprecated in favour of merge_templates', category=DeprecationWarning, stacklevel=2) self.merge_templates(replacements, 'page_break')<|docstring|>Deprecated method.<|endoftext|>
f8b0a2b1405cff14dd1e1fb27c0d410cd92a88d483d01b6fbef39157c14cf78f
def task1(): '\n Pick three ids at random and plot their line graph\n ' size = 3 np.random.seed(1) plt.ioff() (train_vectors, train_labels) = get_vector_and_labels(TRAIN_LABELED_FILE) output_folder = (RESULT_DIR / 'task1') all_indices = np.arange(32) np.random.shuffle(all_indices) random_ids = all_indices[:size] random_window_indices = np.random.randint(5, size=size) random_indices = np.multiply(random_ids, random_window_indices) subset = np.take(train_vectors, random_indices, axis=0) label_subset = np.take(train_labels, random_indices, axis=0) for i in range(size): plt.figure((i + 1)) plt.title('User ID: {}'.format(int(label_subset[i][(- 1)]))) plt.plot(subset[i]) plt.savefig((output_folder / 'vis{}.png'.format((i + 1))))
Pick three ids at random and plot their line graph
omsignal/task.py
task1
eeishaan/ift6759-block1
0
python
def task1(): '\n \n ' size = 3 np.random.seed(1) plt.ioff() (train_vectors, train_labels) = get_vector_and_labels(TRAIN_LABELED_FILE) output_folder = (RESULT_DIR / 'task1') all_indices = np.arange(32) np.random.shuffle(all_indices) random_ids = all_indices[:size] random_window_indices = np.random.randint(5, size=size) random_indices = np.multiply(random_ids, random_window_indices) subset = np.take(train_vectors, random_indices, axis=0) label_subset = np.take(train_labels, random_indices, axis=0) for i in range(size): plt.figure((i + 1)) plt.title('User ID: {}'.format(int(label_subset[i][(- 1)]))) plt.plot(subset[i]) plt.savefig((output_folder / 'vis{}.png'.format((i + 1))))
def task1(): '\n \n ' size = 3 np.random.seed(1) plt.ioff() (train_vectors, train_labels) = get_vector_and_labels(TRAIN_LABELED_FILE) output_folder = (RESULT_DIR / 'task1') all_indices = np.arange(32) np.random.shuffle(all_indices) random_ids = all_indices[:size] random_window_indices = np.random.randint(5, size=size) random_indices = np.multiply(random_ids, random_window_indices) subset = np.take(train_vectors, random_indices, axis=0) label_subset = np.take(train_labels, random_indices, axis=0) for i in range(size): plt.figure((i + 1)) plt.title('User ID: {}'.format(int(label_subset[i][(- 1)]))) plt.plot(subset[i]) plt.savefig((output_folder / 'vis{}.png'.format((i + 1))))<|docstring|>Pick three ids at random and plot their line graph<|endoftext|>
c9af56c4d17c0b03335a97aec7b84aa4f9054331a7b283dfd5cc808c8581e68b
def batchify(self, obs): 'Convert batch observations `text` and `label` to\n rank 3 tensor `x` and rank 2 tensor `q`, `y`\n ' exs = [ex for ex in obs if ('text' in ex)] ids = [ex['id'] for ex in obs if ('text' in ex)] valid_inds = [i for (i, ex) in enumerate(obs) if ('text' in ex)] if (len(exs) == 0): return ((None,) * 5) ms = self.memory_size xs = [ex['text'].split('\n') for ex in exs] qs = [self.txt2vec(x.pop()) for x in xs] parsed_xs = [] for x in xs: x_mask = [('?' not in s) for s in x] x = [s for (s, b) in zip(x, x_mask) if b] if (('labels' in exs[0]) and self.use_random_noise): parsed_x = [] for s in x: parsed_x.append(s) if (random.random() < 0.1): parsed_x.append('') x = parsed_x parsed_xs.append(x[(- ms):]) xs = parsed_xs xs = [[self.txt2vec(sent) for sent in x] for x in xs] x_max_len = ms arr_max_len = max((max((len(arr) for arr in x)) for x in xs)) tensor = xp.zeros((len(xs), x_max_len, arr_max_len)).astype(xp.int32) for (i, x) in enumerate(xs): offset = (ms - len(x)) for (j, arr) in enumerate(x): tensor[(i, (offset + j))][:len(arr)] = arr x = chainer.Variable(tensor) if False: print('\n\nx:', [self.vec2txt(tensor[0][i]) for i in range(len(tensor[0]))]) arr_max_len = max([len(arr) for arr in qs]) tensor = xp.zeros((len(qs), arr_max_len)).astype(xp.int32) for (j, arr) in enumerate(qs): tensor[j][:len(arr)] = arr q = chainer.Variable(tensor) if False: print('q:', self.vec2txt(tensor[0])) y = None if ('labels' in exs[0]): ys = [self.txt2vec(' '.join(ex['labels']))[:2] for ex in exs] tensor = xp.zeros((len(ys), 2)).astype(xp.int32) for (j, arr) in enumerate(ys): tensor[j][:len(arr)] = arr y = chainer.Variable(tensor) if False: print('y:', self.vec2txt(tensor[0])) return (x, q, y, ids, valid_inds)
Convert batch observations `text` and `label` to rank 3 tensor `x` and rank 2 tensor `q`, `y`
chainer_memn2n/chainer_memn2n.py
batchify
ryonakamura/parlai_agents
47
python
def batchify(self, obs): 'Convert batch observations `text` and `label` to\n rank 3 tensor `x` and rank 2 tensor `q`, `y`\n ' exs = [ex for ex in obs if ('text' in ex)] ids = [ex['id'] for ex in obs if ('text' in ex)] valid_inds = [i for (i, ex) in enumerate(obs) if ('text' in ex)] if (len(exs) == 0): return ((None,) * 5) ms = self.memory_size xs = [ex['text'].split('\n') for ex in exs] qs = [self.txt2vec(x.pop()) for x in xs] parsed_xs = [] for x in xs: x_mask = [('?' not in s) for s in x] x = [s for (s, b) in zip(x, x_mask) if b] if (('labels' in exs[0]) and self.use_random_noise): parsed_x = [] for s in x: parsed_x.append(s) if (random.random() < 0.1): parsed_x.append('') x = parsed_x parsed_xs.append(x[(- ms):]) xs = parsed_xs xs = [[self.txt2vec(sent) for sent in x] for x in xs] x_max_len = ms arr_max_len = max((max((len(arr) for arr in x)) for x in xs)) tensor = xp.zeros((len(xs), x_max_len, arr_max_len)).astype(xp.int32) for (i, x) in enumerate(xs): offset = (ms - len(x)) for (j, arr) in enumerate(x): tensor[(i, (offset + j))][:len(arr)] = arr x = chainer.Variable(tensor) if False: print('\n\nx:', [self.vec2txt(tensor[0][i]) for i in range(len(tensor[0]))]) arr_max_len = max([len(arr) for arr in qs]) tensor = xp.zeros((len(qs), arr_max_len)).astype(xp.int32) for (j, arr) in enumerate(qs): tensor[j][:len(arr)] = arr q = chainer.Variable(tensor) if False: print('q:', self.vec2txt(tensor[0])) y = None if ('labels' in exs[0]): ys = [self.txt2vec(' '.join(ex['labels']))[:2] for ex in exs] tensor = xp.zeros((len(ys), 2)).astype(xp.int32) for (j, arr) in enumerate(ys): tensor[j][:len(arr)] = arr y = chainer.Variable(tensor) if False: print('y:', self.vec2txt(tensor[0])) return (x, q, y, ids, valid_inds)
def batchify(self, obs): 'Convert batch observations `text` and `label` to\n rank 3 tensor `x` and rank 2 tensor `q`, `y`\n ' exs = [ex for ex in obs if ('text' in ex)] ids = [ex['id'] for ex in obs if ('text' in ex)] valid_inds = [i for (i, ex) in enumerate(obs) if ('text' in ex)] if (len(exs) == 0): return ((None,) * 5) ms = self.memory_size xs = [ex['text'].split('\n') for ex in exs] qs = [self.txt2vec(x.pop()) for x in xs] parsed_xs = [] for x in xs: x_mask = [('?' not in s) for s in x] x = [s for (s, b) in zip(x, x_mask) if b] if (('labels' in exs[0]) and self.use_random_noise): parsed_x = [] for s in x: parsed_x.append(s) if (random.random() < 0.1): parsed_x.append('') x = parsed_x parsed_xs.append(x[(- ms):]) xs = parsed_xs xs = [[self.txt2vec(sent) for sent in x] for x in xs] x_max_len = ms arr_max_len = max((max((len(arr) for arr in x)) for x in xs)) tensor = xp.zeros((len(xs), x_max_len, arr_max_len)).astype(xp.int32) for (i, x) in enumerate(xs): offset = (ms - len(x)) for (j, arr) in enumerate(x): tensor[(i, (offset + j))][:len(arr)] = arr x = chainer.Variable(tensor) if False: print('\n\nx:', [self.vec2txt(tensor[0][i]) for i in range(len(tensor[0]))]) arr_max_len = max([len(arr) for arr in qs]) tensor = xp.zeros((len(qs), arr_max_len)).astype(xp.int32) for (j, arr) in enumerate(qs): tensor[j][:len(arr)] = arr q = chainer.Variable(tensor) if False: print('q:', self.vec2txt(tensor[0])) y = None if ('labels' in exs[0]): ys = [self.txt2vec(' '.join(ex['labels']))[:2] for ex in exs] tensor = xp.zeros((len(ys), 2)).astype(xp.int32) for (j, arr) in enumerate(ys): tensor[j][:len(arr)] = arr y = chainer.Variable(tensor) if False: print('y:', self.vec2txt(tensor[0])) return (x, q, y, ids, valid_inds)<|docstring|>Convert batch observations `text` and `label` to rank 3 tensor `x` and rank 2 tensor `q`, `y`<|endoftext|>
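The padding scheme in batchify above keeps only the last memory_size sentences of each story and right-aligns them in the (batch, memory_size, max_len) tensor, so older, empty memory slots stay zero-padded. A standalone NumPy sketch of that alignment with toy token ids:

import numpy as np

ms = 4  # memory_size
xs = [[[3, 5], [7, 2, 9]],
      [[1], [4, 4], [6, 1, 8], [2], [9, 9]]]  # second story gets truncated
xs = [x[-ms:] for x in xs]                    # keep the last ms sentences
arr_max_len = max(len(arr) for x in xs for arr in x)
tensor = np.zeros((len(xs), ms, arr_max_len), dtype=np.int32)
for i, x in enumerate(xs):
    offset = ms - len(x)   # right-align: pad missing memories in front
    for j, arr in enumerate(x):
        tensor[i, offset + j, :len(arr)] = arr
print(tensor.shape)  # (2, 4, 3)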
01ebe870e8dd9e345a4e67d5b3e0641e41265d3bd39f1b78937b6d45bd8cf4a5
def sparsemax_loss(X, target, k=None): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxLossFunction.apply(X, target, k)
sparsemax loss: sparse alternative to cross-entropy. Computed using a partial sorting strategy. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.
ludwig/utils/entmax/losses.py
sparsemax_loss
dantreiman/ludwig
7739
python
def sparsemax_loss(X, target, k=None): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxLossFunction.apply(X, target, k)
def sparsemax_loss(X, target, k=None): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxLossFunction.apply(X, target, k)<|docstring|>sparsemax loss: sparse alternative to cross-entropy. Computed using a partial sorting strategy. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.<|endoftext|>
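A small usage sketch for the loss above (shapes follow the docstring; the import path mirrors this record's path field, and the sample tensors are invented):

import torch
from ludwig.utils.entmax.losses import sparsemax_loss

X = torch.randn(4, 10, requires_grad=True)  # (n_samples, n_classes) predicted scores
target = torch.randint(0, 10, (4,))         # ground-truth labels in [0, n_classes)
losses = sparsemax_loss(X, target)          # shape (4,): one loss per sample
losses.mean().backward()                    # per-row gradient of X is p* - e_target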
425f2f43714ed0f9d84e6777ba489f1f80afd2ce416ac39f7892544a517dfdc4
def sparsemax_bisect_loss(X, target, n_iter=50): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using bisection.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxBisectLossFunction.apply(X, target, n_iter)
sparsemax loss: sparse alternative to cross-entropy. Computed using bisection. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.
ludwig/utils/entmax/losses.py
sparsemax_bisect_loss
dantreiman/ludwig
7739
python
def sparsemax_bisect_loss(X, target, n_iter=50): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using bisection.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxBisectLossFunction.apply(X, target, n_iter)
def sparsemax_bisect_loss(X, target, n_iter=50): 'sparsemax loss: sparse alternative to cross-entropy.\n\n Computed using bisection.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return SparsemaxBisectLossFunction.apply(X, target, n_iter)<|docstring|>sparsemax loss: sparse alternative to cross-entropy. Computed using bisection. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.<|endoftext|>
e78623aa5dd84b85e212d1f9bcabefc1b9d9753ebb8dedeb64c10244d747843d
def entmax15_loss(X, target, k=None): '1.5-entmax loss: sparse alternative to cross-entropy\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return Entmax15LossFunction.apply(X, target, k)
1.5-entmax loss: sparse alternative to cross-entropy Computed using a partial sorting strategy. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.
ludwig/utils/entmax/losses.py
entmax15_loss
dantreiman/ludwig
7739
python
def entmax15_loss(X, target, k=None): '1.5-entmax loss: sparse alternative to cross-entropy\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return Entmax15LossFunction.apply(X, target, k)
def entmax15_loss(X, target, k=None): '1.5-entmax loss: sparse alternative to cross-entropy\n\n Computed using a partial sorting strategy.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return Entmax15LossFunction.apply(X, target, k)<|docstring|>1.5-entmax loss: sparse alternative to cross-entropy Computed using a partial sorting strategy. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.<|endoftext|>
73a0f8f32caebe2b3c29b77e0a5398c83770ff446309ac2b562822d59906e0b5
def entmax_bisect_loss(X, target, alpha=1.5, n_iter=50): 'alpha-entmax loss: sparse alternative to cross-entropy.\n\n Computed using bisection, supporting arbitrary alpha > 1.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n alpha : float or torch.Tensor\n Tensor of alpha parameters (> 1) to use for each row of X. If scalar\n or python float, the same value is used for all rows. A value of\n alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover\n softmax. For numeric reasons, this algorithm does not work with `alpha=1`:\n if you want softmax, we recommend `torch.nn.softmax`\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return EntmaxBisectLossFunction.apply(X, target, alpha, n_iter)
alpha-entmax loss: sparse alternative to cross-entropy. Computed using bisection, supporting arbitrary alpha > 1. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. alpha : float or torch.Tensor Tensor of alpha parameters (> 1) to use for each row of X. If scalar or python float, the same value is used for all rows. A value of alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover softmax. For numeric reasons, this algorithm does not work with `alpha=1`: if you want softmax, we recommend `torch.nn.softmax` n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.
ludwig/utils/entmax/losses.py
entmax_bisect_loss
dantreiman/ludwig
7739
python
def entmax_bisect_loss(X, target, alpha=1.5, n_iter=50): 'alpha-entmax loss: sparse alternative to cross-entropy.\n\n Computed using bisection, supporting arbitrary alpha > 1.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n alpha : float or torch.Tensor\n Tensor of alpha parameters (> 1) to use for each row of X. If scalar\n or python float, the same value is used for all rows. A value of\n alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover\n softmax. For numeric reasons, this algorithm does not work with `alpha=1`:\n if you want softmax, we recommend `torch.nn.softmax`\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return EntmaxBisectLossFunction.apply(X, target, alpha, n_iter)
def entmax_bisect_loss(X, target, alpha=1.5, n_iter=50): 'alpha-entmax loss: sparse alternative to cross-entropy.\n\n Computed using bisection, supporting arbitrary alpha > 1.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_classes)\n The input 2D tensor of predicted scores\n\n target : torch.LongTensor, shape=(n_samples,)\n The ground truth labels, 0 <= target < n_classes.\n\n alpha : float or torch.Tensor\n Tensor of alpha parameters (> 1) to use for each row of X. If scalar\n or python float, the same value is used for all rows. A value of\n alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover\n softmax. For numeric reasons, this algorithm does not work with `alpha=1`:\n if you want softmax, we recommend `torch.nn.softmax`\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n Returns\n -------\n losses, torch.Tensor, shape=(n_samples,)\n The loss incurred at each sample.\n ' return EntmaxBisectLossFunction.apply(X, target, alpha, n_iter)<|docstring|>alpha-entmax loss: sparse alternative to cross-entropy. Computed using bisection, supporting arbitrary alpha > 1. Parameters ---------- X : torch.Tensor, shape=(n_samples, n_classes) The input 2D tensor of predicted scores target : torch.LongTensor, shape=(n_samples,) The ground truth labels, 0 <= target < n_classes. alpha : float or torch.Tensor Tensor of alpha parameters (> 1) to use for each row of X. If scalar or python float, the same value is used for all rows. A value of alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover softmax. For numeric reasons, this algorithm does not work with `alpha=1`: if you want softmax, we recommend `torch.nn.softmax` n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. Returns ------- losses, torch.Tensor, shape=(n_samples,) The loss incurred at each sample.<|endoftext|>
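A usage sketch for the alpha-parametrised loss above (import path mirrors this record's path field; tensors invented):

import torch
from ludwig.utils.entmax.losses import entmax_bisect_loss

X = torch.randn(4, 10, requires_grad=True)
target = torch.randint(0, 10, (4,))
loss_15 = entmax_bisect_loss(X, target, alpha=1.5)  # 1.5-entmax, computed by bisection
loss_sp = entmax_bisect_loss(X, target, alpha=2.0)  # alpha=2 matches sparsemax
# per the docstring, alpha may also be a tensor of per-row values (> 1)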
b5edc0d7484a99a8043c001581d43db0adb57c08a47e562124c81e2a9700cc5a
@classmethod def forward(cls, ctx, X, target, alpha, proj_args): 'X (FloatTensor): n x num_classes target (LongTensor): n, the indices of the target classes.' assert (X.shape[0] == target.shape[0]) p_star = cls.project(X, alpha, **proj_args) loss = cls.omega(p_star, alpha) p_star.scatter_add_(1, target.unsqueeze(1), torch.full_like(p_star, (- 1))) loss += torch.einsum('ij,ij->i', p_star, X) ctx.save_for_backward(p_star) return loss
X (FloatTensor): n x num_classes target (LongTensor): n, the indices of the target classes.
ludwig/utils/entmax/losses.py
forward
dantreiman/ludwig
7739
python
@classmethod def forward(cls, ctx, X, target, alpha, proj_args): assert (X.shape[0] == target.shape[0]) p_star = cls.project(X, alpha, **proj_args) loss = cls.omega(p_star, alpha) p_star.scatter_add_(1, target.unsqueeze(1), torch.full_like(p_star, (- 1))) loss += torch.einsum('ij,ij->i', p_star, X) ctx.save_for_backward(p_star) return loss
@classmethod def forward(cls, ctx, X, target, alpha, proj_args): assert (X.shape[0] == target.shape[0]) p_star = cls.project(X, alpha, **proj_args) loss = cls.omega(p_star, alpha) p_star.scatter_add_(1, target.unsqueeze(1), torch.full_like(p_star, (- 1))) loss += torch.einsum('ij,ij->i', p_star, X) ctx.save_for_backward(p_star) return loss<|docstring|>X (FloatTensor): n x num_classes target (LongTensor): n, the indices of the target classes.<|endoftext|>
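Spelled out, the forward above implements the generic Fenchel-Young loss pattern (a reading of the code, not a quote from the source): with p* = project(X) taken row-wise and e_y the one-hot target,

    \ell(x, y) = \Omega(p^\star) + \langle p^\star - e_y,\; x \rangle

The in-place scatter_add_ turns p* into p* - e_y before the inner product, and that same tensor is what gets saved for backward, because \nabla_x \ell = p^\star - e_y.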
c99a300b7a0140e53adf688fe27b382e41e6eeeb1e1f351b137453854688ffad
def data_filename(modname, filename): 'Given the module name, and filename, finds the path to the file from the python sys.modules\n dictionary. Used to access the header dictionary file' import os, sys filename = os.path.join(os.path.dirname(sys.modules[modname].__file__), filename) return filename
Given the module name, and filename, finds the path to the file from the python sys.modules dictionary. Used to access the header dictionary file
src/dwell/rad/__init__.py
data_filename
eelsirhc/pydwell
0
python
def data_filename(modname, filename): 'Given the module name, and filename, finds the path to the file from the python sys.modules\n dictionary. Used to access the header dictionary file' import os, sys filename = os.path.join(os.path.dirname(sys.modules[modname].__file__), filename) return filename
def data_filename(modname, filename): 'Given the module name, and filename, finds the path to the file from the python sys.modules\n dictionary. Used to access the header dictionary file' import os, sys filename = os.path.join(os.path.dirname(sys.modules[modname].__file__), filename) return filename<|docstring|>Given the module name, and filename, finds the path to the file from the python sys.modules dictionary. Used to access the header dictionary file<|endoftext|>
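A usage sketch for data_filename (a stdlib package stands in for the real module, since the function only needs the name to be present in sys.modules):

import json  # any already-imported package works; 'json' is just an illustration
print(data_filename('json', 'tool.py'))  # e.g. /usr/lib/python3.11/json/tool.py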
261cd7fe86a73aa905244e2c2b6b8d4b79f46f63fed739eaa3e70af0b0044cbe
def has_install(self, pkg_name, pkg_version=None, binary=False, with_dep=False): "\n pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)\n return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)\n " return True
pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version) return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)
src/pm_proxy/npmjs.py
has_install
Yanivmd/maloss
1
python
def has_install(self, pkg_name, pkg_version=None, binary=False, with_dep=False): "\n pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)\n return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)\n " return True
def has_install(self, pkg_name, pkg_version=None, binary=False, with_dep=False): "\n pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)\n return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)\n " return True<|docstring|>pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version) return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)<|endoftext|>
c1731b2f44aa2498e07b9d8dd9893f2f9c1bb3d305c1f724c0a905d9ea02ddbb
def filesys_decode(path): '\n Ensure that the given path is decoded,\n NONE when no expected encoding works\n ' if isinstance(path, six.text_type): return path fs_enc = (sys.getfilesystemencoding() or 'utf-8') candidates = (fs_enc, 'utf-8') for enc in candidates: try: return path.decode(enc) except UnicodeDecodeError: continue
Ensure that the given path is decoded, NONE when no expected encoding works
venv/Lib/site-packages/setuptools/unicode_utils.py
filesys_decode
suraj038/TCS_Hospital_Management_System_Case_Studies
38667
python
def filesys_decode(path): '\n Ensure that the given path is decoded,\n NONE when no expected encoding works\n ' if isinstance(path, six.text_type): return path fs_enc = (sys.getfilesystemencoding() or 'utf-8') candidates = (fs_enc, 'utf-8') for enc in candidates: try: return path.decode(enc) except UnicodeDecodeError: continue
def filesys_decode(path): '\n Ensure that the given path is decoded,\n NONE when no expected encoding works\n ' if isinstance(path, six.text_type): return path fs_enc = (sys.getfilesystemencoding() or 'utf-8') candidates = (fs_enc, 'utf-8') for enc in candidates: try: return path.decode(enc) except UnicodeDecodeError: continue<|docstring|>Ensure that the given path is decoded, NONE when no expected encoding works<|endoftext|>
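A quick sketch of the decode behaviour above (assuming filesys_decode and six are importable as in this record):

raw = 'naïve.txt'.encode('utf-8')      # a bytes path, as low-level OS APIs may return
print(filesys_decode(raw))             # 'naïve.txt' once utf-8 (or the fs encoding) succeeds
print(filesys_decode('plain.txt'))     # text input is returned unchanged
# undecodable input exhausts both candidates and implicitly yields None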
5019ca3065935e09a314fdc4ae8ecea5a26f6a7272d44b3a0f477bf4251a0d90
def try_encode(string, enc): 'turn unicode encoding into a functional routine' try: return string.encode(enc) except UnicodeEncodeError: return None
turn unicode encoding into a functional routine
venv/Lib/site-packages/setuptools/unicode_utils.py
try_encode
suraj038/TCS_Hospital_Management_System_Case_Studies
38667
python
def try_encode(string, enc): try: return string.encode(enc) except UnicodeEncodeError: return None
def try_encode(string, enc): try: return string.encode(enc) except UnicodeEncodeError: return None<|docstring|>turn unicode encoding into a functional routine<|endoftext|>
9c5e482f90ca15a5af947c3e07a96bb55d24cc664c1f8dc205b6e39c21b39b0d
def sum_earnings(finance_data): 'Validate and process input.' try: required_nums = (finance_data.count(',') + 1) history = list(map(int, finance_data.split(','))) if (len(history) != required_nums): raise ValueError balance = 0 for i in history: balance += i if (balance < 0): balance = 0 return balance except ValueError: return 0
Validate and process input.
python/beginner/sum-earnings_Kushagra-0801.py
sum_earnings
fredbaa/hacktoberithms
16
python
def sum_earnings(finance_data): try: required_nums = (finance_data.count(',') + 1) history = list(map(int, finance_data.split(','))) if (len(history) != required_nums): raise ValueError balance = 0 for i in history: balance += i if (balance < 0): balance = 0 return balance except ValueError: return 0
def sum_earnings(finance_data): try: required_nums = (finance_data.count(',') + 1) history = list(map(int, finance_data.split(','))) if (len(history) != required_nums): raise ValueError balance = 0 for i in history: balance += i if (balance < 0): balance = 0 return balance except ValueError: return 0<|docstring|>Validate and process input.<|endoftext|>
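A worked pass through the reset-on-negative-balance rule above (inputs invented): for '1,2,-6,7' the running balance goes 1 -> 3 -> -3, is clamped back to 0, then ends at 7.

print(sum_earnings('1,2,-6,7'))   # 7: the -3 dip is reset to 0 before the final +7
print(sum_earnings('5,x,3'))      # 0: int('x') raises ValueError, which maps to 0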
06919361e952e6e104d4da031f5922cae267f92cf20e9be41f2bee10ad303317
def DepotToolsPylint(input_api, output_api): 'Gather all the pylint logic into one place to make it self-contained.' files_to_check = ['^[^/]*\\.py$', '^testing_support/[^/]*\\.py$', '^tests/[^/]*\\.py$', '^recipe_modules/.*\\.py$'] files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) if os.path.exists('.gitignore'): with open('.gitignore') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) if os.path.exists('.git/info/exclude'): with open('.git/info/exclude') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) disabled_warnings = ['R0401', 'W0613'] return input_api.canned_checks.GetPylint(input_api, output_api, files_to_check=files_to_check, files_to_skip=files_to_skip, disabled_warnings=disabled_warnings)
Gather all the pylint logic into one place to make it self-contained.
third_party/depot_tools/PRESUBMIT.py
DepotToolsPylint
gengleilei/wee8
3
python
def DepotToolsPylint(input_api, output_api): files_to_check = ['^[^/]*\\.py$', '^testing_support/[^/]*\\.py$', '^tests/[^/]*\\.py$', '^recipe_modules/.*\\.py$'] files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) if os.path.exists('.gitignore'): with open('.gitignore') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) if os.path.exists('.git/info/exclude'): with open('.git/info/exclude') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) disabled_warnings = ['R0401', 'W0613'] return input_api.canned_checks.GetPylint(input_api, output_api, files_to_check=files_to_check, files_to_skip=files_to_skip, disabled_warnings=disabled_warnings)
def DepotToolsPylint(input_api, output_api): files_to_check = ['^[^/]*\\.py$', '^testing_support/[^/]*\\.py$', '^tests/[^/]*\\.py$', '^recipe_modules/.*\\.py$'] files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) if os.path.exists('.gitignore'): with open('.gitignore') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) if os.path.exists('.git/info/exclude'): with open('.git/info/exclude') as fh: lines = [l.strip() for l in fh.readlines()] files_to_skip.extend([fnmatch.translate(l) for l in lines if (l and (not l.startswith('#')))]) disabled_warnings = ['R0401', 'W0613'] return input_api.canned_checks.GetPylint(input_api, output_api, files_to_check=files_to_check, files_to_skip=files_to_skip, disabled_warnings=disabled_warnings)<|docstring|>Gather all the pylint logic into one place to make it self-contained.<|endoftext|>
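The .gitignore handling above leans on fnmatch.translate to turn ignore globs into the regex skip-list pylint expects; a quick illustration (patterns invented — note fnmatch is only an approximation of full gitignore syntax, e.g. it has no '**' semantics):

import fnmatch
print(fnmatch.translate('*.pyc'))   # a regex string such as '(?s:.*\\.pyc)\\Z'
print(fnmatch.translate('build/'))  # trailing-slash directory patterns translate too, if less precisely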
a9c22c891302665c5da5b63bf4a614cb68480eb8bda1cd112ca912a87dd0ec2b
def find_demand_list_match_str(title):
    'Fuzzy-search the demand list\n\n GET /api/project/demand/<str:title>\n '
    return jsonify({'msg': 'ok', 'data': list(demand.find_demand_list_match_str(title))})
Fuzzy-search the demand list GET /api/project/demand/<str:title>
api/controller/demand.py
find_demand_list_match_str
preservance717/pms
27
python
def find_demand_list_match_str(title):
    'Fuzzy-search the demand list\n\n GET /api/project/demand/<str:title>\n '
    return jsonify({'msg': 'ok', 'data': list(demand.find_demand_list_match_str(title))})
def find_demand_list_match_str(title):
    'Fuzzy-search the demand list\n\n GET /api/project/demand/<str:title>\n '
    return jsonify({'msg': 'ok', 'data': list(demand.find_demand_list_match_str(title))})<|docstring|>Fuzzy-search the demand list GET /api/project/demand/<str:title><|endoftext|>
ee7d6d6da22905db2837159ef0308aad77c08d3df044a0f0ae3dcab1c3e8526a
@fresh_jwt_required
def demand_search():
    'Fuzzy-search project demands\n GET /api/demand?title=aaa&projectId=1\n '
    return {'data': list(Demand.find().where((Demand.projectId == request.args.get('projectId')), (Demand.title % (('%' + request.args.get('title')) + '%'))))}
Fuzzy-search project demands GET /api/demand?title=aaa&projectId=1
api/controller/demand.py
demand_search
preservance717/pms
27
python
@fresh_jwt_required
def demand_search():
    'Fuzzy-search project demands\n GET /api/demand?title=aaa&projectId=1\n '
    return {'data': list(Demand.find().where((Demand.projectId == request.args.get('projectId')), (Demand.title % (('%' + request.args.get('title')) + '%'))))}
@fresh_jwt_required
def demand_search():
    'Fuzzy-search project demands\n GET /api/demand?title=aaa&projectId=1\n '
    return {'data': list(Demand.find().where((Demand.projectId == request.args.get('projectId')), (Demand.title % (('%' + request.args.get('title')) + '%'))))}<|docstring|>Fuzzy-search project demands GET /api/demand?title=aaa&projectId=1<|endoftext|>
a99d530586e0e82823edfe03c659ef4ca772b36bbb9ecd407ff4f9f06f23193a
@fresh_jwt_required
def demand_add():
    'Add a demand\n\n POST /api/project/demand\n '
    if (not request.is_json):
        return (jsonify({'msg': 'Missing JSON in request'}), 400)
    schema = DemandSchema()
    (data, errors) = schema.load(request.json)
    if errors:
        return (jsonify({'msg': errors}), 400)
    try:
        data = demand.create_demand(request.json)
        if (data[1] == False):
            return (jsonify({'msg': 'Duplicate demand title'}), 200)
        elif (data[1] == True):
            return (jsonify({'msg': 'ok', 'data': model_to_dict(data[0])}), 201)
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})
Add a demand POST /api/project/demand
api/controller/demand.py
demand_add
preservance717/pms
27
python
@fresh_jwt_required
def demand_add():
    'Add a demand\n\n POST /api/project/demand\n '
    if (not request.is_json):
        return (jsonify({'msg': 'Missing JSON in request'}), 400)
    schema = DemandSchema()
    (data, errors) = schema.load(request.json)
    if errors:
        return (jsonify({'msg': errors}), 400)
    try:
        data = demand.create_demand(request.json)
        if (data[1] == False):
            return (jsonify({'msg': 'Duplicate demand title'}), 200)
        elif (data[1] == True):
            return (jsonify({'msg': 'ok', 'data': model_to_dict(data[0])}), 201)
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})
@fresh_jwt_required
def demand_add():
    'Add a demand\n\n POST /api/project/demand\n '
    if (not request.is_json):
        return (jsonify({'msg': 'Missing JSON in request'}), 400)
    schema = DemandSchema()
    (data, errors) = schema.load(request.json)
    if errors:
        return (jsonify({'msg': errors}), 400)
    try:
        data = demand.create_demand(request.json)
        if (data[1] == False):
            return (jsonify({'msg': 'Duplicate demand title'}), 200)
        elif (data[1] == True):
            return (jsonify({'msg': 'ok', 'data': model_to_dict(data[0])}), 201)
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})<|docstring|>Add a demand POST /api/project/demand<|endoftext|>
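A hedged client-side sketch of exercising the endpoint above (host, port, JWT, and payload fields are assumptions inferred from the handler, not documented values):

import requests

resp = requests.post(
    'http://localhost:5000/api/project/demand',              # host/port assumed
    json={'title': 'Export report as PDF', 'projectId': 1},  # fields guessed from the DemandSchema usage
    headers={'Authorization': 'Bearer <fresh-JWT>'},         # @fresh_jwt_required expects a fresh token
)
print(resp.status_code, resp.json())  # 201 plus the created demand on success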
ad943b7076bb39476fbc7857821573bcf2ca38d796c9c68785c2384935d8ce41
@fresh_jwt_required
def demand_info(demand_id):
    'Get demand details\n\n GET /api/project/demand/<int:demand_id>\n '
    return jsonify({'msg': 'ok', 'data': demand.demand_detail(demand_id)})
Get demand details GET /api/project/demand/<int:demand_id>
api/controller/demand.py
demand_info
preservance717/pms
27
python
@fresh_jwt_required
def demand_info(demand_id):
    'Get demand details\n\n GET /api/project/demand/<int:demand_id>\n '
    return jsonify({'msg': 'ok', 'data': demand.demand_detail(demand_id)})
@fresh_jwt_required
def demand_info(demand_id):
    'Get demand details\n\n GET /api/project/demand/<int:demand_id>\n '
    return jsonify({'msg': 'ok', 'data': demand.demand_detail(demand_id)})<|docstring|>Get demand details GET /api/project/demand/<int:demand_id><|endoftext|>
82721066fecb7a1121b93ab010ed9fb5a7c2b88efea752dbfc51715389916c94
def demand_update():
    'Update demand info\n\n PUT /api/project/demand\n '
    if (not request.json):
        abort(400)
    if (demand.find_demand_title_by_id(request.json['id']) == request.json['title']):
        pass
    elif demand.find_one_demand_by_title(request.json['title']):
        return jsonify({'msg': 'This demand already exists'})
    try:
        data = demand.update_demands(request.json)
        return jsonify({'msg': 'ok', 'data': model_to_dict(data)})
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})
Update demand info PUT /api/project/demand
api/controller/demand.py
demand_update
preservance717/pms
27
python
def demand_update():
    'Update demand info\n\n PUT /api/project/demand\n '
    if (not request.json):
        abort(400)
    if (demand.find_demand_title_by_id(request.json['id']) == request.json['title']):
        pass
    elif demand.find_one_demand_by_title(request.json['title']):
        return jsonify({'msg': 'This demand already exists'})
    try:
        data = demand.update_demands(request.json)
        return jsonify({'msg': 'ok', 'data': model_to_dict(data)})
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})
def demand_update():
    'Update demand info\n\n PUT /api/project/demand\n '
    if (not request.json):
        abort(400)
    if (demand.find_demand_title_by_id(request.json['id']) == request.json['title']):
        pass
    elif demand.find_one_demand_by_title(request.json['title']):
        return jsonify({'msg': 'This demand already exists'})
    try:
        data = demand.update_demands(request.json)
        return jsonify({'msg': 'ok', 'data': model_to_dict(data)})
    except PermissionDenied:
        return jsonify({'msg': 'PermissionDenied'})<|docstring|>Update demand info PUT /api/project/demand<|endoftext|>