<?xml version="1.0"?>
<net name="detokenizer" version="11">
	<layers>
		<layer id="0" name="Parameter_312413" type="Parameter" version="opset1">
			<data shape="?,?" element_type="i64" />
			<output>
				<port id="0" precision="I64" names="Parameter_312413">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</output>
		</layer>
		<layer id="1" name="Constant_312389" type="Const" version="opset1">
			<data element_type="u8" shape="4241003" offset="0" size="4241003" />
			<output>
				<port id="0" precision="U8">
					<dim>4241003</dim>
				</port>
			</output>
		</layer>
		<layer id="2" name="Convert_312423" type="Convert" version="opset1">
			<data destination_type="i32" />
			<input>
				<port id="0" precision="I64">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</output>
		</layer>
		<layer id="3" name="SentencepieceDetokenizer_312414" type="SentencepieceDetokenizer" version="extension">
			<input>
				<port id="0" precision="U8">
					<dim>4241003</dim>
				</port>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="3" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="4" precision="U8">
					<dim>-1</dim>
				</port>
			</output>
		</layer>
		<layer id="4" name="StringTensorPack_312415" type="StringTensorPack" version="extension">
			<data mode="begins_ends" />
			<input>
				<port id="0" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="1" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="2" precision="U8">
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="3" precision="STRING" names="string_output">
					<dim>-1</dim>
				</port>
			</output>
		</layer>
		<layer id="5" name="Result_312416" type="Result" version="opset1">
			<input>
				<port id="0" precision="STRING">
					<dim>-1</dim>
				</port>
			</input>
		</layer>
	</layers>
	<edges>
		<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
		<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
		<edge from-layer="3" from-port="2" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="3" to-layer="4" to-port="1" />
		<edge from-layer="3" from-port="4" to-layer="4" to-port="2" />
		<edge from-layer="4" from-port="3" to-layer="5" to-port="0" />
	</edges>
	<rt_info>
		<bos_token_id value="2" />
		<chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn&gt;' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn&gt;&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn&gt;model&#10;'}}{% endif %}" />
		<eos_token_id value="1" />
		<original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'&gt;" />
		<pad_token_id value="0" />
	</rt_info>
</net>
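<!--
	Usage sketch: a minimal Python example of how a saved detokenizer IR such as the one above is
	typically loaded and run with OpenVINO. Importing openvino_tokenizers registers the extension
	operations referenced in the graph (SentencepieceDetokenizer, StringTensorPack). The file path
	"detokenizer.xml" and the random token ids are illustrative assumptions, not part of this IR;
	meaningful text requires ids produced by the matching tokenizer model.

	import numpy as np
	from openvino import Core
	import openvino_tokenizers  # registers the tokenizer extension ops with OpenVINO

	core = Core()
	compiled_detokenizer = core.compile_model("detokenizer.xml")  # assumed path to this IR

	token_ids = np.random.randint(100, 1000, size=(3, 5))  # placeholder ids, shape [batch, seq_len]
	result = compiled_detokenizer(token_ids)
	print(result["string_output"])  # one decoded string per batch row
-->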