corpus_id
stringlengths
7
12
paper_id
stringlengths
9
16
title
stringlengths
1
261
abstract
stringlengths
70
4.02k
source
stringclasses
1 value
bibtex
stringlengths
208
20.9k
citation_key
stringlengths
6
100
arxiv-660701
2409.14858
LLMs' ways of seeing User Personas
<|reference_start|>LLMs' ways of seeing User Personas: Large Language Models (LLMs), which have gained significant traction in recent years, also function as big structured repositories of data. User personas are a significant and widely utilized method in HCI. This study aims to investigate how LLMs, in their role as data repositories, interpret user personas. Our focus is specifically on personas within the Indian context, seeking to understand how LLMs would interpret such culturally specific personas. To achieve this, we conduct both quantitative and qualitative analyses. This multifaceted approach allows us a primary understanding of the interpretative capabilities of LLMs concerning personas within the Indian context.<|reference_end|>
arxiv
@article{panda2024llms, title={LLMs' ways of seeing User Personas}, author={Swaroop Panda}, journal={arXiv preprint arXiv:2409.14858}, year={2024}, archivePrefix={arXiv}, eprint={2409.14858}, primaryClass={cs.HC} }
panda2024llms
arxiv-660702
2409.14859
MentalImager: Exploring Generative Images for Assisting Support-Seekers' Self-Disclosure in Online Mental Health Communities
<|reference_start|>MentalImager: Exploring Generative Images for Assisting Support-Seekers' Self-Disclosure in Online Mental Health Communities: Support-seekers' self-disclosure of their suffering experiences, thoughts, and feelings in the post can help them get needed peer support in online mental health communities (OMHCs). However, such mental health self-disclosure could be challenging. Images can facilitate the manifestation of relevant experiences and feelings in the text; yet, relevant images are not always available. In this paper, we present a technical prototype named MentalImager and validate in a human evaluation study that it can generate topical- and emotional-relevant images based on the seekers' drafted posts or specified keywords. Two user studies demonstrate that MentalImager not only improves seekers' satisfaction with their self-disclosure in their posts but also invokes support-providers' empathy for the seekers and willingness to offer help. Such improvements are credited to the generated images, which help seekers express their emotions and inspire them to add more details about their experiences and feelings. We report concerns on MentalImager and discuss insights for supporting self-disclosure in OMHCs.<|reference_end|>
arxiv
@article{zhang2024mentalimager:, title={MentalImager: Exploring Generative Images for Assisting Support-Seekers' Self-Disclosure in Online Mental Health Communities}, author={Han Zhang and Jiaqi Zhang and Yuxiang Zhou and Ryan Louie and Taewook Kim and Qingyu Guo and Shuailin Li and Zhenhui Peng}, journal={arXiv preprint arXiv:2409.14859}, year={2024}, archivePrefix={arXiv}, eprint={2409.14859}, primaryClass={cs.HC} }
zhang2024mentalimager:
arxiv-660703
2409.14863
Architectural Challenges of Nomadic Networks in 6G
<|reference_start|>Architectural Challenges of Nomadic Networks in 6G: This paper examines architectural challenges and opportunities arising from Nomadic Networks in the context of emerging 6G research. Nomadic networks are proposed as a solution to the limitations of stationary communication networks, providing enhanced connectivity for dynamic and mobile environments, such as large outdoor events, emergency situations and mobile industrial applications. The key requirements for nomadic networks are outlined, including functional split within the Radio Access Network and robust backhauling solutions. It also addresses the complexity of managing network components, ensuring interoperability with existing systems and maintaining stakeholder trust. A comprehensive architectural framework for Nomadic Networks in 6G is proposed. Different deployment scenarios for Nomadic Networks are investigated, including spawned, steered, and wandering Radio Access Networks as well as integrated, migrated and donated Core Networks. By introducing Nomadic-Network-as-a-Service and a related orchestration framework, the potential for flexible and scalable network management is emphasized. By addressing the architectural challenges, the paper provides a path for the successful implementation of Nomadic Networks towards more adaptable and flexible 6G networks that can meet the evolving needs of multiple sectors.<|reference_end|>
arxiv
@article{lindenschmitt2024architectural, title={Architectural Challenges of Nomadic Networks in 6G}, author={Daniel Lindenschmitt, Benedikt Veith, Khurshid Alam, Ainur Daurembekova, Michael Gundall, Mohammad Asif Habibi, Bin Han, Dennis Krummacker, Philipp Rosemann and Hans D. Schotten}, journal={arXiv preprint arXiv:2409.14863}, year={2024}, archivePrefix={arXiv}, eprint={2409.14863}, primaryClass={cs.NI} }
lindenschmitt2024architectural
arxiv-660704
2409.14866
Effective and Evasive Fuzz Testing-Driven Jailbreaking Attacks against LLMs
<|reference_start|>Effective and Evasive Fuzz Testing-Driven Jailbreaking Attacks against LLMs: Large Language Models (LLMs) have excelled in various tasks but are still vulnerable to jailbreaking attacks, where attackers create jailbreak prompts to mislead the model to produce harmful or offensive content. Current jailbreak methods either rely heavily on manually crafted templates, which pose challenges in scalability and adaptability, or struggle to generate semantically coherent prompts, making them easy to detect. Additionally, most existing approaches involve lengthy prompts, leading to higher query costs.In this paper, to remedy these challenges, we introduce a novel jailbreaking attack framework, which is an automated, black-box jailbreaking attack framework that adapts the black-box fuzz testing approach with a series of customized designs. Instead of relying on manually crafted templates, our method starts with an empty seed pool, removing the need to search for any related jailbreaking templates. We also develop three novel question-dependent mutation strategies using an LLM helper to generate prompts that maintain semantic coherence while significantly reducing their length. Additionally, we implement a two-level judge module to accurately detect genuine successful jailbreaks. We evaluated our method on 7 representative LLMs and compared it with 5 state-of-the-art jailbreaking attack strategies. For proprietary LLM APIs, such as GPT-3.5 turbo, GPT-4, and Gemini-Pro, our method achieves attack success rates of over 90%,80% and 74%, respectively, exceeding existing baselines by more than 60%. Additionally, our method can maintain high semantic coherence while significantly reducing the length of jailbreak prompts. When targeting GPT-4, our method can achieve over 78% attack success rate even with 100 tokens. Moreover, our method demonstrates transferability and is robust to state-of-the-art defenses. 
We will open-source our codes upon publication.<|reference_end|>
arxiv
@article{gong2024effective, title={Effective and Evasive Fuzz Testing-Driven Jailbreaking Attacks against LLMs}, author={Xueluan Gong and Mingzhe Li and Yilin Zhang and Fengyuan Ran and Chen Chen and Yanjiao Chen and Qian Wang and Kwok-Yan Lam}, journal={arXiv preprint arXiv:2409.14866}, year={2024}, archivePrefix={arXiv}, eprint={2409.14866}, primaryClass={cs.CR cs.AI} }
gong2024effective
arxiv-660705
2409.14867
A novel agent with formal goal-reaching guarantees: an experimental study with a mobile robot
<|reference_start|>A novel agent with formal goal-reaching guarantees: an experimental study with a mobile robot: Reinforcement Learning (RL) has been shown to be effective and convenient for a number of tasks in robotics. However, it requires the exploration of a sufficiently large number of state-action pairs, many of which may be unsafe or unimportant. For instance, online model-free learning can be hazardous and inefficient in the absence of guarantees that a certain set of desired states will be reached during an episode. An increasingly common approach to address safety involves the addition of a shielding system that constrains the RL actions to a safe set of actions. In turn, a difficulty for such frameworks is how to effectively couple RL with the shielding system to make sure the exploration is not excessively restricted. This work presents a novel safe model-free RL agent called Critic As Lyapunov Function (CALF) and showcases how CALF can be used to improve upon control baselines in robotics in an efficient and convenient fashion while ensuring guarantees of stable goal reaching. The latter is a crucial part of safety, as seen generally. With CALF all state-action pairs remain explorable and yet reaching of desired goal states is formally guaranteed. Formal analysis is provided that shows the goal stabilization-ensuring properties of CALF and a set of real-world and numerical experiments with a non-holonomic wheeled mobile robot (WMR) TurtleBot3 Burger confirmed the superiority of CALF over such a well-established RL agent as proximal policy optimization (PPO), and a modified version of SARSA in a few-episode setting in terms of attained total cost.<|reference_end|>
arxiv
@article{yaremenko2024a, title={A novel agent with formal goal-reaching guarantees: an experimental study with a mobile robot}, author={Grigory Yaremenko and Dmitrii Dobriborsci and Roman Zashchitin and Ruben Contreras Maestre and Ngoc Quoc Huy Hoang and Pavel Osinenko}, journal={arXiv preprint arXiv:2409.14867}, year={2024}, archivePrefix={arXiv}, eprint={2409.14867}, primaryClass={cs.RO cs.AI math.DS math.OC} }
yaremenko2024a
arxiv-660706
2409.14870
Testing Dependency of Weighted Random Graphs
<|reference_start|>Testing Dependency of Weighted Random Graphs: In this paper, we study the task of detecting the edge dependency between two weighted random graphs. We formulate this task as a simple hypothesis testing problem, where under the null hypothesis, the two observed graphs are statistically independent, whereas under the alternative, the edges of one graph are dependent on the edges of a uniformly and randomly vertex-permuted version of the other graph. For general edge-weight distributions, we establish thresholds at which optimal testing becomes information-theoretically possible or impossible, as a function of the total number of nodes in the observed graphs and the generative distributions of the weights. Finally, we identify a statistical-computational gap, and present evidence suggesting that this gap is inherent using the framework of low-degree polynomials.<|reference_end|>
arxiv
@article{oren2024testing, title={Testing Dependency of Weighted Random Graphs}, author={Mor Oren and Vered Paslev and Wasim Huleihel}, journal={arXiv preprint arXiv:2409.14870}, year={2024}, archivePrefix={arXiv}, eprint={2409.14870}, primaryClass={cs.LG cs.IT math.IT} }
oren2024testing
arxiv-660707
2409.14872
FedSlate:A Federated Deep Reinforcement Learning Recommender System
<|reference_start|>FedSlate:A Federated Deep Reinforcement Learning Recommender System: Reinforcement learning methods have been used to optimize long-term user engagement in recommendation systems. However, existing reinforcement learning-based recommendation systems do not fully exploit the relevance of individual user behavior across different platforms. One potential solution is to aggregate data from various platforms in a centralized location and use the aggregated data for training. However, this approach raises economic and legal concerns, including increased communication costs and potential threats to user privacy. To address these challenges, we propose \textbf{FedSlate}, a federated reinforcement learning recommendation algorithm that effectively utilizes information that is prohibited from being shared at a legal level. We employ the SlateQ algorithm to assist FedSlate in learning users' long-term behavior and evaluating the value of recommended content. We extend the existing application scope of recommendation systems from single-user single-platform to single-user multi-platform and address cross-platform learning challenges by introducing federated learning. We use RecSim to construct a simulation environment for evaluating FedSlate and compare its performance with state-of-the-art benchmark recommendation models. Experimental results demonstrate the superior effects of FedSlate over baseline methods in various environmental settings, and FedSlate facilitates the learning of recommendation strategies in scenarios where baseline methods are completely inapplicable. Code is available at \textit{https://github.com/TianYaDY/FedSlate}.<|reference_end|>
arxiv
@article{deng2024fedslate:a, title={FedSlate:A Federated Deep Reinforcement Learning Recommender System}, author={Yongxin Deng and Xiaoyu Tan and Xihe Qiu and Yaochu Jin}, journal={arXiv preprint arXiv:2409.14872}, year={2024}, archivePrefix={arXiv}, eprint={2409.14872}, primaryClass={cs.IR cs.AI} }
deng2024fedslate:a
arxiv-660708
2409.14873
Optimal state estimation: Turnpike analysis and performance results
<|reference_start|>Optimal state estimation: Turnpike analysis and performance results: In this paper, we introduce turnpike arguments in the context of optimal state estimation. In particular, we show that the optimal solution of the state estimation problem involving all available past data serves as turnpike for the solutions of truncated problems involving only a subset of the data. We consider two different mathematical characterizations of this phenomenon and provide corresponding sufficient conditions that rely on strict dissipativity and decaying sensitivity. As second contribution, we show how a specific turnpike property can be used to establish performance guarantees when approximating the optimal solution of the full problem by a sequence of truncated problems, and we show that the resulting performance (both averaged and non-averaged) is approximately optimal with error terms that can be made arbitrarily small by an appropriate choice of the horizon length. In addition, we discuss interesting implications of these results for the practically relevant case of moving horizon estimation and illustrate our results with a numerical example.<|reference_end|>
arxiv
@article{schiller2024optimal, title={Optimal state estimation: Turnpike analysis and performance results}, author={Julian D. Schiller and Lars Gr{\"u}ne and Matthias A. M{\"u}ller}, journal={arXiv preprint arXiv:2409.14873}, year={2024}, archivePrefix={arXiv}, eprint={2409.14873}, primaryClass={math.OC cs.SY eess.SY} }
schiller2024optimal
arxiv-660709
2409.14874
Towards Ground-truth-free Evaluation of Any Segmentation in Medical Images
<|reference_start|>Towards Ground-truth-free Evaluation of Any Segmentation in Medical Images: We explore the feasibility and potential of building a ground-truth-free evaluation model to assess the quality of segmentations generated by the Segment Anything Model (SAM) and its variants in medical imaging. This evaluation model estimates segmentation quality scores by analyzing the coherence and consistency between the input images and their corresponding segmentation predictions. Based on prior research, we frame the task of training this model as a regression problem within a supervised learning framework, using Dice scores (and optionally other metrics) along with mean squared error to compute the training loss. The model is trained utilizing a large collection of public datasets of medical images with segmentation predictions from SAM and its variants. We name this model EvanySeg (Evaluation of Any Segmentation in Medical Images). Our exploration of convolution-based models (e.g., ResNet) and transformer-based models (e.g., ViT) suggested that ViT yields better performance for this task. EvanySeg can be employed for various tasks, including: (1) identifying poorly segmented samples by detecting low-percentile segmentation quality scores; (2) benchmarking segmentation models without ground truth by averaging quality scores across test samples; (3) alerting human experts to poor-quality segmentation predictions during human-AI collaboration by applying a threshold within the score space; and (4) selecting the best segmentation prediction for each test sample at test time when multiple segmentation models are available, by choosing the prediction with the highest quality score. Models and code will be made available at https://github.com/ahjolsenbics/EvanySeg.<|reference_end|>
arxiv
@article{senbi2024towards, title={Towards Ground-truth-free Evaluation of Any Segmentation in Medical Images}, author={Ahjol Senbi and Tianyu Huang and Fei Lyu and Qing Li and Yuhui Tao and Wei Shao and Qiang Chen and Chengyan Wang and Shuo Wang and Tao Zhou and Yizhe Zhang}, journal={arXiv preprint arXiv:2409.14874}, year={2024}, archivePrefix={arXiv}, eprint={2409.14874}, primaryClass={eess.IV cs.AI cs.CV cs.LG} }
senbi2024towards
arxiv-660710
2409.14876
Mammo-Clustering:A Weakly Supervised Multi-view Global-Local Context Clustering Network for Detection and Classification in Mammography
<|reference_start|>Mammo-Clustering:A Weakly Supervised Multi-view Global-Local Context Clustering Network for Detection and Classification in Mammography: Breast cancer has long posed a significant threat to women's health, making early screening crucial for mitigating its impact. However, mammography, the preferred method for early screening, faces limitations such as the burden of double reading by radiologists, challenges in widespread adoption in remote and underdeveloped areas, and obstacles in intelligent early screening development due to data constraints. To address these challenges, we propose a weakly supervised multi-view mammography early screening model for breast cancer based on context clustering. Context clustering, a feature extraction structure that is neither CNN nor transformer, combined with multi-view learning for information complementation, presents a promising approach. The weak supervision design specifically addresses data limitations. Our model achieves state-of-the-art performance with fewer parameters on two public datasets, with an AUC of 0.828 on the Vindr-Mammo dataset and 0.805 on the CBIS-DDSM dataset. Our model shows potential in reducing the burden on doctors and increasing the feasibility of breast cancer screening for women in underdeveloped regions.<|reference_end|>
arxiv
@article{yang2024mammo-clustering:a, title={Mammo-Clustering:A Weakly Supervised Multi-view Global-Local Context Clustering Network for Detection and Classification in Mammography}, author={Shilong Yang and Chulong Zhang and Qi Zang and Juan Yu and Liang Zeng and Xiao Luo and Yexuan Xing and Xin Pan and Qi Li and Xiaokun Liang and Yaoqin Xie}, journal={arXiv preprint arXiv:2409.14876}, year={2024}, archivePrefix={arXiv}, eprint={2409.14876}, primaryClass={cs.CV cs.AI} }
yang2024mammo-clustering:a
arxiv-660711
2409.14878
InterMind: A Doctor-Patient-Family Interactive Depression Assessment System Empowered by Large Language Models
<|reference_start|>InterMind: A Doctor-Patient-Family Interactive Depression Assessment System Empowered by Large Language Models: Depression poses significant challenges to patients and healthcare organizations, necessitating efficient assessment methods. Existing paradigms typically focus on a patient-doctor way that overlooks multi-role interactions, such as family involvement in the evaluation and caregiving process. Moreover, current automatic depression detection (ADD) methods usually model depression detection as a classification or regression task, lacking interpretability for the decision-making process. To address these issues, we developed InterMind, a doctor-patient-family interactive depression assessment system empowered by large language models (LLMs). Our system enables patients and families to contribute descriptions, generates assistive diagnostic reports for doctors, and provides actionable insights, improving diagnostic precision and efficiency. To enhance LLMs' performance in psychological counseling and diagnostic interpretability, we integrate retrieval-augmented generation (RAG) and chain-of-thoughts (CoT) techniques for data augmentation, which mitigates the hallucination issue of LLMs in specific scenarios after instruction fine-tuning. Quantitative experiments and professional assessments by clinicians validate the effectiveness of our system.<|reference_end|>
arxiv
@article{zhou2024intermind:, title={InterMind: A Doctor-Patient-Family Interactive Depression Assessment System Empowered by Large Language Models}, author={Zhiyuan Zhou and Jilong Liu and Sanwang Wang and Shijie Hao and Yanrong Guo and Richang Hong}, journal={arXiv preprint arXiv:2409.14878}, year={2024}, archivePrefix={arXiv}, eprint={2409.14878}, primaryClass={cs.HC} }
zhou2024intermind:
arxiv-660712
2409.14879
Privacy Policy Analysis through Prompt Engineering for LLMs
<|reference_start|>Privacy Policy Analysis through Prompt Engineering for LLMs: Privacy policies are often obfuscated by their complexity, which impedes transparency and informed consent. Conventional machine learning approaches for automatically analyzing these policies demand significant resources and substantial domain-specific training, causing adaptability issues. Moreover, they depend on extensive datasets that may require regular maintenance due to changing privacy concerns. In this paper, we propose, apply, and assess PAPEL (Privacy Policy Analysis through Prompt Engineering for LLMs), a framework harnessing the power of Large Language Models (LLMs) through prompt engineering to automate the analysis of privacy policies. PAPEL aims to streamline the extraction, annotation, and summarization of information from these policies, enhancing their accessibility and comprehensibility without requiring additional model training. By integrating zero-shot, one-shot, and few-shot learning approaches and the chain-of-thought prompting in creating predefined prompts and prompt templates, PAPEL guides LLMs to efficiently dissect, interpret, and synthesize the critical aspects of privacy policies into user-friendly summaries. We demonstrate the effectiveness of PAPEL with two applications: (i) annotation and (ii) contradiction analysis. We assess the ability of several LLaMa and GPT models to identify and articulate data handling practices, offering insights comparable to existing automated analysis approaches while reducing training efforts and increasing the adaptability to new analytical needs. The experiments demonstrate that the LLMs PAPEL utilizes (LLaMA and Chat GPT models) achieve robust performance in privacy policy annotation, with F1 scores reaching 0.8 and above (using the OPP-115 gold standard), underscoring the effectiveness of simpler prompts across various advanced language models.<|reference_end|>
arxiv
@article{goknil2024privacy, title={Privacy Policy Analysis through Prompt Engineering for LLMs}, author={Arda Goknil and Femke B. Gelderblom and Simeon Tverdal and Shukun Tokas and Hui Song}, journal={arXiv preprint arXiv:2409.14879}, year={2024}, archivePrefix={arXiv}, eprint={2409.14879}, primaryClass={cs.CL cs.CY cs.SE} }
goknil2024privacy
arxiv-660713
2409.14880
End-to-End Graph Flattening Method for Large Language Models
<|reference_start|>End-to-End Graph Flattening Method for Large Language Models: In recent years, the breakthrough of Large Language Models (LLMs) offers new ideas for achieving universal methods on graph data. The common practice of converting graphs into natural language for LLMs, which refers to graph flattening, exhibits good generalizability and interpretability. However, the poor organization of the textual format results in poor performance in long-distance scenario understanding. Inspired by human cognitive reasoning habits, we propose a novel method for graph flattening to fit LLMs, termed as End-to-End DAG-Path prompting (EEDP). Experiments on real-world datasets show that EEDP enhances the reasoning performance of LLMs in long-distance scenarios while maintaining excellent performance in short-distance scenarios, demonstrating good robustness in the face of distance variations.<|reference_end|>
arxiv
@article{hong2024end-to-end, title={End-to-End Graph Flattening Method for Large Language Models}, author={Bin Hong and Jinze Wu and Jiayu Liu and Liang Ding and Jing Sha and Kai Zhang and Shijin Wang and Zhenya Huang}, journal={arXiv preprint arXiv:2409.14880}, year={2024}, archivePrefix={arXiv}, eprint={2409.14880}, primaryClass={cs.CL cs.AI} }
hong2024end-to-end
arxiv-660714
2409.14881
Bounded indegree $k$-forests problem and a faster algorithm for directed graph augmentation
<|reference_start|>Bounded indegree $k$-forests problem and a faster algorithm for directed graph augmentation: We consider two problems for a directed graph $G$, which we show to be closely related. The first one is to find $k$ edge-disjoint forests in $G$ of maximal size such that the indegree of each vertex in these forests is at most $k$. We describe a min-max characterization for this problem and show that it can be solved in $O(k \delta m \log n)$ time, where $(n,m)$ is the size of $G$ and $\delta$ is the difference between $k$ and the edge connectivity of the graph. The second problem is the directed edge-connectivity augmentation problem, which has been extensively studied before: find a smallest set of directed edges whose addition to the graph makes it strongly $k$-connected. We improve the complexity for this problem from $O(k \delta (m+\delta n)\log n)$ [Gabow, STOC 1994] to $O(k \delta m \log n)$, by exploiting our solution for the first problem. A similar approach with the same complexity also works for the undirected version of the problem.<|reference_end|>
arxiv
@article{arkhipov2024bounded, title={Bounded indegree $k$-forests problem and a faster algorithm for directed graph augmentation}, author={Pavel Arkhipov and Vladimir Kolmogorov}, journal={arXiv preprint arXiv:2409.14881}, year={2024}, archivePrefix={arXiv}, eprint={2409.14881}, primaryClass={cs.DS cs.DM math.CO} }
arkhipov2024bounded
arxiv-660715
2409.14882
Probabilistically Aligned View-unaligned Clustering with Adaptive Template Selection
<|reference_start|>Probabilistically Aligned View-unaligned Clustering with Adaptive Template Selection: In most existing multi-view modeling scenarios, cross-view correspondence (CVC) between instances of the same target from different views, like paired image-text data, is a crucial prerequisite for effortlessly deriving a consistent representation. Nevertheless, this premise is frequently compromised in certain applications, where each view is organized and transmitted independently, resulting in the view-unaligned problem (VuP). Restoring CVC of unaligned multi-view data is a challenging and highly demanding task that has received limited attention from the research community. To tackle this practical challenge, we propose to integrate the permutation derivation procedure into the bipartite graph paradigm for view-unaligned clustering, termed Probabilistically Aligned View-unaligned Clustering with Adaptive Template Selection (PAVuC-ATS). Specifically, we learn consistent anchors and view-specific graphs by the bipartite graph, and derive permutations applied to the unaligned graphs by reformulating the alignment between two latent representations as a 2-step transition of a Markov chain with adaptive template selection, thereby achieving the probabilistic alignment. The convergence of the resultant optimization problem is validated both experimentally and theoretically. Extensive experiments on six benchmark datasets demonstrate the superiority of the proposed PAVuC-ATS over the baseline methods.<|reference_end|>
arxiv
@article{dong2024probabilistically, title={Probabilistically Aligned View-unaligned Clustering with Adaptive Template Selection}, author={Wenhua Dong and Xiao-Jun Wu and Zhenhua Feng and Sara Atito and Muhammad Awais and Josef Kittler}, journal={arXiv preprint arXiv:2409.14882}, year={2024}, archivePrefix={arXiv}, eprint={2409.14882}, primaryClass={cs.CV} }
dong2024probabilistically
arxiv-660716
2409.14884
Weighted Approximation By Max-Product Generalized Exponential Sampling Series
<|reference_start|>Weighted Approximation By Max-Product Generalized Exponential Sampling Series: In this article, we study the convergence behaviour of the classical generalized Max Product exponential sampling series in the weighted space of log-uniformly continuous and bounded functions. We derive basic convergence results for both the series and study the asymptotic convergence behaviour. Some quantitative approximation results have been obtained utilizing the notion of weighted logarithmic modulus of continuity.<|reference_end|>
arxiv
@article{pradhan2024weighted, title={Weighted Approximation By Max-Product Generalized Exponential Sampling Series}, author={Satyaranjan Pradhan and Madan Mohan Soren}, journal={arXiv preprint arXiv:2409.14884}, year={2024}, archivePrefix={arXiv}, eprint={2409.14884}, primaryClass={math.FA cs.NA math.NA} }
pradhan2024weighted
arxiv-660717
2409.14887
Deploying Open-Source Large Language Models: A performance Analysis
<|reference_start|>Deploying Open-Source Large Language Models: A performance Analysis: Since the release of ChatGPT in November 2022, large language models (LLMs) have seen considerable success, including in the open-source community, with many open-weight models available. However, the requirements to deploy such a service are often unknown and difficult to evaluate in advance. To facilitate this process, we conducted numerous tests at the Centre Inria de l'Universit\'e de Bordeaux. In this article, we propose a comparison of the performance of several models of different sizes (mainly Mistral and LLaMa) depending on the available GPUs, using vLLM, a Python library designed to optimize the inference of these models. Our results provide valuable information for private and public groups wishing to deploy LLMs, allowing them to evaluate the performance of different models based on their available hardware. This study thus contributes to facilitating the adoption and use of these large language models in various application domains.<|reference_end|>
arxiv
@article{bendi-ouis2024deploying, title={Deploying Open-Source Large Language Models: A performance Analysis}, author={Yannis Bendi-Ouis and Dan Dutarte and Xavier Hinaut}, journal={arXiv preprint arXiv:2409.14887}, year={2024}, archivePrefix={arXiv}, eprint={2409.14887}, primaryClass={cs.PF cs.AI cs.LG} }
bendi-ouis2024deploying
arxiv-660718
2409.14888
Advancing Video Quality Assessment for AIGC
<|reference_start|>Advancing Video Quality Assessment for AIGC: In recent years, AI generative models have made remarkable progress across various domains, including text generation, image generation, and video generation. However, assessing the quality of text-to-video generation is still in its infancy, and existing evaluation frameworks fall short when compared to those for natural videos. Current video quality assessment (VQA) methods primarily focus on evaluating the overall quality of natural videos and fail to adequately account for the substantial quality discrepancies between frames in generated videos. To address this issue, we propose a novel loss function that combines mean absolute error with cross-entropy loss to mitigate inter-frame quality inconsistencies. Additionally, we introduce the innovative S2CNet technique to retain critical content, while leveraging adversarial training to enhance the model's generalization capabilities. Experimental results demonstrate that our method outperforms existing VQA techniques on the AIGC Video dataset, surpassing the previous state-of-the-art by 3.1% in terms of PLCC.<|reference_end|>
arxiv
@article{yue2024advancing, title={Advancing Video Quality Assessment for AIGC}, author={Xinli Yue, Jianhui Sun, Han Kong, Liangchao Yao, Tianyi Wang, Lei Li, Fengyun Rao, Jing Lv, Fan Xia, Yuetang Deng, Qian Wang, Lingchen Zhao}, journal={arXiv preprint arXiv:2409.14888}, year={2024}, archivePrefix={arXiv}, eprint={2409.14888}, primaryClass={cs.CV} }
yue2024advancing
arxiv-660719
2409.14889
Task scheduling for autonomous vehicles in the Martian environment
<|reference_start|>Task scheduling for autonomous vehicles in the Martian environment: In the paper, we introduced a novel variant of Electric VRP/TSP, the Solar Powered Rover Routing Problem (SPRRP), to tackle the routing of energy-constrained autonomous electric vehicles for Martian missions. We proposed a basic formulation of the problem based on the graph model that decomposes each Point of Interest into movement, charging, and research tasks. We have also outlined further possibilities for extending the problem.<|reference_end|>
arxiv
@article{burzynski2024task, title={Task scheduling for autonomous vehicles in the Martian environment}, author={Wojciech Burzy{\'n}ski and Mariusz Kaleta}, journal={Progress in Polish Artificial Intelligence Research, 4(5), pp. 366-373, 2024}, year={2024}, doi={10.48550/arXiv.2409.14889}, archivePrefix={arXiv}, eprint={2409.14889}, primaryClass={cs.DM} }
burzynski2024task
arxiv-660720
2409.14891
Observe Then Act: Asynchronous Active Vision-Action Model for Robotic Manipulation
<|reference_start|>Observe Then Act: Asynchronous Active Vision-Action Model for Robotic Manipulation: In real-world scenarios, many robotic manipulation tasks are hindered by occlusions and limited fields of view, posing significant challenges for passive observation-based models that rely on fixed or wrist-mounted cameras. In this paper, we investigate the problem of robotic manipulation under limited visual observation and propose a task-driven asynchronous active vision-action model.Our model serially connects a camera Next-Best-View (NBV) policy with a gripper Next-Best Pose (NBP) policy, and trains them in a sensor-motor coordination framework using few-shot reinforcement learning. This approach allows the agent to adjust a third-person camera to actively observe the environment based on the task goal, and subsequently infer the appropriate manipulation actions.We trained and evaluated our model on 8 viewpoint-constrained tasks in RLBench. The results demonstrate that our model consistently outperforms baseline algorithms, showcasing its effectiveness in handling visual constraints in manipulation tasks.<|reference_end|>
arxiv
@article{wang2024observe, title={Observe Then Act: Asynchronous Active Vision-Action Model for Robotic Manipulation}, author={Guokang Wang, Hang Li, Shuyuan Zhang, Yanhong Liu, Huaping Liu}, journal={arXiv preprint arXiv:2409.14891}, year={2024}, archivePrefix={arXiv}, eprint={2409.14891}, primaryClass={cs.RO cs.CV} }
wang2024observe
arxiv-660721
2409.14893
Novel Gradient Sparsification Algorithm via Bayesian Inference
<|reference_start|>Novel Gradient Sparsification Algorithm via Bayesian Inference: Error accumulation is an essential component of the Top-$k$ sparsification method in distributed gradient descent. It implicitly scales the learning rate and prevents the slow-down of lateral movement, but it can also deteriorate convergence. This paper proposes a novel sparsification algorithm called regularized Top-$k$ (RegTop-$k$) that controls the learning rate scaling of error accumulation. The algorithm is developed by looking at the gradient sparsification as an inference problem and determining a Bayesian optimal sparsification mask via maximum-a-posteriori estimation. It utilizes past aggregated gradients to evaluate posterior statistics, based on which it prioritizes the local gradient entries. Numerical experiments with ResNet-18 on CIFAR-10 show that at $0.1\%$ sparsification, RegTop-$k$ achieves about $8\%$ higher accuracy than standard Top-$k$.<|reference_end|>
arxiv
@article{bereyhi2024novel, title={Novel Gradient Sparsification Algorithm via Bayesian Inference}, author={Ali Bereyhi and Ben Liang and Gary Boudreau and Ali Afana}, journal={arXiv preprint arXiv:2409.14893}, year={2024}, archivePrefix={arXiv}, eprint={2409.14893}, primaryClass={cs.LG cs.IT eess.SP math.IT} }
bereyhi2024novel
arxiv-660722
2409.14896
Built Different: Tactile Perception to Overcome Cross-Embodiment Capability Differences in Collaborative Manipulation
<|reference_start|>Built Different: Tactile Perception to Overcome Cross-Embodiment Capability Differences in Collaborative Manipulation: Tactile sensing is a powerful means of implicit communication between a human and a robot assistant. In this paper, we investigate how tactile sensing can transcend cross-embodiment differences across robotic systems in the context of collaborative manipulation. Consider tasks such as collaborative object carrying where the human-robot interaction is force rich. Learning and executing such skills requires the robot to comply to the human and to learn behaviors at the joint-torque level. However, most robots do not offer this compliance or provide access to their joint torques. To address this challenge, we present an approach that uses tactile sensors to transfer policies from robots with these capabilities to those without. We show how our method can enable a cooperative task where a robot and human must work together to maneuver objects through space. We first demonstrate the skill on an impedance control-capable robot equipped with tactile sensing, then show the positive transfer of the tactile policy to a planar prismatic robot that is only capable of position control and does not come equipped with any sort of force/torque feedback, yet is able to comply to the human motions only using tactile feedback. Further details and videos can be found on our project website at https://www.mmintlab.com/research/tactile-collaborative/.<|reference_end|>
arxiv
@article{bogert2024built, title={Built Different: Tactile Perception to Overcome Cross-Embodiment Capability Differences in Collaborative Manipulation}, author={William van den Bogert, Madhavan Iyengar, Nima Fazeli}, journal={arXiv preprint arXiv:2409.14896}, year={2024}, archivePrefix={arXiv}, eprint={2409.14896}, primaryClass={cs.RO cs.LG} }
bogert2024built
arxiv-660723
2409.14899
CON: Continual Object Navigation via Data-Free Inter-Agent Knowledge Transfer in Unseen and Unfamiliar Places
<|reference_start|>CON: Continual Object Navigation via Data-Free Inter-Agent Knowledge Transfer in Unseen and Unfamiliar Places: This work explores the potential of brief inter-agent knowledge transfer (KT) to enhance the robotic object goal navigation (ON) in unseen and unfamiliar environments. Drawing on the analogy of human travelers acquiring local knowledge, we propose a framework in which a traveler robot (student) communicates with local robots (teachers) to obtain ON knowledge through minimal interactions. We frame this process as a data-free continual learning (CL) challenge, aiming to transfer knowledge from a black-box model (teacher) to a new model (student). In contrast to approaches like zero-shot ON using large language models (LLMs), which utilize inherently communication-friendly natural language for knowledge representation, the other two major ON approaches -- frontier-driven methods using object feature maps and learning-based ON using neural state-action maps -- present complex challenges where data-free KT remains largely uncharted. To address this gap, we propose a lightweight, plug-and-play KT module targeting non-cooperative black-box teachers in open-world settings. Using the universal assumption that every teacher robot has vision and mobility capabilities, we define state-action history as the primary knowledge base. Our formulation leads to the development of a query-based occupancy map that dynamically represents target object locations, serving as an effective and communication-friendly knowledge representation. We validate the effectiveness of our method through experiments conducted in the Habitat environment.<|reference_end|>
arxiv
@article{terashima2024con:, title={CON: Continual Object Navigation via Data-Free Inter-Agent Knowledge Transfer in Unseen and Unfamiliar Places}, author={Kouki Terashima, Daiki Iwata, Kanji Tanaka}, journal={arXiv preprint arXiv:2409.14899}, year={2024}, archivePrefix={arXiv}, eprint={2409.14899}, primaryClass={cs.RO cs.CV cs.LG} }
terashima2024con:
arxiv-660724
2409.14901
Syntax and semantics of multi-adjoint normal logic programming
<|reference_start|>Syntax and semantics of multi-adjoint normal logic programming: Multi-adjoint logic programming is a general framework with interesting features, which involves other positive logic programming frameworks such as monotonic and residuated logic programming, generalized annotated logic programs, fuzzy logic programming and possibilistic logic programming. One of the most interesting extensions of this framework is the possibility of considering a negation operator in the logic programs, which will improve its flexibility and the range of real applications. This paper introduces multi-adjoint normal logic programming, which is an extension of multi-adjoint logic programming including a negation operator in the underlying lattice. Beside the introduction of the syntax and semantics of this paradigm, we will provide sufficient conditions for the existence of stable models defined on a convex compact set of a euclidean space. Finally, we will consider a particular algebraic structure in which sufficient conditions can be given in order to ensure the unicity of stable models of multi-adjoint normal logic programs.<|reference_end|>
arxiv
@article{cornejo2024syntax, title={Syntax and semantics of multi-adjoint normal logic programming}, author={M. Eugenia Cornejo and David Lobo and Jes{\'u}s Medina}, journal={Fuzzy Sets and Systems 345 (2018) 41-62}, year={2024}, doi={10.1016/j.fss.2017.12.009}, archivePrefix={arXiv}, eprint={2409.14901}, primaryClass={cs.LO math.LO} }
cornejo2024syntax
arxiv-660725
2409.14902
A Contract Theory for Layered Control Architectures
<|reference_start|>A Contract Theory for Layered Control Architectures: Autonomous systems typically leverage layered control architectures with a combination of discrete and continuous models operating at different timescales. As a result, layered systems form a new class of hybrid systems composed of systems operating on a diverse set of continuous and discrete signals. This paper formalizes the notion of a layered (hierarchical) control architecture through a theory of relations between its layers. This theory enables us to formulate contracts within layered control systems -- these define interfaces between layers and isolate the design of each layer, guaranteeing that composition of contracts at each layer results in a contract capturing the desired system-wide specification. Thus, the proposed theory yields the ability to analyze layered control architectures via a compositional approach.<|reference_end|>
arxiv
@article{mazo2024a, title={A Contract Theory for Layered Control Architectures}, author={Manuel Mazo Jr., Will Compton, Max H. Cohen and Aaron D. Ames}, journal={arXiv preprint arXiv:2409.14902}, year={2024}, archivePrefix={arXiv}, eprint={2409.14902}, primaryClass={eess.SY cs.SY math.OC} }
mazo2024a
arxiv-660726
2409.14904
DSG-KD: Knowledge Distillation from Domain-Specific to General Language Models
<|reference_start|>DSG-KD: Knowledge Distillation from Domain-Specific to General Language Models: The use of pre-trained language models fine-tuned to address specific downstream tasks is a common approach in natural language processing (NLP). However, acquiring domain-specific knowledge via fine-tuning is challenging. Traditional methods involve pretraining language models using vast amounts of domain-specific data before fine-tuning for particular tasks. This study investigates emergency/non-emergency classification tasks based on electronic medical record (EMR) data obtained from pediatric emergency departments (PEDs) in Korea. Our findings reveal that existing domain-specific pre-trained language models underperform compared to general language models in handling N-lingual free-text data characteristics of non-English-speaking regions. To address these limitations, we propose a domain knowledge transfer methodology that leverages knowledge distillation to infuse general language models with domain-specific knowledge via fine-tuning. This study demonstrates the effective transfer of specialized knowledge between models by defining a general language model as the student model and a domain-specific pre-trained model as the teacher model. In particular, we address the complexities of EMR data obtained from PEDs in non-English-speaking regions, such as Korea, and demonstrate that the proposed method enhances classification performance in such contexts. The proposed methodology not only outperforms baseline models on Korean PED EMR data, but also promises broader applicability in various professional and technical domains. In future works, we intend to extend this methodology to include diverse non-English-speaking regions and address additional downstream tasks, with the aim of developing advanced model architectures using state-of-the-art KD techniques. The code is available in https://github.com/JoSangYeon/DSG-KD.<|reference_end|>
arxiv
@article{cho2024dsg-kd:, title={DSG-KD: Knowledge Distillation from Domain-Specific to General Language Models}, author={Sangyeon Cho, Jangyeong Jeon, Dongjoon Lee, Changhee Lee, Junyeong Kim}, journal={arXiv preprint arXiv:2409.14904}, year={2024}, archivePrefix={arXiv}, eprint={2409.14904}, primaryClass={cs.CL cs.AI} }
cho2024dsg-kd:
arxiv-660727
2409.14906
Kriformer: A Novel Spatiotemporal Kriging Approach Based on Graph Transformers
<|reference_start|>Kriformer: A Novel Spatiotemporal Kriging Approach Based on Graph Transformers: Accurately estimating data in sensor-less areas is crucial for understanding system dynamics, such as traffic state estimation and environmental monitoring. This study addresses challenges posed by sparse sensor deployment and unreliable data by framing the problem as a spatiotemporal kriging task and proposing a novel graph transformer model, Kriformer. This model estimates data at locations without sensors by mining spatial and temporal correlations, even with limited resources. Kriformer utilizes transformer architecture to enhance the model's perceptual range and solve edge information aggregation challenges, capturing spatiotemporal information effectively. A carefully constructed positional encoding module embeds the spatiotemporal features of nodes, while a sophisticated spatiotemporal attention mechanism enhances estimation accuracy. The multi-head spatial interaction attention module captures subtle spatial relationships between observed and unobserved locations. During training, a random masking strategy prompts the model to learn with partial information loss, allowing the spatiotemporal embedding and multi-head attention mechanisms to synergistically capture correlations among locations. Experimental results show that Kriformer excels in representation learning for unobserved locations, validated on two real-world traffic speed datasets, demonstrating its effectiveness in spatiotemporal kriging tasks.<|reference_end|>
arxiv
@article{pan2024kriformer:, title={Kriformer: A Novel Spatiotemporal Kriging Approach Based on Graph Transformers}, author={Renbin Pan, Feng Xiao, Hegui Zhang, Minyu Shen}, journal={arXiv preprint arXiv:2409.14906}, year={2024}, archivePrefix={arXiv}, eprint={2409.14906}, primaryClass={cs.LG stat.ML} }
pan2024kriformer:
arxiv-660728
2409.14907
Knowledge Planning in Large Language Models for Domain-Aligned Counseling Summarization
<|reference_start|>Knowledge Planning in Large Language Models for Domain-Aligned Counseling Summarization: In mental health counseling, condensing dialogues into concise and relevant summaries (aka counseling notes) holds pivotal significance. Large Language Models (LLMs) exhibit remarkable capabilities in various generative tasks; however, their adaptation to domain-specific intricacies remains challenging, especially within mental health contexts. Unlike standard LLMs, mental health experts first plan to apply domain knowledge in writing summaries. Our work enhances LLMs' ability by introducing a novel planning engine to orchestrate structuring knowledge alignment. To achieve high-order planning, we divide knowledge encapsulation into two major phases: (i) holding dialogue structure and (ii) incorporating domain-specific knowledge. We employ a planning engine on Llama-2, resulting in a novel framework, PIECE. Our proposed system employs knowledge filtering-cum-scaffolding to encapsulate domain knowledge. Additionally, PIECE leverages sheaf convolution learning to enhance its understanding of the dialogue's structural nuances. We compare PIECE with 14 baseline methods and observe a significant improvement across ROUGE and Bleurt scores. Further, expert evaluation and analyses validate the generation quality to be effective, sometimes even surpassing the gold standard. We further benchmark PIECE with other LLMs and report improvement, including Llama-2 (+2.72%), Mistral (+2.04%), and Zephyr (+1.59%), to justify the generalizability of the planning engine.<|reference_end|>
arxiv
@article{srivastava2024knowledge, title={Knowledge Planning in Large Language Models for Domain-Aligned Counseling Summarization}, author={Aseem Srivastava, Smriti Joshi, Tanmoy Chakraborty, Md Shad Akhtar}, journal={arXiv preprint arXiv:2409.14907}, year={2024}, archivePrefix={arXiv}, eprint={2409.14907}, primaryClass={cs.CL} }
srivastava2024knowledge
arxiv-660729
2409.14908
KARMA: Augmenting Embodied AI Agents with Long-and-short Term Memory Systems
<|reference_start|>KARMA: Augmenting Embodied AI Agents with Long-and-short Term Memory Systems: Embodied AI agents responsible for executing interconnected, long-sequence household tasks often face difficulties with in-context memory, leading to inefficiencies and errors in task execution. To address this issue, we introduce KARMA, an innovative memory system that integrates long-term and short-term memory modules, enhancing large language models (LLMs) for planning in embodied agents through memory-augmented prompting. KARMA distinguishes between long-term and short-term memory, with long-term memory capturing comprehensive 3D scene graphs as representations of the environment, while short-term memory dynamically records changes in objects' positions and states. This dual-memory structure allows agents to retrieve relevant past scene experiences, thereby improving the accuracy and efficiency of task planning. Short-term memory employs strategies for effective and adaptive memory replacement, ensuring the retention of critical information while discarding less pertinent data. Compared to state-of-the-art embodied agents enhanced with memory, our memory-augmented embodied AI agent improves success rates by 1.3x and 2.3x in Composite Tasks and Complex Tasks within the AI2-THOR simulator, respectively, and enhances task execution efficiency by 3.4x and 62.7x. Furthermore, we demonstrate that KARMA's plug-and-play capability allows for seamless deployment on real-world robotic systems, such as mobile manipulation platforms.Through this plug-and-play memory system, KARMA significantly enhances the ability of embodied agents to generate coherent and contextually appropriate plans, making the execution of complex household tasks more efficient. The experimental videos from the work can be found at https://youtu.be/4BT7fnw9ehs.<|reference_end|>
arxiv
@article{wang2024karma:, title={KARMA: Augmenting Embodied AI Agents with Long-and-short Term Memory Systems}, author={Zixuan Wang, Bo Yu, Junzhe Zhao, Wenhao Sun, Sai Hou, Shuai Liang, Xing Hu, Yinhe Han, Yiming Gan}, journal={arXiv preprint arXiv:2409.14908}, year={2024}, archivePrefix={arXiv}, eprint={2409.14908}, primaryClass={cs.RO cs.AI} }
wang2024karma:
arxiv-660730
2409.14910
Kinodynamic Motion Planning for Collaborative Object Transportation by Multiple Mobile Manipulators
<|reference_start|>Kinodynamic Motion Planning for Collaborative Object Transportation by Multiple Mobile Manipulators: This work proposes a kinodynamic motion planning technique for collaborative object transportation by multiple mobile manipulators in dynamic environments. A global path planner computes a linear piecewise path from start to goal. A novel algorithm detects the narrow regions between the static obstacles and aids in defining the obstacle-free region to enhance the feasibility of the global path. We then formulate a local online motion planning technique for trajectory generation that minimizes the control efforts in a receding horizon manner. It plans the trajectory for finite time horizons, considering the kinodynamic constraints and the static and dynamic obstacles. The planning technique jointly plans for the mobile bases and the arms to utilize the locomotion capability of the mobile base and the manipulation capability of the arm efficiently. We use a convex cone approach to avoid self-collision of the formation by modifying the mobile manipulators admissible state without imposing additional constraints. Numerical simulations and hardware experiments showcase the efficiency of the proposed approach.<|reference_end|>
arxiv
@article{patra2024kinodynamic, title={Kinodynamic Motion Planning for Collaborative Object Transportation by Multiple Mobile Manipulators}, author={Keshab Patra, Arpita Sinha and Anirban Guha}, journal={arXiv preprint arXiv:2409.14910}, year={2024}, archivePrefix={arXiv}, eprint={2409.14910}, primaryClass={cs.RO cs.MA math.OC} }
patra2024kinodynamic
arxiv-660731
2409.14911
Unsourced Random Access: A Recent Paradigm for Massive Connectivity
<|reference_start|>Unsourced Random Access: A Recent Paradigm for Massive Connectivity: The sixth generation and beyond communication systems are expected to enable communications of a massive number of machine-type devices. The traffic generated by some of these devices will significantly deviate from those in conventional communication scenarios. For instance, for applications where a massive number of cheap sensors communicate with a base station (BS), the devices will only be sporadically active and there will be no coordination among them or with the BS. For such systems requiring massive random access solutions, a new paradigm called unsourced random access (URA) has been proposed. In URA, all the users employ the same codebook and there is no user identity during the data transmission phase. The destination is only interested in the list of messages being sent from the set of active users. In this survey, we provide a comprehensive overview of existing URA solutions with an emphasis on the state-of-the-art, covering both algorithmic and information-theoretic aspects. Moreover, we provide future research directions and challenges, and describe some potential methods of addressing them.<|reference_end|>
arxiv
@article{ozates2024unsourced, title={Unsourced Random Access: A Recent Paradigm for Massive Connectivity}, author={Mert Ozates, Mohammad Javad Ahmadi, Mohammad Kazemi, Tolga M. Duman}, journal={arXiv preprint arXiv:2409.14911}, year={2024}, archivePrefix={arXiv}, eprint={2409.14911}, primaryClass={cs.IT math.IT} }
ozates2024unsourced
arxiv-660732
2409.14912
Efficient Tabular Data Preprocessing of ML Pipelines
<|reference_start|>Efficient Tabular Data Preprocessing of ML Pipelines: Data preprocessing pipelines, which include data decoding, cleaning, and transforming, are a crucial component of Machine Learning (ML) training. They are computationally intensive and often become a major bottleneck, due to the increasing performance gap between the CPUs used for preprocessing and the GPUs used for model training. Recent studies show that a significant number of CPUs across several machines are required to achieve sufficient throughput to saturate the GPUs, leading to increased resource and energy consumption. When the pipeline involves vocabulary generation, the preprocessing performance scales poorly due to significant row-wise synchronization overhead between different CPU cores and servers. To address this limitation, in this paper we present the design of Piper, a hardware accelerator for tabular data preprocessing, prototype it on FPGAs, and demonstrate its potential for training pipelines of commercial recommender systems. Piper achieves 4.7 $\sim$ 71.3$\times$ speedup in latency over a 128-core CPU server and outperforms a data-center GPU by 4.8$\sim$ 20.3$\times$ when using binary input. The impressive performance showcases Piper's potential to increase the efficiency of data preprocessing pipelines and significantly reduce their resource consumption.<|reference_end|>
arxiv
@article{zhu2024efficient, title={Efficient Tabular Data Preprocessing of ML Pipelines}, author={Yu Zhu, Wenqi Jiang, Gustavo Alonso}, journal={arXiv preprint arXiv:2409.14912}, year={2024}, archivePrefix={arXiv}, eprint={2409.14912}, primaryClass={cs.AR cs.LG} }
zhu2024efficient
arxiv-660733
2409.14913
Towards a Realistic Long-Term Benchmark for Open-Web Research Agents
<|reference_start|>Towards a Realistic Long-Term Benchmark for Open-Web Research Agents: We present initial results of a forthcoming benchmark for evaluating LLM agents on white-collar tasks of economic value. We evaluate agents on real-world "messy" open-web research tasks of the type that are routine in finance and consulting. In doing so, we lay the groundwork for an LLM agent evaluation suite where good performance directly corresponds to a large economic and societal impact. We built and tested several agent architectures with o1-preview, GPT-4o, Claude-3.5 Sonnet, Llama 3.1 (405b), and GPT-4o-mini. On average, LLM agents powered by Claude-3.5 Sonnet and o1-preview substantially outperformed agents using GPT-4o, with agents based on Llama 3.1 (405b) and GPT-4o-mini lagging noticeably behind. Across LLMs, a ReAct architecture with the ability to delegate subtasks to subagents performed best. In addition to quantitative evaluations, we qualitatively assessed the performance of the LLM agents by inspecting their traces and reflecting on their observations. Our evaluation represents the first in-depth assessment of agents' abilities to conduct challenging, economically valuable analyst-style research on the real open web.<|reference_end|>
arxiv
@article{muhlbacher2024towards, title={Towards a Realistic Long-Term Benchmark for Open-Web Research Agents}, author={Peter M{\"u}hlbacher and Nikos I. Bosse and Lawrence Phillips}, journal={arXiv preprint arXiv:2409.14913}, year={2024}, archivePrefix={arXiv}, eprint={2409.14913}, primaryClass={cs.CL cs.IR cs.LG} }
muhlbacher2024towards
arxiv-660734
2409.14915
Reducing concept lattices by means of a weaker notion of congruence
<|reference_start|>Reducing concept lattices by means of a weaker notion of congruence: Attribute and size reductions are key issues in formal concept analysis. In this paper, we consider a special kind of equivalence relation to reduce concept lattices, which will be called local congruence. This equivalence relation is based on the notion of congruence on lattices, with the goal of losing as less information as possible and being suitable for the reduction of concept lattices. We analyze how the equivalence classes obtained from a local congruence can be ordered. Moreover, different properties related to the algebraic structure of the whole set of local congruences are also presented. Finally, a procedure to reduce concept lattices by the new weaker notion of congruence is introduced. This procedure can be applied to the classical and fuzzy formal concept analysis frameworks.<|reference_end|>
arxiv
@article{aragon2024reducing, title={Reducing concept lattices by means of a weaker notion of congruence}, author={Roberto G. Arag{\'o}n and Jes{\'u}s Medina and Elo{\'\i}sa Ram{\'\i}rez-Poussa}, journal={Fuzzy Sets and Systems, 418 (2021) 153-169}, year={2024}, doi={10.1016/j.fss.2020.09.013}, archivePrefix={arXiv}, eprint={2409.14915}, primaryClass={cs.DS} }
aragon2024reducing
arxiv-660735
2409.14917
With Ears to See and Eyes to Hear: Sound Symbolism Experiments with Multimodal Large Language Models
<|reference_start|>With Ears to See and Eyes to Hear: Sound Symbolism Experiments with Multimodal Large Language Models: Recently, Large Language Models (LLMs) and Vision Language Models (VLMs) have demonstrated aptitude as potential substitutes for human participants in experiments testing psycholinguistic phenomena. However, an understudied question is to what extent models that only have access to vision and text modalities are able to implicitly understand sound-based phenomena via abstract reasoning from orthography and imagery alone. To investigate this, we analyse the ability of VLMs and LLMs to demonstrate sound symbolism (i.e., to recognise a non-arbitrary link between sounds and concepts) as well as their ability to ``hear'' via the interplay of the language and vision modules of open and closed-source multimodal models. We perform multiple experiments, including replicating the classic Kiki-Bouba and Mil-Mal shape and magnitude symbolism tasks, and comparing human judgements of linguistic iconicity with that of LLMs. Our results show that VLMs demonstrate varying levels of agreement with human labels, and more task information may be required for VLMs versus their human counterparts for in silico experimentation. We additionally see through higher maximum agreement levels that Magnitude Symbolism is an easier pattern for VLMs to identify than Shape Symbolism, and that an understanding of linguistic iconicity is highly dependent on model size.<|reference_end|>
arxiv
@article{loakman2024with, title={With Ears to See and Eyes to Hear: Sound Symbolism Experiments with Multimodal Large Language Models}, author={Tyler Loakman, Yucheng Li and Chenghua Lin}, journal={arXiv preprint arXiv:2409.14917}, year={2024}, archivePrefix={arXiv}, eprint={2409.14917}, primaryClass={cs.CL} }
loakman2024with
arxiv-660736
2409.14918
A Realistic Simulation Framework for Analog/Digital Neuromorphic Architectures
<|reference_start|>A Realistic Simulation Framework for Analog/Digital Neuromorphic Architectures: Developing dedicated neuromorphic computing platforms optimized for embedded or edge-computing applications requires time-consuming design, fabrication, and deployment of full-custom neuromorphic processors. To ensure that initial prototyping efforts, exploring the properties of different network architectures and parameter settings, lead to realistic results it is important to use simulation frameworks that match as best as possible the properties of the final hardware. This is particularly challenging for neuromorphic hardware platforms made using mixed-signal analog/digital circuits, due to the variability and noise sensitivity of their components. In this paper, we address this challenge by developing a software spiking neural network simulator explicitly designed to account for the properties of mixed-signal neuromorphic circuits, including device mismatch variability. The simulator, called ARCANA (A Realistic Simulation Framework for Analog/Digital Neuromorphic Architectures), is designed to reproduce the dynamics of mixed-signal synapse and neuron electronic circuits with autogradient differentiation for parameter optimization and GPU acceleration. We demonstrate the effectiveness of this approach by matching software simulation results with measurements made from an existing neuromorphic processor. We show how the results obtained provide a reliable estimate of the behavior of the spiking neural network trained in software, once deployed in hardware. This framework enables the development and innovation of new learning rules and processing architectures in neuromorphic embedded systems.<|reference_end|>
arxiv
@article{quintana2024a, title={A Realistic Simulation Framework for Analog/Digital Neuromorphic Architectures}, author={Fernando M. Quintana, Maryada, Pedro L. Galindo, Elisa Donati, Giacomo Indiveri, Fernando Perez-Pe{\~n}a}, journal={arXiv preprint arXiv:2409.14918}, year={2024}, archivePrefix={arXiv}, eprint={2409.14918}, primaryClass={cs.NE cs.AR cs.LG} }
quintana2024a
arxiv-660737
2409.14919
Voice Conversion-based Privacy through Adversarial Information Hiding
<|reference_start|>Voice Conversion-based Privacy through Adversarial Information Hiding: Privacy-preserving voice conversion aims to remove only the attributes of speech audio that convey identity information, keeping other speech characteristics intact. This paper presents a mechanism for privacy-preserving voice conversion that allows controlling the leakage of identity-bearing information using adversarial information hiding. This enables a deliberate trade-off between maintaining source-speech characteristics and modification of speaker identity. As such, the approach improves on voice-conversion techniques like CycleGAN and StarGAN, which were not designed for privacy, meaning that converted speech may leak personal information in unpredictable ways. Our approach is also more flexible than ASR-TTS voice conversion pipelines, which by design discard all prosodic information linked to textual content. Evaluations show that the proposed system successfully modifies perceived speaker identity whilst well maintaining source lexical content.<|reference_end|>
arxiv
@article{webber2024voice, title={Voice Conversion-based Privacy through Adversarial Information Hiding}, author={Jacob J Webber, Oliver Watts, Gustav Eje Henter, Jennifer Williams, Simon King}, journal={arXiv preprint arXiv:2409.14919}, year={2024}, archivePrefix={arXiv}, eprint={2409.14919}, primaryClass={cs.SD eess.AS} }
webber2024voice
arxiv-660738
2409.14924
Retrieval Augmented Generation (RAG) and Beyond: A Comprehensive Survey on How to Make your LLMs use External Data More Wisely
<|reference_start|>Retrieval Augmented Generation (RAG) and Beyond: A Comprehensive Survey on How to Make your LLMs use External Data More Wisely: Large language models (LLMs) augmented with external data have demonstrated remarkable capabilities in completing real-world tasks. Techniques for integrating external data into LLMs, such as Retrieval-Augmented Generation (RAG) and fine-tuning, are gaining increasing attention and widespread application. Nonetheless, the effective deployment of data-augmented LLMs across various specialized fields presents substantial challenges. These challenges encompass a wide range of issues, from retrieving relevant data and accurately interpreting user intent to fully harnessing the reasoning capabilities of LLMs for complex tasks. We believe that there is no one-size-fits-all solution for data-augmented LLM applications. In practice, underperformance often arises from a failure to correctly identify the core focus of a task or because the task inherently requires a blend of multiple capabilities that must be disentangled for better resolution. In this survey, we propose a RAG task categorization method, classifying user queries into four levels based on the type of external data required and primary focus of the task: explicit fact queries, implicit fact queries, interpretable rationale queries, and hidden rationale queries. We define these levels of queries, provide relevant datasets, and summarize the key challenges and most effective techniques for addressing these challenges. Finally, we discuss three main forms of integrating external data into LLMs: context, small model, and fine-tuning, highlighting their respective strengths, limitations, and the types of problems they are suited to solve. 
This work aims to help readers thoroughly understand and decompose the data requirements and key bottlenecks in building LLM applications, offering solutions to the different challenges and serving as a guide to systematically developing such applications.<|reference_end|>
arxiv
@article{zhao2024retrieval, title={Retrieval Augmented Generation (RAG) and Beyond: A Comprehensive Survey on How to Make your LLMs use External Data More Wisely}, author={Siyun Zhao, Yuqing Yang, Zilong Wang, Zhiyuan He, Luna K. Qiu, Lili Qiu}, journal={arXiv preprint arXiv:2409.14924}, year={2024}, archivePrefix={arXiv}, eprint={2409.14924}, primaryClass={cs.CL cs.AI} }
zhao2024retrieval
arxiv-660739
2409.14925
DanceCamAnimator: Keyframe-Based Controllable 3D Dance Camera Synthesis
<|reference_start|>DanceCamAnimator: Keyframe-Based Controllable 3D Dance Camera Synthesis: Synthesizing camera movements from music and dance is highly challenging due to the contradicting requirements and complexities of dance cinematography. Unlike human movements, which are always continuous, dance camera movements involve both continuous sequences of variable lengths and sudden drastic changes to simulate the switching of multiple cameras. However, in previous works, every camera frame is equally treated and this causes jittering and unavoidable smoothing in post-processing. To solve these problems, we propose to integrate animator dance cinematography knowledge by formulating this task as a three-stage process: keyframe detection, keyframe synthesis, and tween function prediction. Following this formulation, we design a novel end-to-end dance camera synthesis framework \textbf{DanceCamAnimator}, which imitates human animation procedures and shows powerful keyframe-based controllability with variable lengths. Extensive experiments on the DCM dataset demonstrate that our method surpasses previous baselines quantitatively and qualitatively. Code will be available at \url{https://github.com/Carmenw1203/DanceCamAnimator-Official}.<|reference_end|>
arxiv
@article{wang2024dancecamanimator:, title={DanceCamAnimator: Keyframe-Based Controllable 3D Dance Camera Synthesis}, author={Zixuan Wang, Jiayi Li, Xiaoyu Qin, Shikun Sun, Songtao Zhou, Jia Jia, Jiebo Luo}, journal={arXiv preprint arXiv:2409.14925}, year={2024}, doi={10.1145/3664647.3680980}, archivePrefix={arXiv}, eprint={2409.14925}, primaryClass={cs.CV cs.MM} }
wang2024dancecamanimator:
arxiv-660740
2409.14931
Impact of local congruences in variable selection from datasets
<|reference_start|>Impact of local congruences in variable selection from datasets: Formal concept analysis (FCA) is a useful mathematical tool for obtaining information from relational datasets. One of the most interesting research goals in FCA is the selection of the most representative variables of the dataset, which is called attribute reduction. Recently, the attribute reduction mechanism has been complemented with the use of local congruences in order to obtain robust clusters of concepts, which form convex sublattices of the original concept lattice. Since the application of such local congruences modifies the quotient set associated with the attribute reduction, it is fundamental to know how the original context (attributes, objects and relationship) has been modified in order to understand the impact of the application of the local congruence in the attribute reduction.<|reference_end|>
arxiv
@article{aragón2024impact, title={Impact of local congruences in variable selection from datasets}, author={Roberto G. Arag{\'o}n, Jes{\'u}s Medina, Elo{\'\i}sa Ram{\'\i}rez-Poussa}, journal={Journal of Computational and Applied Mathematics, 404(113416) (2022)}, year={2024}, doi={10.1016/j.cam.2021.113416}, archivePrefix={arXiv}, eprint={2409.14931}, primaryClass={cs.DS} }
aragón2024impact
arxiv-660741
2409.14935
Deep Cost Ray Fusion for Sparse Depth Video Completion
<|reference_start|>Deep Cost Ray Fusion for Sparse Depth Video Completion: In this paper, we present a learning-based framework for sparse depth video completion. Given a sparse depth map and a color image at a certain viewpoint, our approach makes a cost volume that is constructed on depth hypothesis planes. To effectively fuse sequential cost volumes of the multiple viewpoints for improved depth completion, we introduce a learning-based cost volume fusion framework, namely RayFusion, that effectively leverages the attention mechanism for each pair of overlapped rays in adjacent cost volumes. As a result of leveraging feature statistics accumulated over time, our proposed framework consistently outperforms or rivals state-of-the-art approaches on diverse indoor and outdoor datasets, including the KITTI Depth Completion benchmark, VOID Depth Completion benchmark, and ScanNetV2 dataset, using much fewer network parameters.<|reference_end|>
arxiv
@article{kim2024deep, title={Deep Cost Ray Fusion for Sparse Depth Video Completion}, author={Jungeon Kim, Soongjin Kim, Jaesik Park, Seungyong Lee}, journal={arXiv preprint arXiv:2409.14935}, year={2024}, archivePrefix={arXiv}, eprint={2409.14935}, primaryClass={cs.CV} }
kim2024deep
arxiv-660742
2409.14938
A multi-fidelity adaptive dynamical low-rank based optimization algorithm for fission criticality problems
<|reference_start|>A multi-fidelity adaptive dynamical low-rank based optimization algorithm for fission criticality problems: Computing the dominant eigenvalue is important in nuclear systems as it determines the stability of the system (i.e. whether the system is sub or supercritical). Recently, the work of Kusch, Whewell, McClarren and Frank \cite{KWMF} showed that performing a low-rank approximation can be very effective in reducing the high memory requirement and computational cost of such problems. In this work, we propose a rank adaptive approach that changes the rank during the inverse power iteration. This allows us to progressively increase the rank (i.e. changing the fidelity of the model) as we get closer to convergence, thereby further reducing computational cost. We then exploit this multi-fidelity approach to optimize a simplified nuclear reactor. In this case the system is parameterized and the values of the parameters that give criticality are sought.<|reference_end|>
arxiv
@article{scalone2024a, title={A multi-fidelity adaptive dynamical low-rank based optimization algorithm for fission criticality problems}, author={C. Scalone, L. Einkemmer, J. Kusch and R. J. McClarren}, journal={arXiv preprint arXiv:2409.14938}, year={2024}, archivePrefix={arXiv}, eprint={2409.14938}, primaryClass={math.NA cs.NA} }
scalone2024a
arxiv-660743
2409.14939
FastGL: A GPU-Efficient Framework for Accelerating Sampling-Based GNN Training at Large Scale
<|reference_start|>FastGL: A GPU-Efficient Framework for Accelerating Sampling-Based GNN Training at Large Scale: Graph Neural Networks (GNNs) have shown great superiority on non-Euclidean graph data, achieving ground-breaking performance on various graph-related tasks. As a practical solution to train GNN on large graphs with billions of nodes and edges, the sampling-based training is widely adopted by existing training frameworks. However, through an in-depth analysis, we observe that the efficiency of existing sampling-based training frameworks is still limited due to the key bottlenecks lying in all three phases of sampling-based training, i.e., subgraph sample, memory IO, and computation. To this end, we propose FastGL, a GPU-efficient Framework for accelerating sampling-based training of GNN at Large scale by simultaneously optimizing all above three phases, taking into account both GPU characteristics and graph structure. Specifically, by exploiting the inherent overlap within graph structures, FastGL develops the Match-Reorder strategy to reduce the data traffic, which accelerates the memory IO without incurring any GPU memory overhead. Additionally, FastGL leverages a Memory-Aware computation method, harnessing the GPU memory's hierarchical nature to mitigate irregular data access during computation. FastGL further incorporates the Fused-Map approach aimed at diminishing the synchronization overhead during sampling. Extensive experiments demonstrate that FastGL can achieve an average speedup of 11.8x, 2.2x and 1.5x over the state-of-the-art frameworks PyG, DGL, and GNNLab, respectively.Our code is available at https://github.com/a1bc2def6g/fastgl-ae.<|reference_end|>
arxiv
@article{zhu2024fastgl:, title={FastGL: A GPU-Efficient Framework for Accelerating Sampling-Based GNN Training at Large Scale}, author={Zeyu Zhu, Peisong Wang, Qinghao Hu, Gang Li, Xiaoyao Liang, Jian Cheng}, journal={arXiv preprint arXiv:2409.14939}, year={2024}, doi={10.1145/3622781.3674167}, archivePrefix={arXiv}, eprint={2409.14939}, primaryClass={cs.LG cs.AR cs.DC} }
zhu2024fastgl:
arxiv-660744
2409.14940
Improving Adversarial Robustness for 3D Point Cloud Recognition at Test-Time through Purified Self-Training
<|reference_start|>Improving Adversarial Robustness for 3D Point Cloud Recognition at Test-Time through Purified Self-Training: Recognizing 3D point cloud plays a pivotal role in many real-world applications. However, deploying 3D point cloud deep learning model is vulnerable to adversarial attacks. Despite many efforts into developing robust model by adversarial training, they may become less effective against emerging attacks. This limitation motivates the development of adversarial purification which employs generative model to mitigate the impact of adversarial attacks. In this work, we highlight the remaining challenges from two perspectives. First, the purification based method requires retraining the classifier on purified samples which introduces additional computation overhead. Moreover, in a more realistic scenario, testing samples arrives in a streaming fashion and adversarial samples are not isolated from clean samples. These challenges motivates us to explore dynamically update model upon observing testing samples. We proposed a test-time purified self-training strategy to achieve this objective. Adaptive thresholding and feature distribution alignment are introduced to improve the robustness of self-training. Extensive results on different adversarial attacks suggest the proposed method is complementary to purification based method in handling continually changing adversarial attacks on the testing data stream.<|reference_end|>
arxiv
@article{lin2024improving, title={Improving Adversarial Robustness for 3D Point Cloud Recognition at Test-Time through Purified Self-Training}, author={Jinpeng Lin, Xulei Yang, Tianrui Li, Xun Xu}, journal={arXiv preprint arXiv:2409.14940}, year={2024}, archivePrefix={arXiv}, eprint={2409.14940}, primaryClass={cs.CV} }
lin2024improving
arxiv-660745
2409.14945
Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction
<|reference_start|>Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction: Recently, models for user representation learning have been widely applied in click-through-rate (CTR) and conversion-rate (CVR) prediction. Usually, the model learns a universal user representation as the input for subsequent scenario-specific models. However, in numerous industrial applications (e.g., recommendation and marketing), the business always operates such applications as various online activities among different user segmentation. These segmentation are always created by domain experts. Due to the difference in user distribution (i.e., user segmentation) and business objectives in subsequent tasks, learning solely on universal representation may lead to detrimental effects on both model performance and robustness. In this paper, we propose a novel learning framework that can first learn general universal user representation through information bottleneck. Then, merge and learn a segmentation-specific or a task-specific representation through neural interaction. We design the interactive learning process by leveraging a bipartite graph architecture to model the representation learning and merging between contextual clusters and each user segmentation. Our proposed method is evaluated in two open-source benchmarks, two offline business datasets, and deployed on two online marketing applications to predict users' CVR. The results demonstrate that our method can achieve superior performance and surpass the baseline methods.<|reference_end|>
arxiv
@article{tan2024adaptive, title={Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction}, author={Xiaoyu Tan, Yongxin Deng, Chao Qu, Siqiao Xue, Xiaoming Shi, James Zhang, Xihe Qiu}, journal={arXiv preprint arXiv:2409.14945}, year={2024}, doi={10.1145/3624918.3625323}, archivePrefix={arXiv}, eprint={2409.14945}, primaryClass={cs.LG cs.IR} }
tan2024adaptive
arxiv-660746
2409.14948
On the periodic decompositions of multidimensional configurations
<|reference_start|>On the periodic decompositions of multidimensional configurations: We consider $d$-dimensional configurations, that is, colorings of the $d$-dimensional integer grid $\mathbb{Z}^d$ with finitely many colors. Moreover, we interpret the colors as integers so that configurations are functions $\mathbb{Z}^d \to \mathbb{Z}$ of finite range. We say that such function is $k$-periodic if it is invariant under translations in $k$ linearly independent directions. It is known that if a configuration has a non-trivial annihilator, that is, if some non-trivial linear combination of its translations is the zero function, then it is a sum of finitely many periodic functions. This result is known as the periodic decomposition theorem. We prove two different improvements of it. The first improvement gives a characterization on annihilators of a configuration to guarantee the $k$-periodicity of the functions in its periodic decomposition -- for any $k$. The periodic decomposition theorem is then a special case of this result with $k=1$. The second improvement concerns so called sparse configurations for which the number of non-zero values in patterns grows at most linearly with respect to the diameter of the pattern. We prove that a sparse configuration with a non-trivial annihilator is a sum of finitely many periodic fibers where a fiber means a function whose non-zero values lie on a unique line.<|reference_end|>
arxiv
@article{herva2024on, title={On the periodic decompositions of multidimensional configurations}, author={Pyry Herva and Jarkko Kari}, journal={arXiv preprint arXiv:2409.14948}, year={2024}, archivePrefix={arXiv}, eprint={2409.14948}, primaryClass={cs.DM math.CO math.DS} }
herva2024on
arxiv-660747
2409.14950
Online Adaptation of Learned Vehicle Dynamics Model with Meta-Learning Approach
<|reference_start|>Online Adaptation of Learned Vehicle Dynamics Model with Meta-Learning Approach: We represent a vehicle dynamics model for autonomous driving near the limits of handling via a multi-layer neural network. Online adaptation is desirable in order to address unseen environments. However, the model needs to adapt to new environments without forgetting previously encountered ones. In this study, we apply Continual-MAML to overcome this difficulty. It enables the model to adapt to the previously encountered environments quickly and efficiently by starting updates from optimized initial parameters. We evaluate the impact of online model adaptation with respect to inference performance and impact on control performance of a model predictive path integral (MPPI) controller using the TRIKart platform. The neural network was pre-trained using driving data collected in our test environment, and experiments for online adaptation were executed on multiple different road conditions not contained in the training data. Empirical results show that the model using Continual-MAML outperforms the fixed model and the model using gradient descent in test set loss and online tracking performance of MPPI.<|reference_end|>
arxiv
@article{tsuchiya2024online, title={Online Adaptation of Learned Vehicle Dynamics Model with Meta-Learning Approach}, author={Yuki Tsuchiya, Thomas Balch, Paul Drews and Guy Rosman}, journal={arXiv preprint arXiv:2409.14950}, year={2024}, archivePrefix={arXiv}, eprint={2409.14950}, primaryClass={cs.RO} }
tsuchiya2024online
arxiv-660748
2409.14951
Robust Continuous Motion Strategy Against Muscle Rupture using Online Learning of Redundant Intersensory Networks for Musculoskeletal Humanoids
<|reference_start|>Robust Continuous Motion Strategy Against Muscle Rupture using Online Learning of Redundant Intersensory Networks for Musculoskeletal Humanoids: Musculoskeletal humanoids have various biomimetic advantages, of which redundant muscle arrangement is one of the most important features. This feature enables variable stiffness control and allows the robot to keep moving its joints even if one of the redundant muscles breaks, but this has been rarely explored. In this study, we construct a neural network that represents the relationship among sensors in the flexible and difficult-to-modelize body of the musculoskeletal humanoid, and by learning this neural network, accurate motions can be achieved. In order to take advantage of the redundancy of muscles, we discuss the use of this network for muscle rupture detection, online update of the intersensory relationship considering the muscle rupture, and body control and state estimation using the muscle rupture information. This study explains a method of constructing a musculoskeletal humanoid that continues to move and perform tasks robustly even when one muscle breaks.<|reference_end|>
arxiv
@article{kawaharazuka2024robust, title={Robust Continuous Motion Strategy Against Muscle Rupture using Online Learning of Redundant Intersensory Networks for Musculoskeletal Humanoids}, author={Kento Kawaharazuka, Manabu Nishiura, Yasunori Toshimitsu, Yusuke Omura, Yuya Koga, Yuki Asano, Koji Kawasaki, Masayuki Inaba}, journal={arXiv preprint arXiv:2409.14951}, year={2024}, doi={10.1016/j.robot.2022.104067}, archivePrefix={arXiv}, eprint={2409.14951}, primaryClass={cs.RO} }
kawaharazuka2024robust
arxiv-660749
2409.14952
The Laurent-Horner method for validated evaluation of Chebyshev expansions
<|reference_start|>The Laurent-Horner method for validated evaluation of Chebyshev expansions: We develop a simple two-step algorithm for enclosing Chebyshev expansions whose cost is linear in terms of the polynomial degree. The algorithm first transforms the expansion from Chebyshev to the Laurent basis and then applies the interval Horner method. It outperforms the existing eigenvalue-based methods if the degree is high or the evaluation point is close to the boundaries of the domain.<|reference_end|>
arxiv
@article{aurentz2024the, title={The Laurent-Horner method for validated evaluation of Chebyshev expansions}, author={Jared L. Aurentz and Behnam Hashemi}, journal={arXiv preprint arXiv:2409.14952}, year={2024}, doi={10.1016/j.aml.2019.106113}, archivePrefix={arXiv}, eprint={2409.14952}, primaryClass={math.NA cs.NA} }
aurentz2024the
arxiv-660750
2409.14953
MSARS: A Meta-Learning and Reinforcement Learning Framework for SLO Resource Allocation and Adaptive Scaling for Microservices
<|reference_start|>MSARS: A Meta-Learning and Reinforcement Learning Framework for SLO Resource Allocation and Adaptive Scaling for Microservices: Service Level Objectives (SLOs) aim to set threshold for service time in cloud services to ensure acceptable quality of service (QoS) and user satisfaction. Currently, many studies consider SLOs as a system resource to be allocated, ensuring QoS meets the SLOs. Existing microservice auto-scaling frameworks that rely on SLO resources often utilize complex and computationally intensive models, requiring significant time and resources to determine appropriate resource allocation. This paper aims to rapidly allocate SLO resources and minimize resource costs while ensuring application QoS meets the SLO requirements in a dynamically changing microservice environment. We propose MSARS, a framework that leverages meta-learning to quickly derive SLO resource allocation strategies and employs reinforcement learning for adaptive scaling of microservice resources. It features three innovative components: First, MSARS uses graph convolutional networks to predict the most suitable SLO resource allocation scheme for the current environment. Second, MSARS utilizes meta-learning to enable the graph neural network to quickly adapt to environmental changes ensuring adaptability in highly dynamic microservice environments. Third, MSARS generates auto-scaling policies for each microservice based on an improved Twin Delayed Deep Deterministic Policy Gradient (TD3) model. The adaptive auto-scaling policy integrates the SLO resource allocation strategy into the scheduling algorithm to satisfy SLOs. Finally, we compare MSARS with state-of-the-art resource auto-scaling algorithms that utilize neural networks and reinforcement learning, MSARS takes 40% less time to adapt to new environments, 38% reduction of SLO violations, and 8% less resources cost.<|reference_end|>
arxiv
@article{hu2024msars:, title={MSARS: A Meta-Learning and Reinforcement Learning Framework for SLO Resource Allocation and Adaptive Scaling for Microservices}, author={Kan Hu, Linfeng Wen, Minxian Xu, Kejiang Ye}, journal={arXiv preprint arXiv:2409.14953}, year={2024}, archivePrefix={arXiv}, eprint={2409.14953}, primaryClass={cs.DC} }
hu2024msars:
arxiv-660751
2409.14955
Efficient Collision Detection Framework for Enhancing Collision-Free Robot Motion
<|reference_start|>Efficient Collision Detection Framework for Enhancing Collision-Free Robot Motion: Fast and efficient collision detection is essential for motion generation in robotics. In this paper, we propose an efficient collision detection framework based on the Signed Distance Field (SDF) of robots, seamlessly integrated with a self-collision detection module. Firstly, we decompose the robot's SDF using forward kinematics and leverage multiple extremely lightweight networks in parallel to efficiently approximate the SDF. Moreover, we introduce support vector machines to integrate the self-collision detection module into the framework, which we refer to as the SDF-SC framework. Using statistical features, our approach unifies the representation of collision distance for both SDF and self-collision detection. During this process, we maintain and utilize the differentiable properties of the framework to optimize collision-free robot trajectories. Finally, we develop a reactive motion controller based on our framework, enabling real-time avoidance of multiple dynamic obstacles. While maintaining high accuracy, our framework achieves inference speeds up to five times faster than previous methods. Experimental results on the Franka robotic arm demonstrate the effectiveness of our approach.<|reference_end|>
arxiv
@article{zhu2024efficient, title={Efficient Collision Detection Framework for Enhancing Collision-Free Robot Motion}, author={Xiankun Zhu, Yucheng Xin, Shoujie Li, Houde Liu, Chongkun Xia, Bin Liang}, journal={arXiv preprint arXiv:2409.14955}, year={2024}, archivePrefix={arXiv}, eprint={2409.14955}, primaryClass={cs.RO} }
zhu2024efficient
arxiv-660752
2409.14961
UELLM: A Unified and Efficient Approach for LLM Inference Serving
<|reference_start|>UELLM: A Unified and Efficient Approach for LLM Inference Serving: In the context of Machine Learning as a Service (MLaaS) clouds, the extensive use of Large Language Models (LLMs) often requires efficient management of significant query loads. When providing real-time inference services, several challenges arise. Firstly, increasing the number of GPUs may lead to a decrease in inference speed due to heightened communication overhead, while an inadequate number of GPUs can lead to out-of-memory errors. Secondly, different deployment strategies need to be evaluated to guarantee optimal utilization and minimal inference latency. Lastly, inefficient orchestration of inference queries can easily lead to significant Service Level Objective (SLO) violations. Lastly, inefficient orchestration of inference queries can easily lead to significant Service Level Objective (SLO) violations. To address these challenges, we propose a Unified and Efficient approach for Large Language Model inference serving (UELLM), which consists of three main components: 1) resource profiler, 2) batch scheduler, and 3) LLM deployer. UELLM minimizes resource overhead, reduces inference latency, and lowers SLO violation rates. Compared with state-of-the-art (SOTA) techniques, UELLM reduces the inference latency by 72.3% to 90.3%, enhances GPU utilization by 1.2X to 4.1X, and increases throughput by 1.92X to 4.98X, it can also serve without violating the inference latency SLO.<|reference_end|>
arxiv
@article{he2024uellm:, title={UELLM: A Unified and Efficient Approach for LLM Inference Serving}, author={Yiyuan He, Minxian Xu, Jingfeng Wu, Wanyi Zheng, Kejiang Ye, Chengzhong Xu}, journal={arXiv preprint arXiv:2409.14961}, year={2024}, archivePrefix={arXiv}, eprint={2409.14961}, primaryClass={cs.DC} }
he2024uellm:
arxiv-660753
2409.14963
Exploring Fine-grained Retail Product Discrimination with Zero-shot Object Classification Using Vision-Language Models
<|reference_start|>Exploring Fine-grained Retail Product Discrimination with Zero-shot Object Classification Using Vision-Language Models: In smart retail applications, the large number of products and their frequent turnover necessitate reliable zero-shot object classification methods. The zero-shot assumption is essential to avoid the need for re-training the classifier every time a new product is introduced into stock or an existing product undergoes rebranding. In this paper, we make three key contributions. Firstly, we introduce the MIMEX dataset, comprising 28 distinct product categories. Unlike existing datasets in the literature, MIMEX focuses on fine-grained product classification and includes a diverse range of retail products. Secondly, we benchmark the zero-shot object classification performance of state-of-the-art vision-language models (VLMs) on the proposed MIMEX dataset. Our experiments reveal that these models achieve unsatisfactory fine-grained classification performance, highlighting the need for specialized approaches. Lastly, we propose a novel ensemble approach that integrates embeddings from CLIP and DINOv2 with dimensionality reduction techniques to enhance classification performance. By combining these components, our ensemble approach outperforms VLMs, effectively capturing visual cues crucial for fine-grained product discrimination. Additionally, we introduce a class adaptation method that utilizes visual prototyping with limited samples in scenarios with scarce labeled data, addressing a critical need in retail environments where product variety frequently changes. To encourage further research into zero-shot object classification for smart retail applications, we will release both the MIMEX dataset and benchmark to the research community. Interested researchers can contact the authors for details on the terms and conditions of use. 
The code is available: https://github.com/AnilOsmanTur/Zero-shot-Retail-Product-Classification.<|reference_end|>
arxiv
@article{tur2024exploring, title={Exploring Fine-grained Retail Product Discrimination with Zero-shot Object Classification Using Vision-Language Models}, author={Anil Osman Tur, Alessandro Conti, Cigdem Beyan, Davide Boscaini, Roberto Larcher, Stefano Messelodi, Fabio Poiesi, Elisa Ricci}, journal={arXiv preprint arXiv:2409.14963}, year={2024}, archivePrefix={arXiv}, eprint={2409.14963}, primaryClass={cs.CV} }
tur2024exploring
arxiv-660754
2409.14967
A Class of Countably Covered Two-Dimensional Sofic Shifts
<|reference_start|>A Class of Countably Covered Two-Dimensional Sofic Shifts: A multidimensional sofic shift is called countably covered if it has an SFT cover containing only countably many configurations. In contrast to the one-dimensional setting, not all countable sofic shifts are countably covered. We study a subclass of countable shift spaces and characterize the countably covered sofic shifts among them.<|reference_end|>
arxiv
@article{törmä2024a, title={A Class of Countably Covered Two-Dimensional Sofic Shifts}, author={Ilkka T\"orm\"a}, journal={arXiv preprint arXiv:2409.14967}, year={2024}, archivePrefix={arXiv}, eprint={2409.14967}, primaryClass={math.DS cs.FL} }
törmä2024a
arxiv-660755
2409.14968
Mutation-Based Deep Learning Framework Testing Method in JavaScript Environment
<|reference_start|>Mutation-Based Deep Learning Framework Testing Method in JavaScript Environment: In recent years, Deep Learning (DL) applications in JavaScript environment have become increasingly popular. As the infrastructure for DL applications, JavaScript DL frameworks play a crucial role in the development and deployment. It is essential to ensure the quality of JavaScript DL frameworks. However, the bottleneck of limited computational resources in the JavaScript environment brings new challenges to framework testing. Specifically, JavaScript DL frameworks are equipped with various optimization mechanisms (e.g., cache reuse, inference acceleration) to overcome the bottleneck of limited computational resources. These optimization mechanisms are overlooked by existing methods, resulting in many bugs in JavaScript DL frameworks being missed. To address the above challenges, we propose a mutation-based JavaScript DL framework testing method named DLJSFuzzer. DLJSFuzzer designs 13 tensor mutation rules targeting the cache reuse mechanism to generate test input tensors. Besides, DLJSFuzzer designs eight model mutation rules targeting the inference acceleration mechanism to generate test input models. To evaluate the effectiveness of DLJSFuzzer, we conduct experiments on the most widely-used JavaScript DL framework, TensorFlow.js. The experimental results show that DLJSFuzzer outperforms state-of-the-art methods in both effectiveness and efficiency. DLJSFuzzer successfully detects 21 unique crashes and 126 unique NaN & Inconsistency bugs. All detected crashes have been reported to the open-source community, with 12 of them already confirmed by developers. Additionally, DLJSFuzzer has improved by over 47% in model generation efficiency and over 91% in bug detection efficiency compared to all baselines.<|reference_end|>
arxiv
@article{zou2024mutation-based, title={Mutation-Based Deep Learning Framework Testing Method in JavaScript Environment}, author={Yinglong Zou, Juan Zhai, Chunrong Fang, Jiawei Liu, Tao Zheng, Zhenyu Chen}, journal={arXiv preprint arXiv:2409.14968}, year={2024}, archivePrefix={arXiv}, eprint={2409.14968}, primaryClass={cs.SE} }
zou2024mutation-based
arxiv-660756
2409.14969
Bilingual Rhetorical Structure Parsing with Large Parallel Annotations
<|reference_start|>Bilingual Rhetorical Structure Parsing with Large Parallel Annotations: Discourse parsing is a crucial task in natural language processing that aims to reveal the higher-level relations in a text. Despite growing interest in cross-lingual discourse parsing, challenges persist due to limited parallel data and inconsistencies in the Rhetorical Structure Theory (RST) application across languages and corpora. To address this, we introduce a parallel Russian annotation for the large and diverse English GUM RST corpus. Leveraging recent advances, our end-to-end RST parser achieves state-of-the-art results on both English and Russian corpora. It demonstrates effectiveness in both monolingual and bilingual settings, successfully transferring even with limited second-language annotation. To the best of our knowledge, this work is the first to evaluate the potential of cross-lingual end-to-end RST parsing on a manually annotated parallel corpus.<|reference_end|>
arxiv
@article{chistova2024bilingual, title={Bilingual Rhetorical Structure Parsing with Large Parallel Annotations}, author={Elena Chistova}, journal={Findings of the Association for Computational Linguistics ACL 2024}, year={2024}, doi={10.18653/v1/2024.findings-acl.577}, archivePrefix={arXiv}, eprint={2409.14969}, primaryClass={cs.CL} }
chistova2024bilingual
arxiv-660757
2409.14971
Blind Spatial Impulse Response Generation from Separate Room- and Scene-Specific Information
<|reference_start|>Blind Spatial Impulse Response Generation from Separate Room- and Scene-Specific Information: For audio in augmented reality (AR), knowledge of the users' real acoustic environment is crucial for rendering virtual sounds that seamlessly blend into the environment. As acoustic measurements are usually not feasible in practical AR applications, information about the room needs to be inferred from available sound sources. Then, additional sound sources can be rendered with the same room acoustic qualities. Crucially, these are placed at different positions than the sources available for estimation. Here, we propose to use an encoder network trained using a contrastive loss that maps input sounds to a low-dimensional feature space representing only room-specific information. Then, a diffusion-based spatial room impulse response generator is trained to take the latent space and generate a new response, given a new source-receiver position. We show how both room- and position-specific parameters are considered in the final output.<|reference_end|>
arxiv
@article{lluís2024blind, title={Blind Spatial Impulse Response Generation from Separate Room- and Scene-Specific Information}, author={Francesc Llu\'is and Nils Meyer-Kahlen}, journal={arXiv preprint arXiv:2409.14971}, year={2024}, archivePrefix={arXiv}, eprint={2409.14971}, primaryClass={cs.SD cs.LG eess.AS} }
lluís2024blind
arxiv-660758
2409.14972
Deep Reinforcement Learning-based Obstacle Avoidance for Robot Movement in Warehouse Environments
<|reference_start|>Deep Reinforcement Learning-based Obstacle Avoidance for Robot Movement in Warehouse Environments: At present, in most warehouse environments, the accumulation of goods is complex, and the management personnel in the control of goods at the same time with the warehouse mobile robot trajectory interaction, the traditional mobile robot can not be very good on the goods and pedestrians to feed back the correct obstacle avoidance strategy, in order to control the mobile robot in the warehouse environment efficiently and friendly to complete the obstacle avoidance task, this paper proposes a deep reinforcement learning based on the warehouse environment, the mobile robot obstacle avoidance Algorithm. Firstly, for the insufficient learning ability of the value function network in the deep reinforcement learning algorithm, the value function network is improved based on the pedestrian interaction, the interaction information between pedestrians is extracted through the pedestrian angle grid, and the temporal features of individual pedestrians are extracted through the attention mechanism, so that we can learn to obtain the relative importance of the current state and the historical trajectory state as well as the joint impact on the robot's obstacle avoidance strategy, which provides an opportunity for the learning of multi-layer perceptual machines afterwards. Secondly, the reward function of reinforcement learning is designed based on the spatial behaviour of pedestrians, and the robot is punished for the state where the angle changes too much, so as to achieve the requirement of comfortable obstacle avoidance; Finally, the feasibility and effectiveness of the deep reinforcement learning-based mobile robot obstacle avoidance algorithm in the warehouse environment in the complex environment of the warehouse are verified through simulation experiments.<|reference_end|>
arxiv
@article{li2024deep, title={Deep Reinforcement Learning-based Obstacle Avoidance for Robot Movement in Warehouse Environments}, author={Keqin Li, Jiajing Chen, Denzhi Yu, Tao Dajun, Xinyu Qiu, Lian Jieting, Sun Baiwei, Zhang Shengyuan, Zhenyu Wan, Ran Ji, Bo Hong, Fanghao Ni}, journal={arXiv preprint arXiv:2409.14972}, year={2024}, archivePrefix={arXiv}, eprint={2409.14972}, primaryClass={cs.RO cs.AI} }
li2024deep
arxiv-660759
2409.14975
Unbiased third-party bots lead to a tradeoff between cooperation and social payoffs
<|reference_start|>Unbiased third-party bots lead to a tradeoff between cooperation and social payoffs: The rise of artificial intelligence (AI) offers new opportunities to influence cooperative dynamics with greater applicability and control. In this paper, we examine the impact of third-party bots--agents that do not directly participate in games but unbiasedly modify the payoffs of normal players engaged in prisoner's dilemma interactions--on the emergence of cooperation. Using an evolutionary simulation model, we demonstrate that unbiased bots are unable to shift the defective equilibrium among normal players in well-mixed populations. However, in structured populations, despite their unbiased actions, the bots spontaneously generate distinct impacts on cooperators and defectors, leading to enhanced cooperation. Notably, bots that apply negative influences are more effective at promoting cooperation than those applying positive ones, as fewer bots are needed to catalyze cooperative behavior among normal players. However, as the number of bots increases, a trade-off emerges: while cooperation is maintained, overall social payoffs decline. These findings highlight the need for careful management of AI's role in social systems, as even well-intentioned bots can have unintended consequences on collective outcomes.<|reference_end|>
arxiv
@article{he2024unbiased, title={Unbiased third-party bots lead to a tradeoff between cooperation and social payoffs}, author={Zhixue He, Chen Shen, Lei Shi and Jun Tanimoto}, journal={arXiv preprint arXiv:2409.14975}, year={2024}, archivePrefix={arXiv}, eprint={2409.14975}, primaryClass={physics.soc-ph cs.CY} }
he2024unbiased
arxiv-660760
2409.14976
A new baseline for edge detection: Make Encoder-Decoder great again
<|reference_start|>A new baseline for edge detection: Make Encoder-Decoder great again: The performance of deep learning based edge detector has far exceeded that of humans, but the huge computational cost and complex training strategy hinder its further development and application. In this paper, we eliminate these complexities with a vanilla encoder-decoder based detector. Firstly, we design a bilateral encoder to decouple the extraction process of location features and semantic features. Since the location branch no longer provides cues for the semantic branch, the richness of features can be further compressed, which is the key to make our model more compact. We propose a cascaded feature fusion decoder, where the location features are progressively refined by semantic features. The refined location features are the only basis for generating the edge map. The coarse original location features and semantic features are avoided from direct contact with the final result. So the noise in the location features and the location error in the semantic features can be suppressed in the generated edge map. The proposed New Baseline for Edge Detection (NBED) achieves superior performance consistently across multiple edge detection benchmarks, even compared with those methods with huge computational cost and complex training strategy. The ODS of NBED on BSDS500 is 0.838, achieving state-of-the-art performance. Our study shows that what really matters in the current edge detection is high-quality features, and we can make the encoder-decoder based detector great again even without complex training strategies and huge computational cost. The code is available at https://github.com/Li-yachuan/NBED.<|reference_end|>
arxiv
@article{li2024a, title={A new baseline for edge detection: Make Encoder-Decoder great again}, author={Yachuan Li, Xavier Soria Pomab, Yongke Xi, Guanlin Li, Chaozhi Yang, Qian Xiao, Yun Bai and Zongmin LI}, journal={arXiv preprint arXiv:2409.14976}, year={2024}, archivePrefix={arXiv}, eprint={2409.14976}, primaryClass={cs.CV} }
li2024a
arxiv-660761
2409.14978
TS-TCD: Triplet-Level Cross-Modal Distillation for Time-Series Forecasting Using Large Language Models
<|reference_start|>TS-TCD: Triplet-Level Cross-Modal Distillation for Time-Series Forecasting Using Large Language Models: In recent years, large language models (LLMs) have shown great potential in time-series analysis by capturing complex dependencies and improving predictive performance. However, existing approaches often struggle with modality alignment, leading to suboptimal results. To address these challenges, we present a novel framework, TS-TCD, which introduces a comprehensive three-tiered cross-modal knowledge distillation mechanism. Unlike prior work that focuses on isolated alignment techniques, our framework systematically integrates: 1) Dynamic Adaptive Gating for Input Encoding and Alignment}, ensuring coherent alignment between time-series tokens and QR-decomposed textual embeddings; 2) Layer-Wise Contrastive Learning}, aligning intermediate representations across modalities to reduce feature-level discrepancies; and 3) Optimal Transport-Driven Output Alignment}, which ensures consistent output predictions through fine-grained cross-modal alignment. Extensive experiments on benchmark time-series datasets demonstrate that TS-TCD achieves state-of-the-art results, outperforming traditional methods in both accuracy and robustness.<|reference_end|>
arxiv
@article{wang2024ts-tcd:, title={TS-TCD: Triplet-Level Cross-Modal Distillation for Time-Series Forecasting Using Large Language Models}, author={Pengfei Wang, Huanran Zheng, Silong Dai, Wenjing Yue, Wei Zhu, and Xiaoling Wang}, journal={arXiv preprint arXiv:2409.14978}, year={2024}, archivePrefix={arXiv}, eprint={2409.14978}, primaryClass={cs.AI} }
wang2024ts-tcd:
arxiv-660762
2409.14979
A DOFs condensation based algorithm for solving saddle point systems in contact computation
<|reference_start|>A DOFs condensation based algorithm for solving saddle point systems in contact computation: In contact mechanics computation, the constraint conditions on the contact surfaces are typically enforced by the Lagrange multiplier method, resulting in a saddle point system. The mortar finite element method is usually employed to discretize the variational form on the meshed contact surfaces, leading to a large-scale discretized saddle point system. Due to the indefiniteness of the discretized system, it is a challenge to solve the saddle point algebraic system. For two-dimensional tied contact problem, an efficient DOFs condensation technique is developed. The essential of the proposed method is to carry out the DOFs elimination by using the tridiagonal characteristic of the mortar matrix. The scale of the linear system obtained after DOFs elimination is smaller, and the matrix is symmetric positive definite. By using the preconditioned conjugate gradient (PCG) method, the linear system can be solved efficiently. Numerical results show the effectiveness of the method.<|reference_end|>
arxiv
@article{duan2024a, title={A DOFs condensation based algorithm for solving saddle point systems in contact computation}, author={Xiaoyu Duan, Hengbin An, Zeyao Mo}, journal={arXiv preprint arXiv:2409.14979}, year={2024}, archivePrefix={arXiv}, eprint={2409.14979}, primaryClass={math.NA cs.NA} }
duan2024a
arxiv-660763
2409.14980
(De)-regularized Maximum Mean Discrepancy Gradient Flow
<|reference_start|>(De)-regularized Maximum Mean Discrepancy Gradient Flow: We introduce a (de)-regularization of the Maximum Mean Discrepancy (DrMMD) and its Wasserstein gradient flow. Existing gradient flows that transport samples from source distribution to target distribution with only target samples, either lack tractable numerical implementation ($f$-divergence flows) or require strong assumptions, and modifications such as noise injection, to ensure convergence (Maximum Mean Discrepancy flows). In contrast, DrMMD flow can simultaneously (i) guarantee near-global convergence for a broad class of targets in both continuous and discrete time, and (ii) be implemented in closed form using only samples. The former is achieved by leveraging the connection between the DrMMD and the $\chi^2$-divergence, while the latter comes by treating DrMMD as MMD with a de-regularized kernel. Our numerical scheme uses an adaptive de-regularization schedule throughout the flow to optimally trade off between discretization errors and deviations from the $\chi^2$ regime. The potential application of the DrMMD flow is demonstrated across several numerical experiments, including a large-scale setting of training student/teacher networks.<|reference_end|>
arxiv
@article{chen2024(de)-regularized, title={(De)-regularized Maximum Mean Discrepancy Gradient Flow}, author={Zonghao Chen, Aratrika Mustafi, Pierre Glaser, Anna Korba, Arthur Gretton, Bharath K. Sriperumbudur}, journal={arXiv preprint arXiv:2409.14980}, year={2024}, archivePrefix={arXiv}, eprint={2409.14980}, primaryClass={stat.ML cs.LG} }
chen2024(de)-regularized
arxiv-660764
2409.14981
On The Specialization of Neural Modules
<|reference_start|>On The Specialization of Neural Modules: A number of machine learning models have been proposed with the goal of achieving systematic generalization: the ability to reason about new situations by combining aspects of previous experiences. These models leverage compositional architectures which aim to learn specialized modules dedicated to structures in a task that can be composed to solve novel problems with similar structures. While the compositionality of these architectures is guaranteed by design, the modules specializing is not. Here we theoretically study the ability of network modules to specialize to useful structures in a dataset and achieve systematic generalization. To this end we introduce a minimal space of datasets motivated by practical systematic generalization benchmarks. From this space of datasets we present a mathematical definition of systematicity and study the learning dynamics of linear neural modules when solving components of the task. Our results shed light on the difficulty of module specialization, what is required for modules to successfully specialize, and the necessity of modular architectures to achieve systematicity. Finally, we confirm that the theoretical results in our tractable setting generalize to more complex datasets and non-linear architectures.<|reference_end|>
arxiv
@article{jarvis2024on, title={On The Specialization of Neural Modules}, author={Devon Jarvis, Richard Klein, Benjamin Rosman and Andrew M. Saxe}, journal={arXiv preprint arXiv:2409.14981}, year={2024}, archivePrefix={arXiv}, eprint={2409.14981}, primaryClass={cs.LG cs.AI} }
jarvis2024on
arxiv-660765
2409.14983
Dynamic Integration of Task-Specific Adapters for Class Incremental Learning
<|reference_start|>Dynamic Integration of Task-Specific Adapters for Class Incremental Learning: Non-exemplar class Incremental Learning (NECIL) enables models to continuously acquire new classes without retraining from scratch and storing old task exemplars, addressing privacy and storage issues. However, the absence of data from earlier tasks exacerbates the challenge of catastrophic forgetting in NECIL. In this paper, we propose a novel framework called Dynamic Integration of task-specific Adapters (DIA), which comprises two key components: Task-Specific Adapter Integration (TSAI) and Patch-Level Model Alignment. TSAI boosts compositionality through a patch-level adapter integration strategy, which provides a more flexible compositional solution while maintaining low computation costs. Patch-Level Model Alignment maintains feature consistency and accurate decision boundaries via two specialized mechanisms: Patch-Level Distillation Loss (PDL) and Patch-Level Feature Reconstruction method (PFR). Specifically, the PDL preserves feature-level consistency between successive models by implementing a distillation loss based on the contributions of patch tokens to new class learning. The PFR facilitates accurate classifier alignment by reconstructing old class features from previous tasks that adapt to new task knowledge. Extensive experiments validate the effectiveness of our DIA, revealing significant improvements on benchmark datasets in the NECIL setting, maintaining an optimal balance between computational complexity and accuracy. The full code implementation will be made publicly available upon the publication of this paper.<|reference_end|>
arxiv
@article{li2024dynamic, title={Dynamic Integration of Task-Specific Adapters for Class Incremental Learning}, author={Jiashuo Li, Shaokun Wang, Bo Qian, Yuhang He, Xing Wei, Yihong Gong}, journal={arXiv preprint arXiv:2409.14983}, year={2024}, archivePrefix={arXiv}, eprint={2409.14983}, primaryClass={cs.CV cs.AI cs.LG} }
li2024dynamic
arxiv-660766
2409.14984
SocialCircle+: Learning the Angle-based Conditioned Interaction Representation for Pedestrian Trajectory Prediction
<|reference_start|>SocialCircle+: Learning the Angle-based Conditioned Interaction Representation for Pedestrian Trajectory Prediction: Trajectory prediction is a crucial aspect of understanding human behaviors. Researchers have made efforts to represent socially interactive behaviors among pedestrians and utilize various networks to enhance prediction capability. Unfortunately, they still face challenges not only in fully explaining and measuring how these interactive behaviors work to modify trajectories but also in modeling pedestrians' preferences to plan or participate in social interactions in response to the changeable physical environments as extra conditions. This manuscript mainly focuses on the above explainability and conditionality requirements for trajectory prediction networks. Inspired by marine animals perceiving other companions and the environment underwater by echolocation, this work constructs an angle-based conditioned social interaction representation SocialCircle+ to represent the socially interactive context and its corresponding conditions. It employs a social branch and a conditional branch to describe how pedestrians are positioned in prediction scenes socially and physically in angle-based-cyclic-sequence forms. Then, adaptive fusion is applied to fuse the above conditional clues onto the social ones to learn the final interaction representation. Experiments demonstrate the superiority of SocialCircle+ with different trajectory prediction backbones. Moreover, counterfactual interventions have been made to simultaneously verify the modeling capacity of causalities among interactive variables and the conditioning capability.<|reference_end|>
arxiv
@article{wong2024socialcircle+:, title={SocialCircle+: Learning the Angle-based Conditioned Interaction Representation for Pedestrian Trajectory Prediction}, author={Conghao Wong, Beihao Xia, Ziqian Zou, Xinge You}, journal={arXiv preprint arXiv:2409.14984}, year={2024}, archivePrefix={arXiv}, eprint={2409.14984}, primaryClass={cs.CV} }
wong2024socialcircle+:
arxiv-660767
2409.14985
Sparse-to-Dense LiDAR Point Generation by LiDAR-Camera Fusion for 3D Object Detection
<|reference_start|>Sparse-to-Dense LiDAR Point Generation by LiDAR-Camera Fusion for 3D Object Detection: Accurately detecting objects at long distances remains a critical challenge in 3D object detection when relying solely on LiDAR sensors due to the inherent limitations of data sparsity. To address this issue, we propose the LiDAR-Camera Augmentation Network (LCANet), a novel framework that reconstructs LiDAR point cloud data by fusing 2D image features, which contain rich semantic information, generating additional points to improve detection accuracy. LCANet fuses data from LiDAR sensors and cameras by projecting image features into the 3D space, integrating semantic information into the point cloud data. This fused data is then encoded to produce 3D features that contain both semantic and spatial information, which are further refined to reconstruct final points before bounding box prediction. This fusion effectively compensates for LiDAR's weakness in detecting objects at long distances, which are often represented by sparse points. Additionally, due to the sparsity of many objects in the original dataset, which makes effective supervision for point generation challenging, we employ a point cloud completion network to create a complete point cloud dataset that supervises the generation of dense point clouds in our network. Extensive experiments on the KITTI and Waymo datasets demonstrate that LCANet significantly outperforms existing models, particularly in detecting sparse and distant objects.<|reference_end|>
arxiv
@article{lee2024sparse-to-dense, title={Sparse-to-Dense LiDAR Point Generation by LiDAR-Camera Fusion for 3D Object Detection}, author={Minseung Lee, Seokha Moon, Seung Joon Lee and Jinkyu Kim}, journal={arXiv preprint arXiv:2409.14985}, year={2024}, archivePrefix={arXiv}, eprint={2409.14985}, primaryClass={cs.CV cs.AI} }
lee2024sparse-to-dense
arxiv-660768
2409.14986
Evaluating Theory of (an uncertain) Mind: Predicting the Uncertain Beliefs of Others in Conversation Forecasting
<|reference_start|>Evaluating Theory of (an uncertain) Mind: Predicting the Uncertain Beliefs of Others in Conversation Forecasting: Typically, when evaluating Theory of Mind, we consider the beliefs of others to be binary: held or not held. But what if someone is unsure about their own beliefs? How can we quantify this uncertainty? We propose a new suite of tasks, challenging language models (LMs) to model the uncertainty of others in dialogue. We design these tasks around conversation forecasting, wherein an agent forecasts an unobserved outcome to a conversation. Uniquely, we view interlocutors themselves as forecasters, asking an LM to predict the uncertainty of the interlocutors (a probability). We experiment with re-scaling methods, variance reduction strategies, and demographic context, for this regression task, conducting experiments on three dialogue corpora (social, negotiation, task-oriented) with eight LMs. While LMs can explain up to 7% variance in the uncertainty of others, we highlight the difficulty of the tasks and room for future work, especially in practical applications, like anticipating ``false<|reference_end|>
arxiv
@article{sicilia2024evaluating, title={Evaluating Theory of (an uncertain) Mind: Predicting the Uncertain Beliefs of Others in Conversation Forecasting}, author={Anthony Sicilia and Malihe Alikhani}, journal={arXiv preprint arXiv:2409.14986}, year={2024}, archivePrefix={arXiv}, eprint={2409.14986}, primaryClass={cs.CL cs.AI} }
sicilia2024evaluating
arxiv-660769
2409.14987
A Comparative Quality Metric for Untargeted Fuzzing with Logic State Coverage
<|reference_start|>A Comparative Quality Metric for Untargeted Fuzzing with Logic State Coverage: While fuzzing is widely accepted as an efficient program testing technique, it is still unclear how to measure the comparative quality of different fuzzers. The current de facto quality metrics are edge coverage and the number of discovered bugs, but they are frequently discredited by inconclusive, exaggerated, or even counter-intuitive results. To establish a more reliable quality metric, we first note that fuzzing aims to reduce the number of unknown abnormal behaviors by observing more interesting (i.e., relating to unknown abnormal) behaviors. The more interesting behaviors a fuzzer has observed, the stronger guarantee it can provide about the absence of unknown abnormal behaviors. This suggests that the number of observed interesting behaviors must directly indicate the fuzzing quality. In this work, we propose logic state coverage as a proxy metric to count observed interesting behaviors. A logic state is a set of satisfied branches during one execution, where its coverage is the count of individual observed logic states during a fuzzing campaign. A logic state distinguishes less repetitive (i.e., more interesting) behaviors in a finer granularity, making the amount of logic state coverage reliably proportional to the number of observed interesting behaviors. We implemented logic state coverage using a bloom filter and performed a preliminary evaluation with AFL++ and XMLLint.<|reference_end|>
arxiv
@article{lee2024a, title={A Comparative Quality Metric for Untargeted Fuzzing with Logic State Coverage}, author={Gwangmu Lee}, journal={arXiv preprint arXiv:2409.14987}, year={2024}, archivePrefix={arXiv}, eprint={2409.14987}, primaryClass={cs.SE cs.CR} }
lee2024a
arxiv-660770
2409.14988
Beyond Fine-tuning: Unleashing the Potential of Continuous Pretraining for Clinical LLMs
<|reference_start|>Beyond Fine-tuning: Unleashing the Potential of Continuous Pretraining for Clinical LLMs: Large Language Models (LLMs) have demonstrated significant potential in transforming clinical applications. In this study, we investigate the efficacy of four techniques in adapting LLMs for clinical use-cases: continuous pretraining, instruct fine-tuning, NEFTune, and prompt engineering. We employ these methods on Mistral 7B and Mixtral 8x7B models, leveraging a large-scale clinical pretraining dataset of 50 billion tokens and an instruct fine-tuning dataset of 500 million tokens. Our evaluation across various clinical tasks reveals the impact of each technique. While continuous pretraining beyond 250 billion tokens yields marginal improvements on its own, it establishes a strong foundation for instruct fine-tuning. Notably, NEFTune, designed primarily to enhance generation quality, surprisingly demonstrates additional gains on our benchmark. Complex prompt engineering methods further enhance performance. These findings show the importance of tailoring fine-tuning strategies and exploring innovative techniques to optimize LLM performance in the clinical domain.<|reference_end|>
arxiv
@article{christophe2024beyond, title={Beyond Fine-tuning: Unleashing the Potential of Continuous Pretraining for Clinical LLMs}, author={Cl\'ement Christophe, Tathagata Raha, Svetlana Maslenkova, Muhammad Umar Salman, Praveen K Kanithi, Marco AF Pimentel, Shadab Khan}, journal={arXiv preprint arXiv:2409.14988}, year={2024}, archivePrefix={arXiv}, eprint={2409.14988}, primaryClass={cs.CL} }
christophe2024beyond
arxiv-660771
2409.14989
Methods for Convex $(L_0,L_1)$-Smooth Optimization: Clipping, Acceleration, and Adaptivity
<|reference_start|>Methods for Convex $(L_0,L_1)$-Smooth Optimization: Clipping, Acceleration, and Adaptivity: Due to the non-smoothness of optimization problems in Machine Learning, generalized smoothness assumptions have been gaining a lot of attention in recent years. One of the most popular assumptions of this type is $(L_0,L_1)$-smoothness (Zhang et al., 2020). In this paper, we focus on the class of (strongly) convex $(L_0,L_1)$-smooth functions and derive new convergence guarantees for several existing methods. In particular, we derive improved convergence rates for Gradient Descent with (Smoothed) Gradient Clipping and for Gradient Descent with Polyak Stepsizes. In contrast to the existing results, our rates do not rely on the standard smoothness assumption and do not suffer from the exponential dependency from the initial distance to the solution. We also extend these results to the stochastic case under the over-parameterization assumption, propose a new accelerated method for convex $(L_0,L_1)$-smooth optimization, and derive new convergence rates for Adaptive Gradient Descent (Malitsky and Mishchenko, 2020).<|reference_end|>
arxiv
@article{gorbunov2024methods, title={Methods for Convex $(L_0,L_1)$-Smooth Optimization: Clipping, Acceleration, and Adaptivity}, author={Eduard Gorbunov, Nazarii Tupitsa, Sayantan Choudhury, Alen Aliev, Peter Richt{\'a}rik, Samuel Horv{\'a}th, Martin Tak{\'a}{\v{c}}}, journal={arXiv preprint arXiv:2409.14989}, year={2024}, archivePrefix={arXiv}, eprint={2409.14989}, primaryClass={math.OC cs.LG} }
gorbunov2024methods
arxiv-660772
2409.14993
Multi-Modal Generative AI: Multi-modal LLM, Diffusion and Beyond
<|reference_start|>Multi-Modal Generative AI: Multi-modal LLM, Diffusion and Beyond: Multi-modal generative AI has received increasing attention in both academia and industry. Particularly, two dominant families of techniques are: i) The multi-modal large language model (MLLM) such as GPT-4V, which shows impressive ability for multi-modal understanding; ii) The diffusion model such as Sora, which exhibits remarkable multi-modal powers, especially with respect to visual generation. As such, one natural question arises: Is it possible to have a unified model for both understanding and generation? To answer this question, in this paper, we first provide a detailed review of both MLLM and diffusion models, including their probabilistic modeling procedure, multi-modal architecture design, and advanced applications to image/video large language models as well as text-to-image/video generation. Then, we discuss the two important questions on the unified model: i) whether the unified model should adopt the auto-regressive or diffusion probabilistic modeling, and ii) whether the model should utilize a dense architecture or the Mixture of Experts(MoE) architectures to better support generation and understanding, two objectives. We further provide several possible strategies for building a unified model and analyze their potential advantages and disadvantages. We also summarize existing large-scale multi-modal datasets for better model pretraining in the future. To conclude the paper, we present several challenging future directions, which we believe can contribute to the ongoing advancement of multi-modal generative AI.<|reference_end|>
arxiv
@article{chen2024multi-modal, title={Multi-Modal Generative AI: Multi-modal LLM, Diffusion and Beyond}, author={Hong Chen, Xin Wang, Yuwei Zhou, Bin Huang, Yipeng Zhang, Wei Feng, Houlun Chen, Zeyang Zhang, Siao Tang, and Wenwu Zhu}, journal={arXiv preprint arXiv:2409.14993}, year={2024}, archivePrefix={arXiv}, eprint={2409.14993}, primaryClass={cs.AI cs.CV} }
chen2024multi-modal
arxiv-660773
2409.14997
Enhancing Aspect-based Sentiment Analysis in Tourism Using Large Language Models and Positional Information
<|reference_start|>Enhancing Aspect-based Sentiment Analysis in Tourism Using Large Language Models and Positional Information: Aspect-Based Sentiment Analysis (ABSA) in tourism plays a significant role in understanding tourists' evaluations of specific aspects of attractions, which is crucial for driving innovation and development in the tourism industry. However, traditional pipeline models are afflicted by issues such as error propagation and incomplete extraction of sentiment elements. To alleviate this issue, this paper proposes an aspect-based sentiment analysis model, ACOS_LLM, for Aspect-Category-Opinion-Sentiment Quadruple Extraction (ACOSQE). The model comprises two key stages: auxiliary knowledge generation and ACOSQE. Firstly, Adalora is used to fine-tune large language models for generating high-quality auxiliary knowledge. To enhance model efficiency, Sparsegpt is utilized to compress the fine-tuned model to 50% sparsity. Subsequently, Positional information and sequence modeling are employed to achieve the ACOSQE task, with auxiliary knowledge and the original text as inputs. Experiments are conducted on both self-created tourism datasets and publicly available datasets, Rest15 and Rest16. Results demonstrate the model's superior performance, with an F1 improvement of 7.49% compared to other models on the tourism dataset. Additionally, there is an F1 improvement of 0.05% and 1.06% on the Rest15 and Rest16 datasets, respectively.<|reference_end|>
arxiv
@article{xu2024enhancing, title={Enhancing Aspect-based Sentiment Analysis in Tourism Using Large Language Models and Positional Information}, author={Chun Xu, Mengmeng Wang, Yan Ren, and Shaolin Zhu}, journal={arXiv preprint arXiv:2409.14997}, year={2024}, archivePrefix={arXiv}, eprint={2409.14997}, primaryClass={cs.CL} }
xu2024enhancing
arxiv-660774
2409.15004
ViBERTgrid BiLSTM-CRF: Multimodal Key Information Extraction from Unstructured Financial Documents
<|reference_start|>ViBERTgrid BiLSTM-CRF: Multimodal Key Information Extraction from Unstructured Financial Documents: Multimodal key information extraction (KIE) models have been studied extensively on semi-structured documents. However, their investigation on unstructured documents is an emerging research topic. The paper presents an approach to adapt a multimodal transformer (i.e., ViBERTgrid previously explored on semi-structured documents) for unstructured financial documents, by incorporating a BiLSTM-CRF layer. The proposed ViBERTgrid BiLSTM-CRF model demonstrates a significant improvement in performance (up to 2 percentage points) on named entity recognition from unstructured documents in financial domain, while maintaining its KIE performance on semi-structured documents. As an additional contribution, we publicly released token-level annotations for the SROIE dataset in order to pave the way for its use in multimodal sequence labeling models.<|reference_end|>
arxiv
@article{pala2024vibertgrid, title={ViBERTgrid BiLSTM-CRF: Multimodal Key Information Extraction from Unstructured Financial Documents}, author={Furkan Pala, Mehmet Yasin Akp{\i}nar, Onur Deniz, G{\"u}l{\c{s}}en Eryi{\u{g}}it}, journal={arXiv preprint arXiv:2409.15004}, year={2024}, archivePrefix={arXiv}, eprint={2409.15004}, primaryClass={cs.AI cs.CL cs.CV cs.IR} }
pala2024vibertgrid
arxiv-660775
2409.15005
Method of Equal Shares with Bounded Overspending
<|reference_start|>Method of Equal Shares with Bounded Overspending: In participatory budgeting (PB), voters decide through voting which subset of projects to fund within a given budget. Proportionality in the context of PB is crucial to ensure equal treatment of all groups of voters. However, pure proportional rules can sometimes lead to suboptimal outcomes. We introduce the Method of Equal Shares with Bounded Overspending (BOS Equal Shares), a robust variant of Equal Shares that balances proportionality and efficiency. BOS Equal Shares addresses inefficiencies inherent in strict proportionality guarantees yet still provides good proportionality similar to the original Method of Equal Shares. In the course of the analysis, we also discuss a fractional variant of the method which allows for partial funding of projects.<|reference_end|>
arxiv
@article{papasotiropoulos2024method, title={Method of Equal Shares with Bounded Overspending}, author={Georgios Papasotiropoulos, Seyedeh Zeinab Pishbin, Oskar Skibski, Piotr Skowron, Tomasz W{\k{a}}s}, journal={arXiv preprint arXiv:2409.15005}, year={2024}, archivePrefix={arXiv}, eprint={2409.15005}, primaryClass={cs.GT cs.AI cs.MA} }
papasotiropoulos2024method
arxiv-660776
2409.15006
Generalizing monocular colonoscopy image depth estimation by uncertainty-based global and local fusion network
<|reference_start|>Generalizing monocular colonoscopy image depth estimation by uncertainty-based global and local fusion network: Objective: Depth estimation is crucial for endoscopic navigation and manipulation, but obtaining ground-truth depth maps in real clinical scenarios, such as the colon, is challenging. This study aims to develop a robust framework that generalizes well to real colonoscopy images, overcoming challenges like non-Lambertian surface reflection and diverse data distributions. Methods: We propose a framework combining a convolutional neural network (CNN) for capturing local features and a Transformer for capturing global information. An uncertainty-based fusion block was designed to enhance generalization by identifying complementary contributions from the CNN and Transformer branches. The network can be trained with simulated datasets and generalize directly to unseen clinical data without any fine-tuning. Results: Our method is validated on multiple datasets and demonstrates an excellent generalization ability across various datasets and anatomical structures. Furthermore, qualitative analysis in real clinical scenarios confirmed the robustness of the proposed method. Conclusion: The integration of local and global features through the CNN-Transformer architecture, along with the uncertainty-based fusion block, improves depth estimation performance and generalization in both simulated and real-world endoscopic environments. Significance: This study offers a novel approach to estimate depth maps for endoscopy images despite the complex conditions in clinic, serving as a foundation for endoscopic automatic navigation and other clinical tasks, such as polyp detection and segmentation.<|reference_end|>
arxiv
@article{du2024generalizing, title={Generalizing monocular colonoscopy image depth estimation by uncertainty-based global and local fusion network}, author={Sijia Du, Chengfeng Zhou, Suncheng Xiang, Jianwei Xu, Dahong Qian}, journal={arXiv preprint arXiv:2409.15006}, year={2024}, archivePrefix={arXiv}, eprint={2409.15006}, primaryClass={cs.CV cs.AI} }
du2024generalizing
arxiv-660777
2409.15008
Sketched Lanczos uncertainty score: a low-memory summary of the Fisher information
<|reference_start|>Sketched Lanczos uncertainty score: a low-memory summary of the Fisher information: Current uncertainty quantification is memory and compute expensive, which hinders practical uptake. To counter, we develop Sketched Lanczos Uncertainty (SLU): an architecture-agnostic uncertainty score that can be applied to pre-trained neural networks with minimal overhead. Importantly, the memory use of SLU only grows logarithmically with the number of model parameters. We combine Lanczos' algorithm with dimensionality reduction techniques to compute a sketch of the leading eigenvectors of a matrix. Applying this novel algorithm to the Fisher information matrix yields a cheap and reliable uncertainty score. Empirically, SLU yields well-calibrated uncertainties, reliably detects out-of-distribution examples, and consistently outperforms existing methods in the low-memory regime.<|reference_end|>
arxiv
@article{miani2024sketched, title={Sketched Lanczos uncertainty score: a low-memory summary of the Fisher information}, author={Marco Miani, Lorenzo Beretta, S{\o}ren Hauberg}, journal={arXiv preprint arXiv:2409.15008}, year={2024}, archivePrefix={arXiv}, eprint={2409.15008}, primaryClass={math.NA cs.NA} }
miani2024sketched
arxiv-660778
2409.15010
DepthART: Monocular Depth Estimation as Autoregressive Refinement Task
<|reference_start|>DepthART: Monocular Depth Estimation as Autoregressive Refinement Task: Despite recent success in discriminative approaches in monocular depth estimation its quality remains limited by training datasets. Generative approaches mitigate this issue by leveraging strong priors derived from training on internet-scale datasets. Recent studies have demonstrated that large text-to-image diffusion models achieve state-of-the-art results in depth estimation when fine-tuned on small depth datasets. Concurrently, autoregressive generative approaches, such as the Visual AutoRegressive modeling~(VAR), have shown promising results in conditioned image synthesis. Following the visual autoregressive modeling paradigm, we introduce the first autoregressive depth estimation model based on the visual autoregressive transformer. Our primary contribution is DepthART -- a novel training method formulated as Depth Autoregressive Refinement Task. Unlike the original VAR training procedure, which employs static targets, our method utilizes a dynamic target formulation that enables model self-refinement and incorporates multi-modal guidance during training. Specifically, we use model predictions as inputs instead of ground truth token maps during training, framing the objective as residual minimization. Our experiments demonstrate that the proposed training approach significantly outperforms visual autoregressive modeling via next-scale prediction in the depth estimation task. The Visual Autoregressive Transformer trained with our approach on Hypersim achieves superior results on a set of unseen benchmarks compared to other generative and discriminative baselines.<|reference_end|>
arxiv
@article{gabdullin2024depthart:, title={DepthART: Monocular Depth Estimation as Autoregressive Refinement Task}, author={Bulat Gabdullin, Nina Konovalova, Nikolay Patakin, Dmitry Senushkin, Anton Konushin}, journal={arXiv preprint arXiv:2409.15010}, year={2024}, archivePrefix={arXiv}, eprint={2409.15010}, primaryClass={cs.CV} }
gabdullin2024depthart:
arxiv-660779
2409.15012
Inference-Friendly Models With MixAttention
<|reference_start|>Inference-Friendly Models With MixAttention: The size of the key-value (KV) cache plays a critical role in determining both the maximum context length and the number of concurrent requests supported during inference in modern language models. The KV cache size grows proportionally with the number of attention heads and the tokens processed, leading to increased memory consumption and slower inference for long inputs. In this work, we explore the use of MixAttention, a model architecture modification closely related to a blog published by Character.AI. MixAttention combines sliding window attention, where only a small subset of recent tokens is stored in the KV cache, with KV cache sharing across layers. Our experiments demonstrate that MixAttention significantly reduces memory usage and improves inference speed without sacrificing model performance in both short and long-context tasks. We also explore various configurations of this architecture, identifying those that maintain quality across evaluation metrics while optimizing resource efficiency.<|reference_end|>
arxiv
@article{rajput2024inference-friendly, title={Inference-Friendly Models With MixAttention}, author={Shashank Rajput, Ying Sheng, Sean Owen, Vitaliy Chiley}, journal={arXiv preprint arXiv:2409.15012}, year={2024}, archivePrefix={arXiv}, eprint={2409.15012}, primaryClass={cs.CL cs.AI} }
rajput2024inference-friendly
arxiv-660780
2409.15013
Analogous Alignments: Digital "Formally" meets Analog
<|reference_start|>Analogous Alignments: Digital "Formally" meets Analog: The complexity of modern-day System-on-Chips (SoCs) is continually increasing, and it becomes increasingly challenging to deliver dependable and credible chips in a short time-to-market. Especially, in the case of test chips, where the aim is to study the feasibility of the design, time is a crucial factor. Pre-silicon functional verification is one of the main contributors that makes up a large portion of the product development cycle. Verification engineers often loosely verify test chips that turn out to be non-functional on the silicon, ultimately resulting in expensive re-spins. To left-shift the verification efforts, formal verification is a powerful methodology that aims to exhaustively verify designs, giving better confidence in the overall quality. This paper focuses on the pragmatic formal verification of a mixed signal Intellectual Property (IP) that has a combination of digital and analog blocks. This paper discusses a novel approach of including the analog behavioral model into the formal verification setup. Digital and Analog Mixed-Signal (AMS) designs, which are fundamentally different in nature, are integrated seamlessly in a formal verification setup, a concept that can be referred to as "Analogous Alignments". Our formal setup leverages powerful formal techniques such as FPV, CSR verification, and connectivity checks. The properties used for FPV are auto-generated using a metamodeling framework. The paper also discusses the challenges faced especially related to state-space explosion, non-compatibility of formal with AMS models, and techniques to mitigate them such as k-induction. With this verification approach, we were able to exhaustively verify the design within a reasonable time and with sufficient coverage. We also reported several bugs at an early stage, making the complete design verification process iterative and effective.<|reference_end|>
arxiv
@article{mohanty2024analogous, title={Analogous Alignments: Digital "Formally" meets Analog}, author={Hansa Mohanty and Deepak Narayan Gadde}, journal={arXiv preprint arXiv:2409.15013}, year={2024}, archivePrefix={arXiv}, eprint={2409.15013}, primaryClass={cs.AI cs.AR} }
mohanty2024analogous
arxiv-660781
2409.15014
Acting for the Right Reasons: Creating Reason-Sensitive Artificial Moral Agents
<|reference_start|>Acting for the Right Reasons: Creating Reason-Sensitive Artificial Moral Agents: We propose an extension of the reinforcement learning architecture that enables moral decision-making of reinforcement learning agents based on normative reasons. Central to this approach is a reason-based shield generator yielding a moral shield that binds the agent to actions that conform with recognized normative reasons so that our overall architecture restricts the agent to actions that are (internally) morally justified. In addition, we describe an algorithm that allows to iteratively improve the reason-based shield generator through case-based feedback from a moral judge.<|reference_end|>
arxiv
@article{baum2024acting, title={Acting for the Right Reasons: Creating Reason-Sensitive Artificial Moral Agents}, author={Kevin Baum, Lisa Dargasz, Felix Jahn, Timo P. Gros, Verena Wolf}, journal={arXiv preprint arXiv:2409.15014}, year={2024}, archivePrefix={arXiv}, eprint={2409.15014}, primaryClass={cs.AI cs.CY cs.LG} }
baum2024acting
arxiv-660782
2409.15019
Evaluating Synthetic Activations composed of SAE Latents in GPT-2
<|reference_start|>Evaluating Synthetic Activations composed of SAE Latents in GPT-2: Sparse Auto-Encoders (SAEs) are commonly employed in mechanistic interpretability to decompose the residual stream into monosemantic SAE latents. Recent work demonstrates that perturbing a model's activations at an early layer results in a step-function-like change in the model's final layer activations. Furthermore, the model's sensitivity to this perturbation differs between model-generated (real) activations and random activations. In our study, we assess model sensitivity in order to compare real activations to synthetic activations composed of SAE latents. Our findings indicate that synthetic activations closely resemble real activations when we control for the sparsity and cosine similarity of the constituent SAE latents. This suggests that real activations cannot be explained by a simple "bag of SAE latents" lacking internal structure, and instead suggests that SAE latents possess significant geometric and statistical properties. Notably, we observe that our synthetic activations exhibit less pronounced activation plateaus compared to those typically surrounding real activations.<|reference_end|>
arxiv
@article{giglemiani2024evaluating, title={Evaluating Synthetic Activations composed of SAE Latents in GPT-2}, author={Giorgi Giglemiani, Nora Petrova, Chatrik Singh Mangat, Jett Janiak, Stefan Heimersheim}, journal={arXiv preprint arXiv:2409.15019}, year={2024}, archivePrefix={arXiv}, eprint={2409.15019}, primaryClass={cs.LG} }
giglemiani2024evaluating
arxiv-660783
2409.15021
Cross Branch Feature Fusion Decoder for Consistency Regularization-based Semi-Supervised Change Detection
<|reference_start|>Cross Branch Feature Fusion Decoder for Consistency Regularization-based Semi-Supervised Change Detection: Semi-supervised change detection (SSCD) utilizes partially labeled data and a large amount of unlabeled data to detect changes. However, the transformer-based SSCD network does not perform as well as the convolution-based SSCD network due to the lack of labeled data. To overcome this limitation, we introduce a new decoder called Cross Branch Feature Fusion CBFF, which combines the strengths of both local convolutional branch and global transformer branch. The convolutional branch is easy to learn and can produce high-quality features with a small amount of labeled data. The transformer branch, on the other hand, can extract global context features but is hard to learn without a lot of labeled data. Using CBFF, we build our SSCD model based on a strong-to-weak consistency strategy. Through comprehensive experiments on WHU-CD and LEVIR-CD datasets, we have demonstrated the superiority of our method over seven state-of-the-art SSCD methods.<|reference_end|>
arxiv
@article{xing2024cross, title={Cross Branch Feature Fusion Decoder for Consistency Regularization-based Semi-Supervised Change Detection}, author={Yan Xing, Qi'ao Xu, Jingcheng Zeng, Rui Huang, Sihua Gao, Weifeng Xu, Yuxiang Zhang, Wei Fan}, journal={arXiv preprint arXiv:2409.15021}, year={2024}, doi={10.1109/ICASSP48485.2024.10446862}, archivePrefix={arXiv}, eprint={2409.15021}, primaryClass={cs.CV} }
xing2024cross
arxiv-660784
2409.15022
A Diagonal Structured State Space Model on Loihi 2 for Efficient Streaming Sequence Processing
<|reference_start|>A Diagonal Structured State Space Model on Loihi 2 for Efficient Streaming Sequence Processing: Deep State-Space Models (SSM) demonstrate state-of-the art performance on long-range sequence modeling tasks. While the recurrent structure of SSMs can be efficiently implemented as a convolution or as a parallel scan during training, recurrent token-by-token processing cannot currently be implemented efficiently on GPUs. Here, we demonstrate efficient token-by-token inference of the SSM S4D on Intel's Loihi 2 state-of-the-art neuromorphic processor. We compare this first ever neuromorphic-hardware implementation of an SSM on sMNIST, psMNIST, and sCIFAR to a recurrent and a convolutional implementation of S4D on Jetson Orin Nano (Jetson). While we find Jetson to perform better in an offline sample-by-sample based batched processing mode, Loihi 2 outperforms during token-by-token based processing, where it consumes 1000 times less energy with a 75 times lower latency and a 75 times higher throughput compared to the recurrent implementation of S4D on Jetson. This opens up new avenues towards efficient real-time streaming applications of SSMs.<|reference_end|>
arxiv
@article{meyer2024a, title={A Diagonal Structured State Space Model on Loihi 2 for Efficient Streaming Sequence Processing}, author={Svea Marie Meyer, Philipp Weidel, Philipp Plank, Leobardo Campos-Macias, Sumit Bam Shrestha, Philipp Stratmann, Mathis Richter}, journal={arXiv preprint arXiv:2409.15022}, year={2024}, archivePrefix={arXiv}, eprint={2409.15022}, primaryClass={cs.LG cs.AI cs.ET cs.NE} }
meyer2024a
arxiv-660785
2409.15023
Efficient Nearest Neighbor Search Using Dynamic Programming
<|reference_start|>Efficient Nearest Neighbor Search Using Dynamic Programming: When dealing with point clouds distributed on manifold surfaces in 3D space, or when the query point is far from the data, the efficiency of traditional nearest neighbor search algorithms (e.g., KD Tree and R Tree) may degrade. In extreme cases, the complexity of the query can approach O(n). In this paper, we propose a novel dynamic programming technique that precomputes a Directed Acyclic Graph (DAG) to enable more efficient nearest neighbor queries for 2D manifold data. By leveraging this structure, only a small number of distance comparisons between point pairs are required to accurately identify the nearest neighbor. Extensive experimental results demonstrate that our method achieves query speeds that are 1x-10x faster than traditional methods. Moreover, our algorithm exhibits significant potential. It achieves query efficiency comparable to KD-trees on uniformly distributed point clouds. Additionally, our algorithm supports nearest neighbor queries among the first k points. Coupled with our algorithm, a farthest point sampling algorithm with lower complexity can also be implemented. Furthermore, our method has the potential to support nearest neighbor queries with different types of primitives and distance metrics. We believe that the method proposed in this paper represents the most concise and straightforward exact nearest neighbor search algorithm currently available, and it will contribute significantly to advancements in the field.<|reference_end|>
arxiv
@article{wang2024efficient, title={Efficient Nearest Neighbor Search Using Dynamic Programming}, author={Pengfei Wang, Jiantao Song, Shiqing Xin, Shuangmin Chen, Changhe Tu, Wenping Wang, Jiaye Wang}, journal={arXiv preprint arXiv:2409.15023}, year={2024}, archivePrefix={arXiv}, eprint={2409.15023}, primaryClass={cs.CG cs.GR} }
wang2024efficient
arxiv-660786
2409.15027
Generative LLM Powered Conversational AI Application for Personalized Risk Assessment: A Case Study in COVID-19
<|reference_start|>Generative LLM Powered Conversational AI Application for Personalized Risk Assessment: A Case Study in COVID-19: Large language models (LLMs) have shown remarkable capabilities in various natural language tasks and are increasingly being applied in healthcare domains. This work demonstrates a new LLM-powered disease risk assessment approach via streaming human-AI conversation, eliminating the need for programming required by traditional machine learning approaches. In a COVID-19 severity risk assessment case study, we fine-tune pre-trained generative LLMs (e.g., Llama2-7b and Flan-t5-xl) using a few shots of natural language examples, comparing their performance with traditional classifiers (i.e., Logistic Regression, XGBoost, Random Forest) that are trained de novo using tabular data across various experimental settings. We develop a mobile application that uses these fine-tuned LLMs as its generative AI (GenAI) core to facilitate real-time interaction between clinicians and patients, providing no-code risk assessment through conversational interfaces. This integration not only allows for the use of streaming Questions and Answers (QA) as inputs but also offers personalized feature importance analysis derived from the LLM's attention layers, enhancing the interpretability of risk assessments. By achieving high Area Under the Curve (AUC) scores with a limited number of fine-tuning samples, our results demonstrate the potential of generative LLMs to outperform discriminative classification methods in low-data regimes, highlighting their real-world adaptability and effectiveness. This work aims to fill the existing gap in leveraging generative LLMs for interactive no-code risk assessment and to encourage further research in this emerging field.<|reference_end|>
arxiv
@article{roshani2024generative, title={Generative LLM Powered Conversational AI Application for Personalized Risk Assessment: A Case Study in COVID-19}, author={Mohammad Amin Roshani, Xiangyu Zhou, Yao Qiang, Srinivasan Suresh, Steve Hicks, Usha Sethuraman, Dongxiao Zhu}, journal={arXiv preprint arXiv:2409.15027}, year={2024}, archivePrefix={arXiv}, eprint={2409.15027}, primaryClass={cs.CL cs.AI} }
roshani2024generative
arxiv-660787
2409.15028
Region Mixup
<|reference_start|>Region Mixup: This paper introduces a simple extension of mixup (Zhang et al., 2018) data augmentation to enhance generalization in visual recognition tasks. Unlike the vanilla mixup method, which blends entire images, our approach focuses on combining regions from multiple images.<|reference_end|>
arxiv
@article{saha2024region, title={Region Mixup}, author={Saptarshi Saha and Utpal Garain}, journal={The Second Tiny Papers Track at ICLR 2024}, year={2024}, archivePrefix={arXiv}, eprint={2409.15028}, primaryClass={cs.CV cs.AI cs.LG} }
saha2024region
arxiv-660788
2409.15030
Anomaly Detection from a Tensor Train Perspective
<|reference_start|>Anomaly Detection from a Tensor Train Perspective: We present a series of algorithms in tensor networks for anomaly detection in datasets, by using data compression in a Tensor Train representation. These algorithms consist of preserving the structure of normal data in compression and deleting the structure of anomalous data. The algorithms can be applied to any tensor network representation. We test the effectiveness of the methods with digits and Olivetti faces datasets and a cybersecurity dataset to determine cyber-attacks.<|reference_end|>
arxiv
@article{ali2024anomaly, title={Anomaly Detection from a Tensor Train Perspective}, author={Alejandro Mata Ali, Aitor Moreno Fdez. de Leceta and Jorge L{\'o}pez Rubio}, journal={arXiv preprint arXiv:2409.15030}, year={2024}, archivePrefix={arXiv}, eprint={2409.15030}, primaryClass={cs.LG cs.CR cs.ET cs.IT math.IT quant-ph} }
ali2024anomaly
arxiv-660789
2409.15033
Immersed in my Ideas: Using Virtual Reality and Multimodal Interactions to Visualize Users' Ideas and Thoughts
<|reference_start|>Immersed in my Ideas: Using Virtual Reality and Multimodal Interactions to Visualize Users' Ideas and Thoughts: This paper introduces VIVRA (Voice Interactive Virtual Reality Annotation), a VR application combining multimodal interaction with large language models (LLMs) to transform users' ideas into interactive 3D visualizations. VIVRA converts verbalized thoughts into "idea balloons" that summarize and expand on detected topics by an LLM. VIVRA allows users to verbalize their thoughts in real time or record their ideas to display the topics later. We evaluated the effectiveness of VIVRA in an exploratory study with 29 participants and a user study with 10 participants. Our results show that VIVRA enhanced users' ability to reflect on and develop ideas, achieving high levels of satisfaction, usability, and engagement. Participants valued VIVRA as a reflective tool for exploring personal thoughts and ideas. We discuss the potential advantages and uses of this application, highlighting the potential of combining immersive technologies with LLMs to create powerful ideation and reflection tools.<|reference_end|>
arxiv
@article{xing2024immersed, title={Immersed in my Ideas: Using Virtual Reality and Multimodal Interactions to Visualize Users' Ideas and Thoughts}, author={Yunhao Xing, Jerrick Ban, Timothy D. Hubbard, Michael Villano, Diego Gomez-Zara}, journal={arXiv preprint arXiv:2409.15033}, year={2024}, archivePrefix={arXiv}, eprint={2409.15033}, primaryClass={cs.HC} }
xing2024immersed
arxiv-660790
2409.15035
Can CLIP Count Stars? An Empirical Study on Quantity Bias in CLIP
<|reference_start|>Can CLIP Count Stars? An Empirical Study on Quantity Bias in CLIP: CLIP has demonstrated great versatility in adapting to various downstream tasks, such as image editing and generation, visual question answering, and video understanding. However, CLIP-based applications often suffer from misunderstandings regarding user intent, leading to discrepancies between the required number of objects and the actual outputs in image generation tasks. In this work, we empirically investigate the quantity bias in CLIP. By carefully designing different experimental settings and datasets, we comprehensively evaluate CLIP's understanding of quantity from text, image, and cross-modal perspectives. Our experimental results reveal a quantity bias in CLIP embeddings, impacting the reliability of downstream tasks.<|reference_end|>
arxiv
@article{zhang2024can, title={Can CLIP Count Stars? An Empirical Study on Quantity Bias in CLIP}, author={Zeliang Zhang, Zhuo Liu, Mingqian Feng, Chenliang Xu}, journal={arXiv preprint arXiv:2409.15035}, year={2024}, archivePrefix={arXiv}, eprint={2409.15035}, primaryClass={cs.CV cs.CL} }
zhang2024can
arxiv-660791
2409.15041
AIM 2024 Sparse Neural Rendering Challenge: Dataset and Benchmark
<|reference_start|>AIM 2024 Sparse Neural Rendering Challenge: Dataset and Benchmark: Recent developments in differentiable and neural rendering have made impressive breakthroughs in a variety of 2D and 3D tasks, e.g. novel view synthesis, 3D reconstruction. Typically, differentiable rendering relies on a dense viewpoint coverage of the scene, such that the geometry can be disambiguated from appearance observations alone. Several challenges arise when only a few input views are available, often referred to as sparse or few-shot neural rendering. As this is an underconstrained problem, most existing approaches introduce the use of regularisation, together with a diversity of learnt and hand-crafted priors. A recurring problem in sparse rendering literature is the lack of an homogeneous, up-to-date, dataset and evaluation protocol. While high-resolution datasets are standard in dense reconstruction literature, sparse rendering methods often evaluate with low-resolution images. Additionally, data splits are inconsistent across different manuscripts, and testing ground-truth images are often publicly available, which may lead to over-fitting. In this work, we propose the Sparse Rendering (SpaRe) dataset and benchmark. We introduce a new dataset that follows the setup of the DTU MVS dataset. The dataset is composed of 97 new scenes based on synthetic, high-quality assets. Each scene has up to 64 camera views and 7 lighting configurations, rendered at 1600x1200 resolution. We release a training split of 82 scenes to foster generalizable approaches, and provide an online evaluation platform for the validation and test sets, whose ground-truth images remain hidden. We propose two different sparse configurations (3 and 9 input images respectively). This provides a powerful and convenient tool for reproducible evaluation, and enable researchers easy access to a public leaderboard with the state-of-the-art performance scores. 
Available at: https://sparebenchmark.github.io/<|reference_end|>
arxiv
@article{nazarczuk2024aim, title={{AIM} 2024 Sparse Neural Rendering Challenge: Dataset and Benchmark}, author={Michal Nazarczuk and Thomas Tanay and Sibi Catley-Chandar and Richard Shaw and Radu Timofte and Eduardo P{\'e}rez-Pellitero}, journal={arXiv preprint arXiv:2409.15041}, year={2024}, archivePrefix={arXiv}, eprint={2409.15041}, primaryClass={cs.CV} }
nazarczuk2024aim
arxiv-660792
2409.15042
A discrete de Rham discretization of interface diffusion problems with application to the Leaky Dielectric Model
<|reference_start|>A discrete de Rham discretization of interface diffusion problems with application to the Leaky Dielectric Model: Motivated by the study of the electrodynamics of particles, we propose in this work an arbitrary-order discrete de Rham scheme for the treatment of elliptic problems with potential and flux jumps across a fixed interface. The scheme seamlessly supports general elements resulting from the cutting of a background mesh along the interface. Interface conditions are enforced weakly \`a la Nitsche. We provide a rigorous convergence analysis of the scheme for a steady model problem and showcase an application to a physical problem inspired by the Leaky Dielectric Model.<|reference_end|>
arxiv
@article{di pietro2024a, title={A discrete {de Rham} discretization of interface diffusion problems with application to the {Leaky Dielectric Model}}, author={Daniele A. Di Pietro and Simon Mendez and Aurelio Edoardo Spadotto}, journal={arXiv preprint arXiv:2409.15042}, year={2024}, archivePrefix={arXiv}, eprint={2409.15042}, primaryClass={math.NA cs.NA} }
di pietro2024a
arxiv-660793
2409.15043
Exploring Error Types in Formal Languages Among Students of Upper Secondary Education
<|reference_start|>Exploring Error Types in Formal Languages Among Students of Upper Secondary Education: Foundations of formal languages, as subfield of theoretical computer science, are part of typical upper secondary education curricula. There is very little research on the potential difficulties that students at this level have with this subject. In this paper, we report on an exploratory study of errors in formal languages among upper secondary education students. We collect the data by posing exercises in an intelligent tutoring system and analyzing student input. Our results suggest a) instances of non-functional understanding of concepts such as the empty word or a grammar as a substitution system; b) strategic problems such as lack of foresight when deriving a word or confounding formal specifications with real-world knowledge on certain aspects; and c) various syntactic problems. These findings can serve as a starting point for a broader understanding of how and why students struggle with this topic.<|reference_end|>
arxiv
@article{schmellenkamp2024exploring, title={Exploring Error Types in Formal Languages Among Students of Upper Secondary Education}, author={Marko Schmellenkamp and Dennis Stanglmair and Tilman Michaeli and Thomas Zeume}, journal={arXiv preprint arXiv:2409.15043}, year={2024}, archivePrefix={arXiv}, eprint={2409.15043}, primaryClass={cs.CY cs.FL} }
schmellenkamp2024exploring
arxiv-660794
2409.15045
AIM 2024 Sparse Neural Rendering Challenge: Methods and Results
<|reference_start|>AIM 2024 Sparse Neural Rendering Challenge: Methods and Results: This paper reviews the challenge on Sparse Neural Rendering that was part of the Advances in Image Manipulation (AIM) workshop, held in conjunction with ECCV 2024. This manuscript focuses on the competition set-up, the proposed methods and their respective results. The challenge aims at producing novel camera view synthesis of diverse scenes from sparse image observations. It is composed of two tracks, with differing levels of sparsity; 3 views in Track 1 (very sparse) and 9 views in Track 2 (sparse). Participants are asked to optimise objective fidelity to the ground-truth images as measured via the Peak Signal-to-Noise Ratio (PSNR) metric. For both tracks, we use the newly introduced Sparse Rendering (SpaRe) dataset and the popular DTU MVS dataset. In this challenge, 5 teams submitted final results to Track 1 and 4 teams submitted final results to Track 2. The submitted models are varied and push the boundaries of the current state-of-the-art in sparse neural rendering. A detailed description of all models developed in the challenge is provided in this paper.<|reference_end|>
arxiv
@article{nazarczuk2024aim, title={{AIM} 2024 Sparse Neural Rendering Challenge: Methods and Results}, author={Michal Nazarczuk and Sibi Catley-Chandar and Thomas Tanay and Richard Shaw and Eduardo P{\'e}rez-Pellitero and Radu Timofte and Xing Yan and Pan Wang and Yali Guo and Yongxin Wu and Youcheng Cai and Yanan Yang and Junting Li and Yanghong Zhou and P. Y. Mok and Zongqi He and Zhe Xiao and Kin-Chung Chan and Hana Lebeta Goshu and Cuixin Yang and Rongkang Dong and Jun Xiao and Kin-Man Lam and Jiayao Hao and Qiong Gao and Yanyan Zu and Junpei Zhang and Licheng Jiao and Xu Liu and Kuldeep Purohit}, journal={arXiv preprint arXiv:2409.15045}, year={2024}, archivePrefix={arXiv}, eprint={2409.15045}, primaryClass={cs.CV} }
nazarczuk2024aim
arxiv-660795
2409.15046
AlphaZip: Neural Network-Enhanced Lossless Text Compression
<|reference_start|>AlphaZip: Neural Network-Enhanced Lossless Text Compression: Data compression continues to evolve, with traditional information theory methods being widely used for compressing text, images, and videos. Recently, there has been growing interest in leveraging Generative AI for predictive compression techniques. This paper introduces a lossless text compression approach using a Large Language Model (LLM). The method involves two key steps: first, prediction using a dense neural network architecture, such as a transformer block; second, compressing the predicted ranks with standard compression algorithms like Adaptive Huffman, LZ77, or Gzip. Extensive analysis and benchmarking against conventional information-theoretic baselines demonstrate that neural compression offers improved performance.<|reference_end|>
arxiv
@article{narashiman2024alphazip:, title={{AlphaZip}: Neural Network-Enhanced Lossless Text Compression}, author={Swathi Shree Narashiman and Nitin Chandrachoodan}, journal={arXiv preprint arXiv:2409.15046}, year={2024}, archivePrefix={arXiv}, eprint={2409.15046}, primaryClass={cs.IT cs.AI cs.LG math.IT} }
narashiman2024alphazip:
arxiv-660796
2409.15047
Towards a General Market for Cloud-Edge-IoT Continuum
<|reference_start|>Towards a General Market for Cloud-Edge-IoT Continuum: Recent years have witnessed the proposals aiming at enabling Vertical, two-sided markets with a Single Marketplace (or exchange) (VSMs) for computing and data resources/services (products) offerings in a multi-cloud and crowdsourced IoT-edge sensing environment. A VSM is designed vertically from bottom up with a broker being a built-in component of the marketplace. While preventing seller lock-in and improving efficiency and availability, a VSM suffers from a key weakness from a buyer's perspective, i.e., the broker and the corresponding marketplace lock-in, which may lead to suboptimal shopping experience for buyers, due to marketplace monopoly by the broker and limited choice of products in the marketplace. In this position paper, we argue that a Horizontal two-sided market with Multiple Marketplaces (HMM), resembling the global stock market, should be developed. In an HMM, different marketplaces may be operated by different parties and sell similar and/or different types of products, e.g., computing and/or sensory data products. A broker is no longer a built-in component of any given marketplace. Instead, it may cover multiple marketplaces at the same time and there can be more than one broker in the HMM. Both the number and types of marketplaces and brokers may grow independently or scale horizontally to meet the growing demand. A buyer shops for a broker through whom the buyer gains access to the needed products sold in the marketplace(s) the broker covers and from whom the buyer receives various possible services, e.g., discount, value-added, or full services. An HMM not only overcomes the key weakness of a VSM but also allows the market to grow incrementally and organically. Finally, two example use cases are given to illustrate the benefits of HMM.<|reference_end|>
arxiv
@article{che2024towards, title={Towards a General Market for Cloud-Edge-{IoT} Continuum}, author={Hao Che and Hong Jiang and Zhijun Wang}, journal={arXiv preprint arXiv:2409.15047}, year={2024}, archivePrefix={arXiv}, eprint={2409.15047}, primaryClass={eess.SY cs.SY} }
che2024towards
arxiv-660797
2409.15049
PackageIntel: Leveraging Large Language Models for Automated Intelligence Extraction in Package Ecosystems
<|reference_start|>PackageIntel: Leveraging Large Language Models for Automated Intelligence Extraction in Package Ecosystems: The rise of malicious packages in public registries poses a significant threat to software supply chain (SSC) security. Although academia and industry employ methods like software composition analysis (SCA) to address this issue, existing approaches often lack timely and comprehensive intelligence updates. This paper introduces PackageIntel, a novel platform that revolutionizes the collection, processing, and retrieval of malicious package intelligence. By utilizing exhaustive search techniques, snowball sampling from diverse sources, and large language models (LLMs) with specialized prompts, PackageIntel ensures enhanced coverage, timeliness, and accuracy. We have developed a comprehensive database containing 20,692 malicious NPM and PyPI packages sourced from 21 distinct intelligence repositories. Empirical evaluations demonstrate that PackageIntel achieves a precision of 98.6% and an F1 score of 92.0 in intelligence extraction. Additionally, it detects threats on average 70% earlier than leading databases like Snyk and OSV, and operates cost-effectively at $0.094 per intelligence piece. The platform has successfully identified and reported over 1,000 malicious packages in downstream package manager mirror registries. This research provides a robust, efficient, and timely solution for identifying and mitigating threats within the software supply chain ecosystem.<|reference_end|>
arxiv
@article{guo2024packageintel:, title={{PackageIntel}: Leveraging Large Language Models for Automated Intelligence Extraction in Package Ecosystems}, author={Wenbo Guo and Chengwei Liu and Limin Wang and Jiahui Wu and Zhengzi Xu and Cheng Huang and Yong Fang and Yang Liu}, journal={arXiv preprint arXiv:2409.15049}, year={2024}, archivePrefix={arXiv}, eprint={2409.15049}, primaryClass={cs.SE} }
guo2024packageintel:
arxiv-660798
2409.15051
Scaling Laws of Decoder-Only Models on the Multilingual Machine Translation Task
<|reference_start|>Scaling Laws of Decoder-Only Models on the Multilingual Machine Translation Task: Recent studies have showcased remarkable capabilities of decoder-only models in many NLP tasks, including translation. Yet, the machine translation field has been largely dominated by encoder-decoder models based on the Transformer architecture. As a consequence, scaling laws of encoder-decoder models for neural machine translation have already been well studied, but decoder-only models have received less attention. This work explores the scaling laws of decoder-only models on the multilingual and multidomain translation task. We trained a collection of six decoder-only models, ranging from 70M to 7B parameters, on a sentence-level, multilingual and multidomain dataset. We conducted a series of experiments showing that the loss of decoder-only models can be estimated using a scaling law similar to the one discovered for large language models, but we also show that this scaling law has difficulties to generalize to too large models or to a different data distribution. We also study different scaling methods and show that scaling the depth and the width of a model lead to similar test loss improvements, but with different impact on the model's efficiency.<|reference_end|>
arxiv
@article{caillaut2024scaling, title={Scaling Laws of Decoder-Only Models on the Multilingual Machine Translation Task}, author={Ga{\"e}tan Caillaut and Raheel Qader and Mariam Nakhl{\'e} and Jingshu Liu and Jean-Gabriel Barth{\'e}lemy}, journal={arXiv preprint arXiv:2409.15051}, year={2024}, archivePrefix={arXiv}, eprint={2409.15051}, primaryClass={cs.CL cs.AI} }
caillaut2024scaling
arxiv-660799
2409.15052
Brotherhood at WMT 2024: Leveraging LLM-Generated Contextual Conversations for Cross-Lingual Image Captioning
<|reference_start|>Brotherhood at WMT 2024: Leveraging LLM-Generated Contextual Conversations for Cross-Lingual Image Captioning: In this paper, we describe our system under the team name Brotherhood for the English-to-Lowres Multi-Modal Translation Task. We participate in the multi-modal translation tasks for English-Hindi, English-Hausa, English-Bengali, and English-Malayalam language pairs. We present a method leveraging multi-modal Large Language Models (LLMs), specifically GPT-4o and Claude 3.5 Sonnet, to enhance cross-lingual image captioning without traditional training or fine-tuning. Our approach utilizes instruction-tuned prompting to generate rich, contextual conversations about cropped images, using their English captions as additional context. These synthetic conversations are then translated into the target languages. Finally, we employ a weighted prompting strategy, balancing the original English caption with the translated conversation to generate captions in the target language. This method achieved competitive results, scoring 37.90 BLEU on the English-Hindi Challenge Set and ranking first and second for English-Hausa on the Challenge and Evaluation Leaderboards, respectively. We conduct additional experiments on a subset of 250 images, exploring the trade-offs between BLEU scores and semantic similarity across various weighting schemes.<|reference_end|>
arxiv
@article{betala2024brotherhood, title={Brotherhood at {WMT} 2024: Leveraging {LLM}-Generated Contextual Conversations for Cross-Lingual Image Captioning}, author={Siddharth Betala and Ishan Chokshi}, journal={arXiv preprint arXiv:2409.15052}, year={2024}, archivePrefix={arXiv}, eprint={2409.15052}, primaryClass={cs.CL cs.AI} }
betala2024brotherhood
arxiv-660800
2409.15053
Cucheb: A GPU implementation of the filtered Lanczos procedure
<|reference_start|>Cucheb: A GPU implementation of the filtered Lanczos procedure: This paper describes the software package Cucheb, a GPU implementation of the filtered Lanczos procedure for the solution of large sparse symmetric eigenvalue problems. The filtered Lanczos procedure uses a carefully chosen polynomial spectral transformation to accelerate convergence of the Lanczos method when computing eigenvalues within a desired interval. This method has proven particularly effective for eigenvalue problems that arise in electronic structure calculations and density functional theory. We compare our implementation against an equivalent CPU implementation and show that using the GPU can reduce the computation time by more than a factor of 10.<|reference_end|>
arxiv
@article{aurentz2024cucheb:, title={{Cucheb}: A {GPU} implementation of the filtered {Lanczos} procedure}, author={Jared L. Aurentz and Vassilis Kalantzis and Yousef Saad}, journal={arXiv preprint arXiv:2409.15053}, year={2024}, doi={10.1016/j.cpc.2017.06.016}, archivePrefix={arXiv}, eprint={2409.15053}, primaryClass={math.NA cs.DC cs.NA} }
aurentz2024cucheb: